/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
46 #define MGMT_VERSION 1
47 #define MGMT_REVISION 21
49 static const u16 mgmt_commands[] = {
50 MGMT_OP_READ_INDEX_LIST,
53 MGMT_OP_SET_DISCOVERABLE,
54 MGMT_OP_SET_CONNECTABLE,
55 MGMT_OP_SET_FAST_CONNECTABLE,
57 MGMT_OP_SET_LINK_SECURITY,
61 MGMT_OP_SET_DEV_CLASS,
62 MGMT_OP_SET_LOCAL_NAME,
65 MGMT_OP_LOAD_LINK_KEYS,
66 MGMT_OP_LOAD_LONG_TERM_KEYS,
68 MGMT_OP_GET_CONNECTIONS,
69 MGMT_OP_PIN_CODE_REPLY,
70 MGMT_OP_PIN_CODE_NEG_REPLY,
71 MGMT_OP_SET_IO_CAPABILITY,
73 MGMT_OP_CANCEL_PAIR_DEVICE,
74 MGMT_OP_UNPAIR_DEVICE,
75 MGMT_OP_USER_CONFIRM_REPLY,
76 MGMT_OP_USER_CONFIRM_NEG_REPLY,
77 MGMT_OP_USER_PASSKEY_REPLY,
78 MGMT_OP_USER_PASSKEY_NEG_REPLY,
79 MGMT_OP_READ_LOCAL_OOB_DATA,
80 MGMT_OP_ADD_REMOTE_OOB_DATA,
81 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
82 MGMT_OP_START_DISCOVERY,
83 MGMT_OP_STOP_DISCOVERY,
86 MGMT_OP_UNBLOCK_DEVICE,
87 MGMT_OP_SET_DEVICE_ID,
88 MGMT_OP_SET_ADVERTISING,
90 MGMT_OP_SET_STATIC_ADDRESS,
91 MGMT_OP_SET_SCAN_PARAMS,
92 MGMT_OP_SET_SECURE_CONN,
93 MGMT_OP_SET_DEBUG_KEYS,
96 MGMT_OP_GET_CONN_INFO,
97 MGMT_OP_GET_CLOCK_INFO,
99 MGMT_OP_REMOVE_DEVICE,
100 MGMT_OP_LOAD_CONN_PARAM,
101 MGMT_OP_READ_UNCONF_INDEX_LIST,
102 MGMT_OP_READ_CONFIG_INFO,
103 MGMT_OP_SET_EXTERNAL_CONFIG,
104 MGMT_OP_SET_PUBLIC_ADDRESS,
105 MGMT_OP_START_SERVICE_DISCOVERY,
106 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
107 MGMT_OP_READ_EXT_INDEX_LIST,
108 MGMT_OP_READ_ADV_FEATURES,
109 MGMT_OP_ADD_ADVERTISING,
110 MGMT_OP_REMOVE_ADVERTISING,
111 MGMT_OP_GET_ADV_SIZE_INFO,
112 MGMT_OP_START_LIMITED_DISCOVERY,
113 MGMT_OP_READ_EXT_INFO,
114 MGMT_OP_SET_APPEARANCE,
115 MGMT_OP_GET_PHY_CONFIGURATION,
116 MGMT_OP_SET_PHY_CONFIGURATION,
117 MGMT_OP_SET_BLOCKED_KEYS,
118 MGMT_OP_SET_WIDEBAND_SPEECH,
119 MGMT_OP_READ_CONTROLLER_CAP,
120 MGMT_OP_READ_EXP_FEATURES_INFO,
121 MGMT_OP_SET_EXP_FEATURE,
122 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
123 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
124 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
125 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
126 MGMT_OP_GET_DEVICE_FLAGS,
127 MGMT_OP_SET_DEVICE_FLAGS,
128 MGMT_OP_READ_ADV_MONITOR_FEATURES,
129 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
130 MGMT_OP_REMOVE_ADV_MONITOR,
131 MGMT_OP_ADD_EXT_ADV_PARAMS,
132 MGMT_OP_ADD_EXT_ADV_DATA,
133 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136 static const u16 mgmt_events[] = {
137 MGMT_EV_CONTROLLER_ERROR,
139 MGMT_EV_INDEX_REMOVED,
140 MGMT_EV_NEW_SETTINGS,
141 MGMT_EV_CLASS_OF_DEV_CHANGED,
142 MGMT_EV_LOCAL_NAME_CHANGED,
143 MGMT_EV_NEW_LINK_KEY,
144 MGMT_EV_NEW_LONG_TERM_KEY,
145 MGMT_EV_DEVICE_CONNECTED,
146 MGMT_EV_DEVICE_DISCONNECTED,
147 MGMT_EV_CONNECT_FAILED,
148 MGMT_EV_PIN_CODE_REQUEST,
149 MGMT_EV_USER_CONFIRM_REQUEST,
150 MGMT_EV_USER_PASSKEY_REQUEST,
152 MGMT_EV_DEVICE_FOUND,
154 MGMT_EV_DEVICE_BLOCKED,
155 MGMT_EV_DEVICE_UNBLOCKED,
156 MGMT_EV_DEVICE_UNPAIRED,
157 MGMT_EV_PASSKEY_NOTIFY,
160 MGMT_EV_DEVICE_ADDED,
161 MGMT_EV_DEVICE_REMOVED,
162 MGMT_EV_NEW_CONN_PARAM,
163 MGMT_EV_UNCONF_INDEX_ADDED,
164 MGMT_EV_UNCONF_INDEX_REMOVED,
165 MGMT_EV_NEW_CONFIG_OPTIONS,
166 MGMT_EV_EXT_INDEX_ADDED,
167 MGMT_EV_EXT_INDEX_REMOVED,
168 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
169 MGMT_EV_ADVERTISING_ADDED,
170 MGMT_EV_ADVERTISING_REMOVED,
171 MGMT_EV_EXT_INFO_CHANGED,
172 MGMT_EV_PHY_CONFIGURATION_CHANGED,
173 MGMT_EV_EXP_FEATURE_CHANGED,
174 MGMT_EV_DEVICE_FLAGS_CHANGED,
175 MGMT_EV_ADV_MONITOR_ADDED,
176 MGMT_EV_ADV_MONITOR_REMOVED,
177 MGMT_EV_CONTROLLER_SUSPEND,
178 MGMT_EV_CONTROLLER_RESUME,
181 static const u16 mgmt_untrusted_commands[] = {
182 MGMT_OP_READ_INDEX_LIST,
184 MGMT_OP_READ_UNCONF_INDEX_LIST,
185 MGMT_OP_READ_CONFIG_INFO,
186 MGMT_OP_READ_EXT_INDEX_LIST,
187 MGMT_OP_READ_EXT_INFO,
188 MGMT_OP_READ_CONTROLLER_CAP,
189 MGMT_OP_READ_EXP_FEATURES_INFO,
190 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
191 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
194 static const u16 mgmt_untrusted_events[] = {
196 MGMT_EV_INDEX_REMOVED,
197 MGMT_EV_NEW_SETTINGS,
198 MGMT_EV_CLASS_OF_DEV_CHANGED,
199 MGMT_EV_LOCAL_NAME_CHANGED,
200 MGMT_EV_UNCONF_INDEX_ADDED,
201 MGMT_EV_UNCONF_INDEX_REMOVED,
202 MGMT_EV_NEW_CONFIG_OPTIONS,
203 MGMT_EV_EXT_INDEX_ADDED,
204 MGMT_EV_EXT_INDEX_REMOVED,
205 MGMT_EV_EXT_INFO_CHANGED,
206 MGMT_EV_EXP_FEATURE_CHANGED,
209 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 "\x00\x00\x00\x00\x00\x00\x00\x00"
214 /* HCI to MGMT error code conversion table */
215 static const u8 mgmt_status_table[] = {
217 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
218 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
219 MGMT_STATUS_FAILED, /* Hardware Failure */
220 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
221 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
222 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
223 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
224 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
226 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
227 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
228 MGMT_STATUS_BUSY, /* Command Disallowed */
229 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
230 MGMT_STATUS_REJECTED, /* Rejected Security */
231 MGMT_STATUS_REJECTED, /* Rejected Personal */
232 MGMT_STATUS_TIMEOUT, /* Host Timeout */
233 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
234 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
235 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
236 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
237 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
238 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
239 MGMT_STATUS_BUSY, /* Repeated Attempts */
240 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
241 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
242 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
243 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
244 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
245 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
246 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
247 MGMT_STATUS_FAILED, /* Unspecified Error */
248 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
249 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
250 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
251 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
252 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
253 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
254 MGMT_STATUS_FAILED, /* Unit Link Key Used */
255 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
256 MGMT_STATUS_TIMEOUT, /* Instant Passed */
257 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
258 MGMT_STATUS_FAILED, /* Transaction Collision */
259 MGMT_STATUS_FAILED, /* Reserved for future use */
260 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
261 MGMT_STATUS_REJECTED, /* QoS Rejected */
262 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
263 MGMT_STATUS_REJECTED, /* Insufficient Security */
264 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
265 MGMT_STATUS_FAILED, /* Reserved for future use */
266 MGMT_STATUS_BUSY, /* Role Switch Pending */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_FAILED, /* Slot Violation */
269 MGMT_STATUS_FAILED, /* Role Switch Failed */
270 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
271 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
272 MGMT_STATUS_BUSY, /* Host Busy Pairing */
273 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
274 MGMT_STATUS_BUSY, /* Controller Busy */
275 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
276 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
277 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
278 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
279 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
282 static u8 mgmt_status(u8 hci_status)
284 if (hci_status < ARRAY_SIZE(mgmt_status_table))
285 return mgmt_status_table[hci_status];
287 return MGMT_STATUS_FAILED;
290 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
293 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
297 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
298 u16 len, int flag, struct sock *skip_sk)
300 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
304 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
305 struct sock *skip_sk)
307 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
308 HCI_SOCK_TRUSTED, skip_sk);
311 static u8 le_addr_type(u8 mgmt_addr_type)
313 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
314 return ADDR_LE_DEV_PUBLIC;
316 return ADDR_LE_DEV_RANDOM;
319 void mgmt_fill_version_info(void *ver)
321 struct mgmt_rp_read_version *rp = ver;
323 rp->version = MGMT_VERSION;
324 rp->revision = cpu_to_le16(MGMT_REVISION);
327 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
330 struct mgmt_rp_read_version rp;
332 bt_dev_dbg(hdev, "sock %p", sk);
334 mgmt_fill_version_info(&rp);
336 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
340 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
343 struct mgmt_rp_read_commands *rp;
344 u16 num_commands, num_events;
348 bt_dev_dbg(hdev, "sock %p", sk);
350 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
351 num_commands = ARRAY_SIZE(mgmt_commands);
352 num_events = ARRAY_SIZE(mgmt_events);
354 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
355 num_events = ARRAY_SIZE(mgmt_untrusted_events);
358 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
360 rp = kmalloc(rp_size, GFP_KERNEL);
364 rp->num_commands = cpu_to_le16(num_commands);
365 rp->num_events = cpu_to_le16(num_events);
367 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
368 __le16 *opcode = rp->opcodes;
370 for (i = 0; i < num_commands; i++, opcode++)
371 put_unaligned_le16(mgmt_commands[i], opcode);
373 for (i = 0; i < num_events; i++, opcode++)
374 put_unaligned_le16(mgmt_events[i], opcode);
376 __le16 *opcode = rp->opcodes;
378 for (i = 0; i < num_commands; i++, opcode++)
379 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
381 for (i = 0; i < num_events; i++, opcode++)
382 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
385 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
392 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
395 struct mgmt_rp_read_index_list *rp;
401 bt_dev_dbg(hdev, "sock %p", sk);
403 read_lock(&hci_dev_list_lock);
406 list_for_each_entry(d, &hci_dev_list, list) {
407 if (d->dev_type == HCI_PRIMARY &&
408 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
412 rp_len = sizeof(*rp) + (2 * count);
413 rp = kmalloc(rp_len, GFP_ATOMIC);
415 read_unlock(&hci_dev_list_lock);
420 list_for_each_entry(d, &hci_dev_list, list) {
421 if (hci_dev_test_flag(d, HCI_SETUP) ||
422 hci_dev_test_flag(d, HCI_CONFIG) ||
423 hci_dev_test_flag(d, HCI_USER_CHANNEL))
426 /* Devices marked as raw-only are neither configured
427 * nor unconfigured controllers.
429 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
432 if (d->dev_type == HCI_PRIMARY &&
433 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
434 rp->index[count++] = cpu_to_le16(d->id);
435 bt_dev_dbg(hdev, "Added hci%u", d->id);
439 rp->num_controllers = cpu_to_le16(count);
440 rp_len = sizeof(*rp) + (2 * count);
442 read_unlock(&hci_dev_list_lock);
444 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
452 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
453 void *data, u16 data_len)
455 struct mgmt_rp_read_unconf_index_list *rp;
461 bt_dev_dbg(hdev, "sock %p", sk);
463 read_lock(&hci_dev_list_lock);
466 list_for_each_entry(d, &hci_dev_list, list) {
467 if (d->dev_type == HCI_PRIMARY &&
468 hci_dev_test_flag(d, HCI_UNCONFIGURED))
472 rp_len = sizeof(*rp) + (2 * count);
473 rp = kmalloc(rp_len, GFP_ATOMIC);
475 read_unlock(&hci_dev_list_lock);
480 list_for_each_entry(d, &hci_dev_list, list) {
481 if (hci_dev_test_flag(d, HCI_SETUP) ||
482 hci_dev_test_flag(d, HCI_CONFIG) ||
483 hci_dev_test_flag(d, HCI_USER_CHANNEL))
486 /* Devices marked as raw-only are neither configured
487 * nor unconfigured controllers.
489 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
492 if (d->dev_type == HCI_PRIMARY &&
493 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
494 rp->index[count++] = cpu_to_le16(d->id);
495 bt_dev_dbg(hdev, "Added hci%u", d->id);
499 rp->num_controllers = cpu_to_le16(count);
500 rp_len = sizeof(*rp) + (2 * count);
502 read_unlock(&hci_dev_list_lock);
504 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
505 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
512 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
513 void *data, u16 data_len)
515 struct mgmt_rp_read_ext_index_list *rp;
520 bt_dev_dbg(hdev, "sock %p", sk);
522 read_lock(&hci_dev_list_lock);
525 list_for_each_entry(d, &hci_dev_list, list) {
526 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
530 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
532 read_unlock(&hci_dev_list_lock);
537 list_for_each_entry(d, &hci_dev_list, list) {
538 if (hci_dev_test_flag(d, HCI_SETUP) ||
539 hci_dev_test_flag(d, HCI_CONFIG) ||
540 hci_dev_test_flag(d, HCI_USER_CHANNEL))
543 /* Devices marked as raw-only are neither configured
544 * nor unconfigured controllers.
546 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
549 if (d->dev_type == HCI_PRIMARY) {
550 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
551 rp->entry[count].type = 0x01;
553 rp->entry[count].type = 0x00;
554 } else if (d->dev_type == HCI_AMP) {
555 rp->entry[count].type = 0x02;
560 rp->entry[count].bus = d->bus;
561 rp->entry[count++].index = cpu_to_le16(d->id);
562 bt_dev_dbg(hdev, "Added hci%u", d->id);
565 rp->num_controllers = cpu_to_le16(count);
567 read_unlock(&hci_dev_list_lock);
569 /* If this command is called at least once, then all the
570 * default index and unconfigured index events are disabled
571 * and from now on only extended index events are used.
573 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
574 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
575 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
577 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
578 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
579 struct_size(rp, entry, count));
586 static bool is_configured(struct hci_dev *hdev)
588 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
589 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
592 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
593 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
594 !bacmp(&hdev->public_addr, BDADDR_ANY))
600 static __le32 get_missing_options(struct hci_dev *hdev)
604 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
605 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
606 options |= MGMT_OPTION_EXTERNAL_CONFIG;
608 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
609 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
610 !bacmp(&hdev->public_addr, BDADDR_ANY))
611 options |= MGMT_OPTION_PUBLIC_ADDRESS;
613 return cpu_to_le32(options);
616 static int new_options(struct hci_dev *hdev, struct sock *skip)
618 __le32 options = get_missing_options(hdev);
620 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
621 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
624 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
626 __le32 options = get_missing_options(hdev);
628 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
632 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
633 void *data, u16 data_len)
635 struct mgmt_rp_read_config_info rp;
638 bt_dev_dbg(hdev, "sock %p", sk);
642 memset(&rp, 0, sizeof(rp));
643 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
645 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
646 options |= MGMT_OPTION_EXTERNAL_CONFIG;
648 if (hdev->set_bdaddr)
649 options |= MGMT_OPTION_PUBLIC_ADDRESS;
651 rp.supported_options = cpu_to_le32(options);
652 rp.missing_options = get_missing_options(hdev);
654 hci_dev_unlock(hdev);
656 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
660 static u32 get_supported_phys(struct hci_dev *hdev)
662 u32 supported_phys = 0;
664 if (lmp_bredr_capable(hdev)) {
665 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
667 if (hdev->features[0][0] & LMP_3SLOT)
668 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
670 if (hdev->features[0][0] & LMP_5SLOT)
671 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
673 if (lmp_edr_2m_capable(hdev)) {
674 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
676 if (lmp_edr_3slot_capable(hdev))
677 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
679 if (lmp_edr_5slot_capable(hdev))
680 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
682 if (lmp_edr_3m_capable(hdev)) {
683 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
685 if (lmp_edr_3slot_capable(hdev))
686 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
688 if (lmp_edr_5slot_capable(hdev))
689 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
694 if (lmp_le_capable(hdev)) {
695 supported_phys |= MGMT_PHY_LE_1M_TX;
696 supported_phys |= MGMT_PHY_LE_1M_RX;
698 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
699 supported_phys |= MGMT_PHY_LE_2M_TX;
700 supported_phys |= MGMT_PHY_LE_2M_RX;
703 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
704 supported_phys |= MGMT_PHY_LE_CODED_TX;
705 supported_phys |= MGMT_PHY_LE_CODED_RX;
709 return supported_phys;
712 static u32 get_selected_phys(struct hci_dev *hdev)
714 u32 selected_phys = 0;
716 if (lmp_bredr_capable(hdev)) {
717 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
719 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
720 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
722 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
723 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
725 if (lmp_edr_2m_capable(hdev)) {
726 if (!(hdev->pkt_type & HCI_2DH1))
727 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
729 if (lmp_edr_3slot_capable(hdev) &&
730 !(hdev->pkt_type & HCI_2DH3))
731 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
733 if (lmp_edr_5slot_capable(hdev) &&
734 !(hdev->pkt_type & HCI_2DH5))
735 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
737 if (lmp_edr_3m_capable(hdev)) {
738 if (!(hdev->pkt_type & HCI_3DH1))
739 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
741 if (lmp_edr_3slot_capable(hdev) &&
742 !(hdev->pkt_type & HCI_3DH3))
743 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
745 if (lmp_edr_5slot_capable(hdev) &&
746 !(hdev->pkt_type & HCI_3DH5))
747 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
752 if (lmp_le_capable(hdev)) {
753 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
754 selected_phys |= MGMT_PHY_LE_1M_TX;
756 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
757 selected_phys |= MGMT_PHY_LE_1M_RX;
759 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
760 selected_phys |= MGMT_PHY_LE_2M_TX;
762 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
763 selected_phys |= MGMT_PHY_LE_2M_RX;
765 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
766 selected_phys |= MGMT_PHY_LE_CODED_TX;
768 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
769 selected_phys |= MGMT_PHY_LE_CODED_RX;
772 return selected_phys;
775 static u32 get_configurable_phys(struct hci_dev *hdev)
777 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
778 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
781 static u32 get_supported_settings(struct hci_dev *hdev)
785 settings |= MGMT_SETTING_POWERED;
786 settings |= MGMT_SETTING_BONDABLE;
787 settings |= MGMT_SETTING_DEBUG_KEYS;
788 settings |= MGMT_SETTING_CONNECTABLE;
789 settings |= MGMT_SETTING_DISCOVERABLE;
791 if (lmp_bredr_capable(hdev)) {
792 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
793 settings |= MGMT_SETTING_FAST_CONNECTABLE;
794 settings |= MGMT_SETTING_BREDR;
795 settings |= MGMT_SETTING_LINK_SECURITY;
797 if (lmp_ssp_capable(hdev)) {
798 settings |= MGMT_SETTING_SSP;
799 if (IS_ENABLED(CONFIG_BT_HS))
800 settings |= MGMT_SETTING_HS;
803 if (lmp_sc_capable(hdev))
804 settings |= MGMT_SETTING_SECURE_CONN;
806 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
808 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
811 if (lmp_le_capable(hdev)) {
812 settings |= MGMT_SETTING_LE;
813 settings |= MGMT_SETTING_SECURE_CONN;
814 settings |= MGMT_SETTING_PRIVACY;
815 settings |= MGMT_SETTING_STATIC_ADDRESS;
817 /* When the experimental feature for LL Privacy support is
818 * enabled, then advertising is no longer supported.
820 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
821 settings |= MGMT_SETTING_ADVERTISING;
824 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
826 settings |= MGMT_SETTING_CONFIGURATION;
828 settings |= MGMT_SETTING_PHY_CONFIGURATION;
833 static u32 get_current_settings(struct hci_dev *hdev)
837 if (hdev_is_powered(hdev))
838 settings |= MGMT_SETTING_POWERED;
840 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
841 settings |= MGMT_SETTING_CONNECTABLE;
843 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
844 settings |= MGMT_SETTING_FAST_CONNECTABLE;
846 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
847 settings |= MGMT_SETTING_DISCOVERABLE;
849 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
850 settings |= MGMT_SETTING_BONDABLE;
852 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
853 settings |= MGMT_SETTING_BREDR;
855 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
856 settings |= MGMT_SETTING_LE;
858 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
859 settings |= MGMT_SETTING_LINK_SECURITY;
861 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
862 settings |= MGMT_SETTING_SSP;
864 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
865 settings |= MGMT_SETTING_HS;
867 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
868 settings |= MGMT_SETTING_ADVERTISING;
870 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
871 settings |= MGMT_SETTING_SECURE_CONN;
873 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
874 settings |= MGMT_SETTING_DEBUG_KEYS;
876 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
877 settings |= MGMT_SETTING_PRIVACY;
879 /* The current setting for static address has two purposes. The
880 * first is to indicate if the static address will be used and
881 * the second is to indicate if it is actually set.
883 * This means if the static address is not configured, this flag
884 * will never be set. If the address is configured, then if the
885 * address is actually used decides if the flag is set or not.
887 * For single mode LE only controllers and dual-mode controllers
888 * with BR/EDR disabled, the existence of the static address will
891 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
892 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
893 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
894 if (bacmp(&hdev->static_addr, BDADDR_ANY))
895 settings |= MGMT_SETTING_STATIC_ADDRESS;
898 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
899 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
904 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
906 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
909 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
910 struct hci_dev *hdev,
913 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
916 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
918 struct mgmt_pending_cmd *cmd;
920 /* If there's a pending mgmt command the flags will not yet have
921 * their final values, so check for this first.
923 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
925 struct mgmt_mode *cp = cmd->param;
927 return LE_AD_GENERAL;
928 else if (cp->val == 0x02)
929 return LE_AD_LIMITED;
931 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
932 return LE_AD_LIMITED;
933 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
934 return LE_AD_GENERAL;
940 bool mgmt_get_connectable(struct hci_dev *hdev)
942 struct mgmt_pending_cmd *cmd;
944 /* If there's a pending mgmt command the flag will not yet have
945 * it's final value, so check for this first.
947 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
949 struct mgmt_mode *cp = cmd->param;
954 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
957 static void service_cache_off(struct work_struct *work)
959 struct hci_dev *hdev = container_of(work, struct hci_dev,
961 struct hci_request req;
963 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
966 hci_req_init(&req, hdev);
970 __hci_req_update_eir(&req);
971 __hci_req_update_class(&req);
973 hci_dev_unlock(hdev);
975 hci_req_run(&req, NULL);
978 static void rpa_expired(struct work_struct *work)
980 struct hci_dev *hdev = container_of(work, struct hci_dev,
982 struct hci_request req;
984 bt_dev_dbg(hdev, "");
986 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
988 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
991 /* The generation of a new RPA and programming it into the
992 * controller happens in the hci_req_enable_advertising()
995 hci_req_init(&req, hdev);
996 if (ext_adv_capable(hdev))
997 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
999 __hci_req_enable_advertising(&req);
1000 hci_req_run(&req, NULL);
1003 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1005 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1008 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1009 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1011 /* Non-mgmt controlled devices get this bit set
1012 * implicitly so that pairing works for them, however
1013 * for mgmt we require user-space to explicitly enable
1016 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1019 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1020 void *data, u16 data_len)
1022 struct mgmt_rp_read_info rp;
1024 bt_dev_dbg(hdev, "sock %p", sk);
1028 memset(&rp, 0, sizeof(rp));
1030 bacpy(&rp.bdaddr, &hdev->bdaddr);
1032 rp.version = hdev->hci_ver;
1033 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1035 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1036 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1038 memcpy(rp.dev_class, hdev->dev_class, 3);
1040 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1041 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1043 hci_dev_unlock(hdev);
1045 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1049 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1054 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1055 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1056 hdev->dev_class, 3);
1058 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1059 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1062 name_len = strlen(hdev->dev_name);
1063 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1064 hdev->dev_name, name_len);
1066 name_len = strlen(hdev->short_name);
1067 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1068 hdev->short_name, name_len);
1073 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1074 void *data, u16 data_len)
1077 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1080 bt_dev_dbg(hdev, "sock %p", sk);
1082 memset(&buf, 0, sizeof(buf));
1086 bacpy(&rp->bdaddr, &hdev->bdaddr);
1088 rp->version = hdev->hci_ver;
1089 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1091 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1092 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1095 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1096 rp->eir_len = cpu_to_le16(eir_len);
1098 hci_dev_unlock(hdev);
1100 /* If this command is called at least once, then the events
1101 * for class of device and local name changes are disabled
1102 * and only the new extended controller information event
1105 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1106 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1107 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1109 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1110 sizeof(*rp) + eir_len);
1113 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1116 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1119 memset(buf, 0, sizeof(buf));
1121 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1122 ev->eir_len = cpu_to_le16(eir_len);
1124 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1125 sizeof(*ev) + eir_len,
1126 HCI_MGMT_EXT_INFO_EVENTS, skip);
1129 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1131 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1133 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1137 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1139 bt_dev_dbg(hdev, "status 0x%02x", status);
1141 if (hci_conn_count(hdev) == 0) {
1142 cancel_delayed_work(&hdev->power_off);
1143 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1147 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1149 struct mgmt_ev_advertising_added ev;
1151 ev.instance = instance;
1153 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1156 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1159 struct mgmt_ev_advertising_removed ev;
1161 ev.instance = instance;
1163 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1166 static void cancel_adv_timeout(struct hci_dev *hdev)
1168 if (hdev->adv_instance_timeout) {
1169 hdev->adv_instance_timeout = 0;
1170 cancel_delayed_work(&hdev->adv_instance_expire);
1174 static int clean_up_hci_state(struct hci_dev *hdev)
1176 struct hci_request req;
1177 struct hci_conn *conn;
1178 bool discov_stopped;
1181 hci_req_init(&req, hdev);
1183 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1184 test_bit(HCI_PSCAN, &hdev->flags)) {
1186 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1189 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1191 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1192 __hci_req_disable_advertising(&req);
1194 discov_stopped = hci_req_stop_discovery(&req);
1196 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1197 /* 0x15 == Terminated due to Power Off */
1198 __hci_abort_conn(&req, conn, 0x15);
1201 err = hci_req_run(&req, clean_up_hci_complete);
1202 if (!err && discov_stopped)
1203 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: power the controller on or off.
 *
 * Rejects values other than 0x00/0x01, refuses to stack a second
 * request while one is pending, and short-circuits when the requested
 * state already matches. Power-on is deferred to the power_on work
 * item; power-off first quiesces the controller via
 * clean_up_hci_state() with a timeout fallback.
 */
1208 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1211 struct mgmt_mode *cp = data;
1212 struct mgmt_pending_cmd *cmd;
1215 bt_dev_dbg(hdev, "sock %p", sk);
1217 if (cp->val != 0x00 && cp->val != 0x01)
1218 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1219 MGMT_STATUS_INVALID_PARAMS);
1223 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1224 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Requested state already in effect — just echo current settings. */
1229 if (!!cp->val == hdev_is_powered(hdev)) {
1230 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1234 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1241 queue_work(hdev->req_workqueue, &hdev->power_on);
1244 /* Disconnect connections, stop scans, etc */
1245 err = clean_up_hci_state(hdev);
/* Fallback: force power-off even if cleanup never completes. */
1247 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1248 HCI_POWER_OFF_TIMEOUT);
1250 /* ENODATA means there were no HCI commands queued */
1251 if (err == -ENODATA) {
/* Nothing to clean up, so power off immediately. */
1252 cancel_delayed_work(&hdev->power_off);
1253 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1259 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS carrying the current settings bitmask
 * to all sockets that opted into setting events, except @skip.
 */
1263 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1265 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1267 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1268 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast new settings to every listener. */
1271 int mgmt_new_settings(struct hci_dev *hdev)
1273 return new_settings(hdev, NULL);
1278 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings, detach it from the pending list and free it. The
 * first command's socket is captured (with a reference) in the
 * cmd_lookup so the caller can later skip it when broadcasting.
 */
1282 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1284 struct cmd_lookup *match = data;
1286 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1288 list_del(&cmd->list);
1290 if (match->sk == NULL) {
1291 match->sk = cmd->sk;
1292 sock_hold(match->sk);
/* Freed directly (not mgmt_pending_remove) since list_del was done. */
1295 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data, then remove it from the pending list.
 */
1298 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1302 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1303 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: complete a pending command via its
 * per-command cmd_complete hook when one is set, otherwise fall back
 * to a plain status response.
 */
1306 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1308 if (cmd->cmd_complete) {
1311 cmd->cmd_complete(cmd, *status);
1312 mgmt_pending_remove(cmd);
1317 cmd_status_rsp(cmd, data);
/* Generic cmd_complete hook: echo the command's original parameters
 * back as the response payload.
 */
1320 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1322 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1323 cmd->param, cmd->param_len);
/* cmd_complete hook for address-based commands: respond with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1326 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1328 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1329 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an MGMT status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, SUCCESS
 * otherwise.
 */
1332 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1334 if (!lmp_bredr_capable(hdev))
1335 return MGMT_STATUS_NOT_SUPPORTED;
1336 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1337 return MGMT_STATUS_REJECTED;
1339 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an MGMT status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled, SUCCESS otherwise.
 */
1342 static u8 mgmt_le_support(struct hci_dev *hdev)
1344 if (!lmp_le_capable(hdev))
1345 return MGMT_STATUS_NOT_SUPPORTED;
1346 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1347 return MGMT_STATUS_REJECTED;
1349 return MGMT_STATUS_SUCCESS;
/* Finish a pending MGMT_OP_SET_DISCOVERABLE once the HCI work is done.
 * On failure the HCI_LIMITED_DISCOVERABLE flag is cleared and the
 * caller gets a status response; on success the discoverable timeout
 * (if any) is armed, the settings response is sent and NEW_SETTINGS is
 * broadcast to everyone else.
 */
1352 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1354 struct mgmt_pending_cmd *cmd;
1356 bt_dev_dbg(hdev, "status 0x%02x", status);
1360 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1365 u8 mgmt_err = mgmt_status(status);
1366 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1367 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm the discoverable timeout that set_discoverable() stored. */
1371 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1372 hdev->discov_timeout > 0) {
1373 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1374 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1377 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1378 new_settings(hdev, cmd->sk);
1381 mgmt_pending_remove(cmd);
1384 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable; cp->timeout is in seconds. Validates the val/timeout
 * combination, rejects the call while related commands are pending,
 * while not connectable, or while advertising is paused. When powered
 * off only the flags are toggled; when only the timeout changes no HCI
 * traffic is generated; otherwise the flags are set and the actual
 * update is deferred to the discoverable_update work item, with
 * completion handled in mgmt_set_discoverable_complete().
 */
1387 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1390 struct mgmt_cp_set_discoverable *cp = data;
1391 struct mgmt_pending_cmd *cmd;
1395 bt_dev_dbg(hdev, "sock %p", sk);
1397 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1398 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1400 MGMT_STATUS_REJECTED);
1402 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1403 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1404 MGMT_STATUS_INVALID_PARAMS);
1406 timeout = __le16_to_cpu(cp->timeout);
1408 /* Disabling discoverable requires that no timeout is set,
1409 * and enabling limited discoverable requires a timeout.
1411 if ((cp->val == 0x00 && timeout > 0) ||
1412 (cp->val == 0x02 && timeout == 0))
1413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1414 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while powered off. */
1418 if (!hdev_is_powered(hdev) && timeout > 0) {
1419 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1420 MGMT_STATUS_NOT_POWERED);
1424 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1425 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable, so reject when not connectable. */
1431 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1432 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1433 MGMT_STATUS_REJECTED);
1437 if (hdev->advertising_paused) {
1438 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Powered off: toggle the flag only, no HCI commands possible. */
1443 if (!hdev_is_powered(hdev)) {
1444 bool changed = false;
1446 /* Setting limited discoverable when powered off is
1447 * not a valid operation since it requires a timeout
1448 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1450 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1451 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1455 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1460 err = new_settings(hdev, sk);
1465 /* If the current mode is the same, then just update the timeout
1466 * value with the new value. And if only the timeout gets updated,
1467 * then no need for any HCI transactions.
1469 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1470 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1471 HCI_LIMITED_DISCOVERABLE)) {
1472 cancel_delayed_work(&hdev->discov_off);
1473 hdev->discov_timeout = timeout;
1475 if (cp->val && hdev->discov_timeout > 0) {
1476 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1477 queue_delayed_work(hdev->req_workqueue,
1478 &hdev->discov_off, to);
1481 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1485 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1491 /* Cancel any potential discoverable timeout that might be
1492 * still active and store new timeout value. The arming of
1493 * the timeout happens in the complete handler.
1495 cancel_delayed_work(&hdev->discov_off);
1496 hdev->discov_timeout = timeout;
1499 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1501 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1503 /* Limited discoverable mode */
1504 if (cp->val == 0x02)
1505 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1507 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Actual scan-mode/advertising update runs from the work item. */
1509 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1513 hci_dev_unlock(hdev);
/* Finish a pending MGMT_OP_SET_CONNECTABLE after the HCI work is done:
 * failure yields a status response, success a settings response plus a
 * NEW_SETTINGS broadcast to all other listeners.
 */
1517 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1519 struct mgmt_pending_cmd *cmd;
1521 bt_dev_dbg(hdev, "status 0x%02x", status);
1525 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1530 u8 mgmt_err = mgmt_status(status);
1531 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1535 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1536 new_settings(hdev, cmd->sk);
1539 mgmt_pending_remove(cmd);
1542 hci_dev_unlock(hdev);
/* Flag-only path of SET_CONNECTABLE (used when powered off or when no
 * HCI traffic is needed): toggle HCI_CONNECTABLE — disabling it also
 * clears HCI_DISCOVERABLE — reply with current settings, refresh the
 * scan state and background scan, and broadcast NEW_SETTINGS if a flag
 * actually changed.
 */
1545 static int set_connectable_update_settings(struct hci_dev *hdev,
1546 struct sock *sk, u8 val)
1548 bool changed = false;
1551 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1555 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1557 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Not connectable implies not discoverable either. */
1558 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1561 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1566 hci_req_update_scan(hdev);
1567 hci_update_background_scan(hdev);
1568 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler.
 *
 * Requires BR/EDR or LE to be enabled and val in {0x00, 0x01}. When
 * powered off the flag-only helper handles everything; otherwise the
 * request is rejected while a SET_DISCOVERABLE/SET_CONNECTABLE is
 * pending, the flags are updated (disabling also cancels the
 * discoverable timeout and clears both discoverable flags) and the
 * actual scan update is deferred to the connectable_update work item.
 */
1574 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1577 struct mgmt_mode *cp = data;
1578 struct mgmt_pending_cmd *cmd;
1581 bt_dev_dbg(hdev, "sock %p", sk);
1583 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1584 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1586 MGMT_STATUS_REJECTED);
1588 if (cp->val != 0x00 && cp->val != 0x01)
1589 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1590 MGMT_STATUS_INVALID_PARAMS);
1594 if (!hdev_is_powered(hdev)) {
1595 err = set_connectable_update_settings(hdev, sk, cp->val);
1599 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1600 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1601 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1606 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1613 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Disabling connectable: drop any discoverable timeout and flags. */
1615 if (hdev->discov_timeout > 0)
1616 cancel_delayed_work(&hdev->discov_off);
1618 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1619 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1620 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1623 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1627 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. A flag
 * change broadcasts NEW_SETTINGS; additionally, in limited privacy
 * mode while powered, advertising and discoverable, the
 * discoverable_update work is queued because the bondable state can
 * change the advertising address used.
 */
1631 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1634 struct mgmt_mode *cp = data;
1638 bt_dev_dbg(hdev, "sock %p", sk);
1640 if (cp->val != 0x00 && cp->val != 0x01)
1641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1642 MGMT_STATUS_INVALID_PARAMS);
1647 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1649 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1651 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1656 /* In limited privacy mode the change of bondable mode
1657 * may affect the local advertising address.
1659 if (hdev_is_powered(hdev) &&
1660 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1661 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1662 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1663 queue_work(hdev->req_workqueue,
1664 &hdev->discoverable_update);
1666 err = new_settings(hdev, sk);
1670 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler (BR/EDR authentication enable).
 *
 * Requires BR/EDR support. Powered off: toggles HCI_LINK_SECURITY and
 * broadcasts on change. Powered on: rejects concurrent requests,
 * short-circuits when HCI_AUTH already matches, otherwise sends
 * HCI_OP_WRITE_AUTH_ENABLE with the pending command tracked for the
 * event handler to complete.
 */
1674 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1677 struct mgmt_mode *cp = data;
1678 struct mgmt_pending_cmd *cmd;
1682 bt_dev_dbg(hdev, "sock %p", sk);
1684 status = mgmt_bredr_support(hdev);
1686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1689 if (cp->val != 0x00 && cp->val != 0x01)
1690 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1691 MGMT_STATUS_INVALID_PARAMS);
1695 if (!hdev_is_powered(hdev)) {
1696 bool changed = false;
1698 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1699 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1703 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1708 err = new_settings(hdev, sk);
1713 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1714 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state — no HCI needed. */
1721 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1722 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1726 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1732 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1734 mgmt_pending_remove(cmd);
1739 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler (Secure Simple Pairing enable/disable).
 *
 * Requires BR/EDR and LMP SSP support. Powered off: flags only
 * (disabling SSP also clears HS). Powered on: rejects a concurrent
 * SET_SSP, short-circuits when already in the requested state, turns
 * off SSP debug mode first when disabling while debug keys are in use,
 * then issues HCI_OP_WRITE_SSP_MODE.
 */
1743 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1745 struct mgmt_mode *cp = data;
1746 struct mgmt_pending_cmd *cmd;
1750 bt_dev_dbg(hdev, "sock %p", sk);
1752 status = mgmt_bredr_support(hdev);
1754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1756 if (!lmp_ssp_capable(hdev))
1757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1758 MGMT_STATUS_NOT_SUPPORTED);
1760 if (cp->val != 0x00 && cp->val != 0x01)
1761 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1762 MGMT_STATUS_INVALID_PARAMS);
/* NOTE(review): flag arguments on lines 1770-1776 are elided here;
 * presumably HCI_SSP_ENABLED — confirm against the full tree.
 */
1766 if (!hdev_is_powered(hdev)) {
1770 changed = !hci_dev_test_and_set_flag(hdev,
1773 changed = hci_dev_test_and_clear_flag(hdev,
1776 changed = hci_dev_test_and_clear_flag(hdev,
1779 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1782 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1787 err = new_settings(hdev, sk);
1792 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1793 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1798 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1799 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1803 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP while debug keys are active: drop debug mode first. */
1809 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1810 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1811 sizeof(cp->val), &cp->val);
1813 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1815 mgmt_pending_remove(cmd);
1820 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler (Bluetooth High Speed / AMP support toggle).
 *
 * Only valid when CONFIG_BT_HS is built in, BR/EDR and SSP are
 * available and SSP is enabled. Toggles HCI_HS_ENABLED; disabling
 * while powered is rejected. Changes are acknowledged with a settings
 * response and NEW_SETTINGS broadcast.
 */
1824 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1826 struct mgmt_mode *cp = data;
1831 bt_dev_dbg(hdev, "sock %p", sk);
1833 if (!IS_ENABLED(CONFIG_BT_HS))
1834 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1835 MGMT_STATUS_NOT_SUPPORTED);
1837 status = mgmt_bredr_support(hdev);
1839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1841 if (!lmp_ssp_capable(hdev))
1842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1843 MGMT_STATUS_NOT_SUPPORTED);
1845 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1847 MGMT_STATUS_REJECTED);
1849 if (cp->val != 0x00 && cp->val != 0x01)
1850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1851 MGMT_STATUS_INVALID_PARAMS);
1855 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1862 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS while powered is not permitted. */
1864 if (hdev_is_powered(hdev)) {
1865 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1866 MGMT_STATUS_REJECTED);
1870 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1873 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1878 err = new_settings(hdev, sk);
1881 hci_dev_unlock(hdev);
/* Completion handler for the SET_LE HCI request. On error all pending
 * SET_LE commands are failed with the mapped status; on success they
 * are answered with the current settings and NEW_SETTINGS is
 * broadcast. If LE ended up enabled, refresh default advertising /
 * scan-response data and the background scan (during power on,
 * powered_update_hci handles this instead).
 */
1885 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1887 struct cmd_lookup match = { NULL, hdev };
1892 u8 mgmt_err = mgmt_status(status);
1894 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1899 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1901 new_settings(hdev, match.sk);
1906 /* Make sure the controller has a good default for
1907 * advertising data. Restrict the update to when LE
1908 * has actually been enabled. During power on, the
1909 * update in powered_update_hci will take care of it.
1911 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1912 struct hci_request req;
1913 hci_req_init(&req, hdev);
/* Extended advertising uses a dedicated setup for instance 0x00. */
1914 if (ext_adv_capable(hdev)) {
1917 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1919 __hci_req_update_scan_rsp_data(&req, 0x00);
1921 __hci_req_update_adv_data(&req, 0x00);
1922 __hci_req_update_scan_rsp_data(&req, 0x00);
1924 hci_req_run(&req, NULL);
1925 hci_update_background_scan(hdev);
1929 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler (LE host support enable/disable).
 *
 * LE-only controllers cannot have LE switched off (enable is answered
 * positively, disable rejected). Disabling LE clears advertising
 * instances. When powered off, or when the host-LE bit already
 * matches, only flags change; otherwise a Write LE Host Supported
 * request is built (disabling also stops advertising / clears extended
 * advertising sets) and completed in le_enable_complete().
 */
1932 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1934 struct mgmt_mode *cp = data;
1935 struct hci_cp_write_le_host_supported hci_cp;
1936 struct mgmt_pending_cmd *cmd;
1937 struct hci_request req;
1941 bt_dev_dbg(hdev, "sock %p", sk);
1943 if (!lmp_le_capable(hdev))
1944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1945 MGMT_STATUS_NOT_SUPPORTED);
1947 if (cp->val != 0x00 && cp->val != 0x01)
1948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1949 MGMT_STATUS_INVALID_PARAMS);
1951 /* Bluetooth single mode LE only controllers or dual-mode
1952 * controllers configured as LE only devices, do not allow
1953 * switching LE off. These have either LE enabled explicitly
1954 * or BR/EDR has been previously switched off.
1956 * When trying to enable an already enabled LE, then gracefully
1957 * send a positive response. Trying to disable it however will
1958 * result into rejection.
1960 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1961 if (cp->val == 0x01)
1962 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1964 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1965 MGMT_STATUS_REJECTED);
1971 enabled = lmp_host_le_capable(hdev);
/* Disabling LE: drop all advertising instances up front. */
1974 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1976 if (!hdev_is_powered(hdev) || val == enabled) {
1977 bool changed = false;
1979 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1980 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1984 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1985 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1989 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1994 err = new_settings(hdev, sk);
1999 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2000 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2006 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2012 hci_req_init(&req, hdev);
2014 memset(&hci_cp, 0, sizeof(hci_cp));
/* NOTE(review): hci_cp.le assignment is elided between 2014 and 2018;
 * presumably hci_cp.le = val — confirm against the full tree.
 */
2018 hci_cp.simul = 0x00;
2020 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2021 __hci_req_disable_advertising(&req);
2023 if (ext_adv_capable(hdev))
2024 __hci_req_clear_ext_adv_sets(&req);
2027 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2030 err = hci_req_run(&req, le_enable_complete);
2032 mgmt_pending_remove(cmd);
2035 hci_dev_unlock(hdev);
2039 /* This is a helper function to test for pending mgmt commands that can
2040 * cause CoD or EIR HCI commands. We can only allow one such pending
2041 * mgmt command at a time since otherwise we cannot easily track what
2042 * the current values are, will be, and based on that calculate if a new
2043 * HCI command needs to be sent and if yes with what value.
2045 static bool pending_eir_or_class(struct hci_dev *hdev)
2047 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any command that touches CoD or EIR. */
2049 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2050 switch (cmd->opcode) {
2051 case MGMT_OP_ADD_UUID:
2052 case MGMT_OP_REMOVE_UUID:
2053 case MGMT_OP_SET_DEV_CLASS:
2054 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are aliases of this
 * base with only the leading 32-bit field differing (see
 * get_uuid_size()).
 */
2062 static const u8 bluetooth_base_uuid[] = {
2063 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2064 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID as 128, 32 or 16 bits wide: if
 * the low 12 bytes don't match the Bluetooth Base UUID it is a true
 * 128-bit UUID; otherwise the top 32-bit word decides (return paths
 * for the 32/16 distinction are on elided lines).
 */
2067 static u8 get_uuid_size(const u8 *uuid)
2071 if (memcmp(uuid, bluetooth_base_uuid, 12))
2074 val = get_unaligned_le32(&uuid[12]);
/* Common completion for the class/EIR commands (ADD_UUID, REMOVE_UUID,
 * SET_DEV_CLASS): answer the pending command identified by @mgmt_op
 * with the current 3-byte device class and drop it from the pending
 * list.
 */
2081 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2083 struct mgmt_pending_cmd *cmd;
2087 cmd = pending_find(mgmt_op, hdev);
2091 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2092 mgmt_status(status), hdev->dev_class, 3);
2094 mgmt_pending_remove(cmd);
2097 hci_dev_unlock(hdev);
/* hci_req_run() callback for add_uuid(): forward to the shared class
 * completion helper for MGMT_OP_ADD_UUID.
 */
2100 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2102 bt_dev_dbg(hdev, "status 0x%02x", status);
2104 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: record a service UUID and refresh the
 * device class and EIR data.
 *
 * Rejected (BUSY) while another CoD/EIR-affecting command is pending.
 * The UUID is appended to hdev->uuids, then class + EIR updates are
 * submitted as one request; -ENODATA (nothing to send, e.g. powered
 * off) is answered immediately with the current class, otherwise a
 * pending command is tracked and completed in add_uuid_complete().
 */
2107 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2109 struct mgmt_cp_add_uuid *cp = data;
2110 struct mgmt_pending_cmd *cmd;
2111 struct hci_request req;
2112 struct bt_uuid *uuid;
2115 bt_dev_dbg(hdev, "sock %p", sk);
2119 if (pending_eir_or_class(hdev)) {
2120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2125 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2131 memcpy(uuid->uuid, cp->uuid, 16);
2132 uuid->svc_hint = cp->svc_hint;
2133 uuid->size = get_uuid_size(cp->uuid);
2135 list_add_tail(&uuid->list, &hdev->uuids);
2137 hci_req_init(&req, hdev);
2139 __hci_req_update_class(&req);
2140 __hci_req_update_eir(&req);
2142 err = hci_req_run(&req, add_uuid_complete);
2144 if (err != -ENODATA)
/* No HCI traffic needed — reply with the current class right away. */
2147 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2148 hdev->dev_class, 3);
2152 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2161 hci_dev_unlock(hdev);
/* Arm the service cache (defers class/EIR updates) when the device is
 * powered. Returns whether the caller should treat the operation as
 * deferred; the cache flush is scheduled via the service_cache work.
 */
2165 static bool enable_service_cache(struct hci_dev *hdev)
2167 if (!hdev_is_powered(hdev))
2170 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2171 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_req_run() callback for remove_uuid(): forward to the shared
 * class completion helper for MGMT_OP_REMOVE_UUID.
 */
2179 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2181 bt_dev_dbg(hdev, "status 0x%02x", status);
2183 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all, when the
 * all-zero wildcard UUID is given) and refresh class + EIR.
 *
 * Rejected (BUSY) while another CoD/EIR-affecting command is pending.
 * The wildcard path clears the whole list and may defer the update via
 * the service cache. Removing a UUID that is not on the list yields
 * INVALID_PARAMS. Like add_uuid(), -ENODATA from the request means an
 * immediate reply; otherwise completion happens in
 * remove_uuid_complete().
 */
2186 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2189 struct mgmt_cp_remove_uuid *cp = data;
2190 struct mgmt_pending_cmd *cmd;
2191 struct bt_uuid *match, *tmp;
2192 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2193 struct hci_request req;
2196 bt_dev_dbg(hdev, "sock %p", sk);
2200 if (pending_eir_or_class(hdev)) {
2201 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: wipe every registered UUID. */
2206 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2207 hci_uuids_clear(hdev);
2209 if (enable_service_cache(hdev)) {
2210 err = mgmt_cmd_complete(sk, hdev->id,
2211 MGMT_OP_REMOVE_UUID,
2212 0, hdev->dev_class, 3);
2221 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2222 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2225 list_del(&match->list);
/* No matching UUID found (found-count check is on elided lines). */
2231 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2232 MGMT_STATUS_INVALID_PARAMS);
2237 hci_req_init(&req, hdev);
2239 __hci_req_update_class(&req);
2240 __hci_req_update_eir(&req);
2242 err = hci_req_run(&req, remove_uuid_complete);
2244 if (err != -ENODATA)
2247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2248 hdev->dev_class, 3);
2252 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2261 hci_dev_unlock(hdev);
/* hci_req_run() callback for set_dev_class(): forward to the shared
 * class completion helper for MGMT_OP_SET_DEV_CLASS.
 */
2265 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2267 bt_dev_dbg(hdev, "status 0x%02x", status);
2269 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor device class.
 *
 * BR/EDR only; rejected while another CoD/EIR command is pending. The
 * low 2 bits of minor and the high 3 bits of major are reserved and
 * must be zero. Powered off: just store and reply. Otherwise an
 * armed service cache is flushed synchronously (EIR refresh included)
 * before the class update request is run, completing in
 * set_class_complete().
 */
2272 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2275 struct mgmt_cp_set_dev_class *cp = data;
2276 struct mgmt_pending_cmd *cmd;
2277 struct hci_request req;
2280 bt_dev_dbg(hdev, "sock %p", sk);
2282 if (!lmp_bredr_capable(hdev))
2283 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2284 MGMT_STATUS_NOT_SUPPORTED);
2288 if (pending_eir_or_class(hdev)) {
2289 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2294 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2295 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2296 MGMT_STATUS_INVALID_PARAMS);
2300 hdev->major_class = cp->major;
2301 hdev->minor_class = cp->minor;
2303 if (!hdev_is_powered(hdev)) {
2304 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2305 hdev->dev_class, 3);
2309 hci_req_init(&req, hdev);
/* Flush a pending service cache; the unlock allows the cancel_sync to
 * wait for a concurrently-running cache worker without deadlocking.
 */
2311 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2312 hci_dev_unlock(hdev);
2313 cancel_delayed_work_sync(&hdev->service_cache);
2315 __hci_req_update_eir(&req);
2318 __hci_req_update_class(&req);
2320 err = hci_req_run(&req, set_class_complete);
2322 if (err != -ENODATA)
2325 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2326 hdev->dev_class, 3);
2330 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2339 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the list supplied by userspace.
 *
 * Validates key_count against the maximum that fits in a u16-sized
 * message, the exact expected message length, the debug_keys flag and
 * each key's address type / key type before touching state. Existing
 * keys are cleared, HCI_KEEP_DEBUG_KEYS is updated (broadcasting
 * NEW_SETTINGS on change), blocked and debug-combination keys are
 * skipped, and the rest are added.
 */
2343 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2346 struct mgmt_cp_load_link_keys *cp = data;
2347 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2348 sizeof(struct mgmt_link_key_info));
2349 u16 key_count, expected_len;
2353 bt_dev_dbg(hdev, "sock %p", sk);
2355 if (!lmp_bredr_capable(hdev))
2356 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2357 MGMT_STATUS_NOT_SUPPORTED);
2359 key_count = __le16_to_cpu(cp->key_count);
2360 if (key_count > max_key_count) {
2361 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2364 MGMT_STATUS_INVALID_PARAMS);
2367 expected_len = struct_size(cp, keys, key_count);
2368 if (expected_len != len) {
2369 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2372 MGMT_STATUS_INVALID_PARAMS);
2375 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2376 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2377 MGMT_STATUS_INVALID_PARAMS);
2379 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: validate every entry before any state is modified. */
2382 for (i = 0; i < key_count; i++) {
2383 struct mgmt_link_key_info *key = &cp->keys[i];
2385 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2386 return mgmt_cmd_status(sk, hdev->id,
2387 MGMT_OP_LOAD_LINK_KEYS,
2388 MGMT_STATUS_INVALID_PARAMS);
2393 hci_link_keys_clear(hdev);
2396 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2398 changed = hci_dev_test_and_clear_flag(hdev,
2399 HCI_KEEP_DEBUG_KEYS);
2402 new_settings(hdev, NULL);
/* Second pass: store the keys, skipping blocked and debug keys. */
2404 for (i = 0; i < key_count; i++) {
2405 struct mgmt_link_key_info *key = &cp->keys[i];
2407 if (hci_is_blocked_key(hdev,
2408 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2410 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2415 /* Always ignore debug keys and require a new pairing if
2416 * the user wants to use them.
2418 if (key->type == HCI_LK_DEBUG_COMBINATION)
2421 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2422 key->type, key->pin_len, NULL);
2425 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2427 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for the given address, skipping
 * @skip_sk (the socket that triggered the unpair).
 */
2432 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2433 u8 addr_type, struct sock *skip_sk)
2435 struct mgmt_ev_device_unpaired ev;
2437 bacpy(&ev.addr.bdaddr, bdaddr);
2438 ev.addr.type = addr_type;
2440 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the pairing data for a device
 * and optionally disconnect it.
 *
 * BR/EDR: removes the stored link key. LE: aborts any in-progress SMP
 * pairing (which also removes LTK/IRK), defers connection-parameter
 * removal so a re-pairing can keep them, and disables auto-connect.
 * If cp->disconnect is set and a live connection exists, the link is
 * aborted and the reply is deferred to the disconnect completion;
 * otherwise the command completes immediately and DEVICE_UNPAIRED is
 * broadcast.
 */
2444 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2447 struct mgmt_cp_unpair_device *cp = data;
2448 struct mgmt_rp_unpair_device rp;
2449 struct hci_conn_params *params;
2450 struct mgmt_pending_cmd *cmd;
2451 struct hci_conn *conn;
2455 memset(&rp, 0, sizeof(rp));
2456 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2457 rp.addr.type = cp->addr.type;
2459 if (!bdaddr_type_is_valid(cp->addr.type))
2460 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2461 MGMT_STATUS_INVALID_PARAMS,
2464 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2465 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2466 MGMT_STATUS_INVALID_PARAMS,
2471 if (!hdev_is_powered(hdev)) {
2472 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2473 MGMT_STATUS_NOT_POWERED, &rp,
2478 if (cp->addr.type == BDADDR_BREDR) {
2479 /* If disconnection is requested, then look up the
2480 * connection. If the remote device is connected, it
2481 * will be later used to terminate the link.
2483 * Setting it to NULL explicitly will cause no
2484 * termination of the link.
2487 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2492 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2494 err = mgmt_cmd_complete(sk, hdev->id,
2495 MGMT_OP_UNPAIR_DEVICE,
2496 MGMT_STATUS_NOT_PAIRED, &rp,
2504 /* LE address type */
2505 addr_type = le_addr_type(cp->addr.type);
2507 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2508 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2510 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2511 MGMT_STATUS_NOT_PAIRED, &rp,
2516 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
/* No live LE connection: parameters can be removed right away. */
2518 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2523 /* Defer clearing up the connection parameters until closing to
2524 * give a chance of keeping them if a repairing happens.
2526 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2528 /* Disable auto-connection parameters if present */
2529 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2531 if (params->explicit_connect)
2532 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2534 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2537 /* If disconnection is not requested, then clear the connection
2538 * variable so that the link is not terminated.
2540 if (!cp->disconnect)
2544 /* If the connection variable is set, then termination of the
2545 * link is requested.
2548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2550 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2554 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2561 cmd->cmd_complete = addr_cmd_complete;
2563 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2565 mgmt_pending_remove(cmd);
2568 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the ACL or LE connection to
 * the given address.
 *
 * Fails with NOT_POWERED when the device is down, BUSY while another
 * disconnect is pending, and NOT_CONNECTED when no established
 * connection exists. Otherwise hci_disconnect() is issued and the
 * reply (echoing the address) is deferred to the disconnect
 * completion via generic_cmd_complete.
 */
2572 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2575 struct mgmt_cp_disconnect *cp = data;
2576 struct mgmt_rp_disconnect rp;
2577 struct mgmt_pending_cmd *cmd;
2578 struct hci_conn *conn;
2581 bt_dev_dbg(hdev, "sock %p", sk);
2583 memset(&rp, 0, sizeof(rp));
2584 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2585 rp.addr.type = cp->addr.type;
2587 if (!bdaddr_type_is_valid(cp->addr.type))
2588 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2589 MGMT_STATUS_INVALID_PARAMS,
2594 if (!test_bit(HCI_UP, &hdev->flags)) {
2595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2596 MGMT_STATUS_NOT_POWERED, &rp,
2601 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2602 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2603 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2607 if (cp->addr.type == BDADDR_BREDR)
2608 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2611 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2612 le_addr_type(cp->addr.type));
2614 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2615 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2616 MGMT_STATUS_NOT_CONNECTED, &rp,
2621 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2627 cmd->cmd_complete = generic_cmd_complete;
2629 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2631 mgmt_pending_remove(cmd);
2634 hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into an MGMT BDADDR_*
 * address type. LE public maps to BDADDR_LE_PUBLIC, any other LE
 * address type falls back to BDADDR_LE_RANDOM, and non-LE links fall
 * back to BDADDR_BREDR (the LE_LINK case label sits on an elided line).
 */
2638 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2640 switch (link_type) {
2642 switch (addr_type) {
2643 case ADDR_LE_DEV_PUBLIC:
2644 return BDADDR_LE_PUBLIC;
2647 /* Fallback to LE Random address type */
2648 return BDADDR_LE_RANDOM;
2652 /* Fallback to BR/EDR type */
2653 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report all connections marked
 * HCI_CONN_MGMT_CONNECTED as (bdaddr, type) pairs.
 *
 * Two passes over conn_hash: first to count (sizing the kmalloc'd
 * reply), second to fill it, skipping SCO/eSCO links. The reply length
 * is recomputed from the final count in case entries were filtered.
 */
2657 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2660 struct mgmt_rp_get_connections *rp;
2665 bt_dev_dbg(hdev, "sock %p", sk);
2669 if (!hdev_is_powered(hdev)) {
2670 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2671 MGMT_STATUS_NOT_POWERED);
2676 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2677 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2681 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2688 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2689 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2691 bacpy(&rp->addr[i].bdaddr, &c->dst);
2692 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
/* SCO/eSCO links are excluded from the reply. */
2693 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2698 rp->conn_count = cpu_to_le16(i);
2700 /* Recalculate length in case of filtered SCO connections, etc */
2701 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2702 struct_size(rp, addr, i));
2707 hci_dev_unlock(hdev);
/* Issue an HCI PIN Code Negative Reply for the given address, tracking
 * it as a pending MGMT_OP_PIN_CODE_NEG_REPLY completed via
 * addr_cmd_complete. The pending command is dropped if sending fails.
 */
2711 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2712 struct mgmt_cp_pin_code_neg_reply *cp)
2714 struct mgmt_pending_cmd *cmd;
2717 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2722 cmd->cmd_complete = addr_cmd_complete;
2724 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2725 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2727 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for the connection to cp->addr.
 *
 * Requires the device to be powered and an ACL connection to exist.
 * High-security pairing demands a full 16-byte PIN; anything shorter
 * is converted into a negative reply. Otherwise an HCI PIN Code Reply
 * is sent with the pending command completed via addr_cmd_complete.
 */
2732 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2735 struct hci_conn *conn;
2736 struct mgmt_cp_pin_code_reply *cp = data;
2737 struct hci_cp_pin_code_reply reply;
2738 struct mgmt_pending_cmd *cmd;
2741 bt_dev_dbg(hdev, "sock %p", sk);
2745 if (!hdev_is_powered(hdev)) {
2746 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2747 MGMT_STATUS_NOT_POWERED);
2751 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2754 MGMT_STATUS_NOT_CONNECTED);
2758 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2759 struct mgmt_cp_pin_code_neg_reply ncp;
2761 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2763 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2765 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2767 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2768 MGMT_STATUS_INVALID_PARAMS);
2773 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2779 cmd->cmd_complete = addr_cmd_complete;
2781 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2782 reply.pin_len = cp->pin_len;
2783 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2785 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2787 mgmt_pending_remove(cmd);
2790 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY: record the IO capability that
 * will be advertised during subsequent pairing attempts.
 */
2794 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2797 struct mgmt_cp_set_io_capability *cp = data;
2799 bt_dev_dbg(hdev, "sock %p", sk);
/* SMP_IO_KEYBOARD_DISPLAY is the highest defined IO capability value */
2801 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2803 MGMT_STATUS_INVALID_PARAMS);
/* Stored under hci_dev_lock (lock acquisition elided in this view) */
2807 hdev->io_capability = cp->io_capability;
2809 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2811 hci_dev_unlock(hdev);
2813 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Return the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at @conn, or NULL when no pairing is in flight for this connection.
 */
2817 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2819 struct hci_dev *hdev = conn->hdev;
2820 struct mgmt_pending_cmd *cmd;
2822 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2823 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
/* Match on the exact connection pointer (continue/return lines elided) */
2826 if (cmd->user_data != conn)
/* Finish a pending Pair Device command with @status: reply to the mgmt
 * socket, detach all pairing callbacks from @conn and drop the
 * connection reference taken when pairing started.
 */
2835 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2837 struct mgmt_rp_pair_device rp;
2838 struct hci_conn *conn = cmd->user_data;
2841 bacpy(&rp.addr.bdaddr, &conn->dst);
2842 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2844 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2845 status, &rp, sizeof(rp));
2847 /* So we don't get further callbacks for this connection */
2848 conn->connect_cfm_cb = NULL;
2849 conn->security_cfm_cb = NULL;
2850 conn->disconn_cfm_cb = NULL;
2852 hci_conn_drop(conn);
2854 /* The device is paired so there is no need to remove
2855 * its connection parameters anymore.
2857 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by SMP when LE pairing finishes: resolve any pending Pair
 * Device command with success or failure.
 */
2864 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2866 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2867 struct mgmt_pending_cmd *cmd;
2869 cmd = find_pairing(conn);
/* NOTE(review): a NULL check on cmd is elided in this view — confirm */
2871 cmd->cmd_complete(cmd, status);
2872 mgmt_pending_remove(cmd);
/* BR/EDR connect/security/disconnect confirmation callback: finish the
 * pending pairing command with the mgmt-translated HCI status.
 */
2876 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2878 struct mgmt_pending_cmd *cmd;
2880 BT_DBG("status %u", status);
2882 cmd = find_pairing(conn);
/* No pending pairing for this connection — nothing to complete */
2884 BT_DBG("Unable to find a pending command");
2888 cmd->cmd_complete(cmd, mgmt_status(status));
2889 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb. NOTE(review): the lines elided
 * here appear to early-return on status == 0, since for LE a successful
 * connect alone does not mean pairing finished (SMP signals success via
 * mgmt_smp_complete()) — confirm against the full source.
 */
2892 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2894 struct mgmt_pending_cmd *cmd;
2896 BT_DBG("status %u", status);
2901 cmd = find_pairing(conn);
2903 BT_DBG("Unable to find a pending command");
2907 cmd->cmd_complete(cmd, mgmt_status(status));
2908 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PAIR_DEVICE: establish a connection to the given
 * address (BR/EDR or LE) and drive pairing over it, completing the mgmt
 * command once pairing succeeds, fails or is cancelled.
 */
2911 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2914 struct mgmt_cp_pair_device *cp = data;
2915 struct mgmt_rp_pair_device rp;
2916 struct mgmt_pending_cmd *cmd;
2917 u8 sec_level, auth_type;
2918 struct hci_conn *conn;
2921 bt_dev_dbg(hdev, "sock %p", sk);
/* The reply always echoes the requested address */
2923 memset(&rp, 0, sizeof(rp));
2924 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2925 rp.addr.type = cp->addr.type;
2927 if (!bdaddr_type_is_valid(cp->addr.type))
2928 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2929 MGMT_STATUS_INVALID_PARAMS,
2932 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2933 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2934 MGMT_STATUS_INVALID_PARAMS,
2939 if (!hdev_is_powered(hdev)) {
2940 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2941 MGMT_STATUS_NOT_POWERED, &rp,
/* Refuse to pair twice with the same address */
2946 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2947 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2948 MGMT_STATUS_ALREADY_PAIRED, &rp,
2953 sec_level = BT_SECURITY_MEDIUM;
2954 auth_type = HCI_AT_DEDICATED_BONDING;
/* BR/EDR pairs over a direct ACL connect; LE goes through the
 * connect-by-scan path below.
 */
2956 if (cp->addr.type == BDADDR_BREDR) {
2957 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2958 auth_type, CONN_REASON_PAIR_DEVICE);
2960 u8 addr_type = le_addr_type(cp->addr.type);
2961 struct hci_conn_params *p;
2963 /* When pairing a new device, it is expected to remember
2964 * this device for future connections. Adding the connection
2965 * parameter information ahead of time allows tracking
2966 * of the peripheral preferred values and will speed up any
2967 * further connection establishment.
2969 * If connection parameters already exist, then they
2970 * will be kept and this function does nothing.
2972 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2974 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2975 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2977 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2978 sec_level, HCI_LE_CONN_TIMEOUT,
2979 CONN_REASON_PAIR_DEVICE);
/* Map the connect error (ERR_PTR) to a mgmt status code */
2985 if (PTR_ERR(conn) == -EBUSY)
2986 status = MGMT_STATUS_BUSY;
2987 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2988 status = MGMT_STATUS_NOT_SUPPORTED;
2989 else if (PTR_ERR(conn) == -ECONNREFUSED)
2990 status = MGMT_STATUS_REJECTED;
2992 status = MGMT_STATUS_CONNECT_FAILED;
2994 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2995 status, &rp, sizeof(rp));
/* A non-NULL connect_cfm_cb means another pairing already owns this
 * connection's callbacks.
 */
2999 if (conn->connect_cfm_cb) {
3000 hci_conn_drop(conn);
3001 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3002 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3006 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3009 hci_conn_drop(conn);
3013 cmd->cmd_complete = pairing_complete;
3015 /* For LE, just connecting isn't a proof that the pairing finished */
3016 if (cp->addr.type == BDADDR_BREDR) {
3017 conn->connect_cfm_cb = pairing_complete_cb;
3018 conn->security_cfm_cb = pairing_complete_cb;
3019 conn->disconn_cfm_cb = pairing_complete_cb;
3021 conn->connect_cfm_cb = le_pairing_complete_cb;
3022 conn->security_cfm_cb = le_pairing_complete_cb;
3023 conn->disconn_cfm_cb = le_pairing_complete_cb;
3026 conn->io_capability = cp->io_cap;
/* Hold a reference for the pending command's lifetime */
3027 cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure, complete immediately */
3029 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3030 hci_conn_security(conn, sec_level, auth_type, true)) {
3031 cmd->cmd_complete(cmd, 0);
3032 mgmt_pending_remove(cmd);
3038 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort the in-flight Pair
 * Device command for the given address and tear down any pairing state.
 */
3042 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3045 struct mgmt_addr_info *addr = data;
3046 struct mgmt_pending_cmd *cmd;
3047 struct hci_conn *conn;
3050 bt_dev_dbg(hdev, "sock %p", sk);
3054 if (!hdev_is_powered(hdev)) {
3055 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3056 MGMT_STATUS_NOT_POWERED);
/* Without a pending Pair Device there is nothing to cancel */
3060 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3062 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3063 MGMT_STATUS_INVALID_PARAMS);
3067 conn = cmd->user_data;
/* The cancel must name the same peer the pending pairing targets */
3069 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3070 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3071 MGMT_STATUS_INVALID_PARAMS);
/* Resolve the original Pair Device command as cancelled */
3075 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3076 mgmt_pending_remove(cmd);
3078 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3079 addr, sizeof(*addr));
3081 /* Since user doesn't want to proceed with the connection, abort any
3082 * ongoing pairing and then terminate the link if it was created
3083 * because of the pair device action.
3085 if (addr->type == BDADDR_BREDR)
3086 hci_remove_link_key(hdev, &addr->bdaddr);
3088 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3089 le_addr_type(addr->type));
3091 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3092 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3095 hci_dev_unlock(hdev);
/* Common backend for the PIN/confirmation/passkey (negative) reply
 * commands: validate state, then either answer via SMP (for LE peers)
 * or forward the response to the controller as HCI command @hci_op,
 * replying to the mgmt socket as @mgmt_op.
 */
3099 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3100 struct mgmt_addr_info *addr, u16 mgmt_op,
3101 u16 hci_op, __le32 passkey)
3103 struct mgmt_pending_cmd *cmd;
3104 struct hci_conn *conn;
3109 if (!hdev_is_powered(hdev)) {
3110 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3111 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the connection on the transport matching the address type */
3116 if (addr->type == BDADDR_BREDR)
3117 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3119 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3120 le_addr_type(addr->type));
3123 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3124 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are consumed by SMP, not sent as HCI commands */
3129 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3130 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3132 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3133 MGMT_STATUS_SUCCESS, addr,
3136 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3137 MGMT_STATUS_FAILED, addr,
3143 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3149 cmd->cmd_complete = addr_cmd_complete;
3151 /* Continue with pairing via HCI */
/* Only the passkey reply carries a payload beyond the address */
3152 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3153 struct hci_cp_user_passkey_reply cp;
3155 bacpy(&cp.bdaddr, &addr->bdaddr);
3156 cp.passkey = passkey;
3157 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3159 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* Drop the pending entry when the HCI send fails (condition elided) */
3163 mgmt_pending_remove(cmd);
3166 hci_dev_unlock(hdev);
3170 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3171 void *data, u16 len)
3173 struct mgmt_cp_pin_code_neg_reply *cp = data;
3175 bt_dev_dbg(hdev, "sock %p", sk);
3177 return user_pairing_resp(sk, hdev, &cp->addr,
3178 MGMT_OP_PIN_CODE_NEG_REPLY,
3179 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3182 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3185 struct mgmt_cp_user_confirm_reply *cp = data;
3187 bt_dev_dbg(hdev, "sock %p", sk);
3189 if (len != sizeof(*cp))
3190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3191 MGMT_STATUS_INVALID_PARAMS);
3193 return user_pairing_resp(sk, hdev, &cp->addr,
3194 MGMT_OP_USER_CONFIRM_REPLY,
3195 HCI_OP_USER_CONFIRM_REPLY, 0);
3198 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3199 void *data, u16 len)
3201 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3203 bt_dev_dbg(hdev, "sock %p", sk);
3205 return user_pairing_resp(sk, hdev, &cp->addr,
3206 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3207 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3210 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3213 struct mgmt_cp_user_passkey_reply *cp = data;
3215 bt_dev_dbg(hdev, "sock %p", sk);
3217 return user_pairing_resp(sk, hdev, &cp->addr,
3218 MGMT_OP_USER_PASSKEY_REPLY,
3219 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3222 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3223 void *data, u16 len)
3225 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3227 bt_dev_dbg(hdev, "sock %p", sk);
3229 return user_pairing_resp(sk, hdev, &cp->addr,
3230 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3231 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Restart advertising when the current instance advertises data covered
 * by @flags (e.g. local name, appearance) that has just changed, so the
 * new values are picked up.
 */
3234 static void adv_expire(struct hci_dev *hdev, u32 flags)
3236 struct adv_info *adv_instance;
3237 struct hci_request req;
3240 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3244 /* stop if current instance doesn't need to be changed */
3245 if (!(adv_instance->flags & flags))
3248 cancel_adv_timeout(hdev);
3250 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3254 hci_req_init(&req, hdev);
/* Error check on the schedule call is elided in this view */
3255 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3260 hci_req_run(&req, NULL);
/* HCI request completion for Set Local Name: answer the pending mgmt
 * command and, if advertising carries the local name, refresh it.
 */
3263 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3265 struct mgmt_cp_set_local_name *cp;
3266 struct mgmt_pending_cmd *cmd;
3268 bt_dev_dbg(hdev, "status 0x%02x", status);
3272 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* On HCI failure report the translated status, otherwise complete */
3279 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3280 mgmt_status(status));
3282 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Name is part of advertising data, so re-advertise with the new one */
3285 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3286 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME)
3289 mgmt_pending_remove(cmd);
3292 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LOCAL_NAME: update the adapter's complete and
 * short names, pushing them to the controller when powered.
 */
3295 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3298 struct mgmt_cp_set_local_name *cp = data;
3299 struct mgmt_pending_cmd *cmd;
3300 struct hci_request req;
3303 bt_dev_dbg(hdev, "sock %p", sk);
3307 /* If the old values are the same as the new ones just return a
3308 * direct command complete event.
3310 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3311 !memcmp(hdev->short_name, cp->short_name,
3312 sizeof(hdev->short_name))) {
3313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name lives only in the host; no HCI command needed for it */
3318 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* While powered off, just store the name and notify listeners */
3320 if (!hdev_is_powered(hdev)) {
3321 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3323 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3328 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3329 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3330 ext_info_changed(hdev, sk);
3335 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3341 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3343 hci_req_init(&req, hdev);
/* BR/EDR keeps the name in the controller and in the EIR data */
3345 if (lmp_bredr_capable(hdev)) {
3346 __hci_req_update_name(&req);
3347 __hci_req_update_eir(&req);
3350 /* The name is stored in the scan response data and so
3351 * no need to update the advertising data here.
3353 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3354 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3356 err = hci_req_run(&req, set_name_complete);
3358 mgmt_pending_remove(cmd);
3361 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_APPEARANCE: store the LE appearance value and
 * refresh advertising if it currently carries the appearance.
 */
3365 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3368 struct mgmt_cp_set_appearance *cp = data;
3372 bt_dev_dbg(hdev, "sock %p", sk);
/* Appearance is an LE-only concept */
3374 if (!lmp_le_capable(hdev))
3375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3376 MGMT_STATUS_NOT_SUPPORTED);
3378 appearance = le16_to_cpu(cp->appearance);
3382 if (hdev->appearance != appearance) {
3383 hdev->appearance = appearance;
/* Re-advertise so instances carrying the appearance pick it up */
3385 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3386 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3388 ext_info_changed(hdev, sk);
3391 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3394 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_PHY_CONFIGURATION: report the supported,
 * currently selected and configurable PHY bitmasks.
 */
3399 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3400 void *data, u16 len)
3402 struct mgmt_rp_get_phy_configuration rp;
3404 bt_dev_dbg(hdev, "sock %p", sk);
3408 memset(&rp, 0, sizeof(rp));
/* All three bitmasks are derived from controller capabilities */
3410 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3411 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3412 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3414 hci_dev_unlock(hdev);
3416 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3420 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3422 struct mgmt_ev_phy_configuration_changed ev;
3424 memset(&ev, 0, sizeof(ev));
3426 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3428 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* HCI request completion for LE Set Default PHY: answer the pending
 * Set PHY Configuration command and broadcast the change on success.
 */
3432 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3433 u16 opcode, struct sk_buff *skb)
3435 struct mgmt_pending_cmd *cmd;
3437 bt_dev_dbg(hdev, "status 0x%02x", status);
3441 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
/* Failure path: report the translated HCI status */
3446 mgmt_cmd_status(cmd->sk, hdev->id,
3447 MGMT_OP_SET_PHY_CONFIGURATION,
3448 mgmt_status(status));
3450 mgmt_cmd_complete(cmd->sk, hdev->id,
3451 MGMT_OP_SET_PHY_CONFIGURATION, 0,
/* Notify other mgmt sockets, skipping the one that asked */
3454 mgmt_phy_configuration_changed(hdev, cmd->sk);
3457 mgmt_pending_remove(cmd);
3460 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION: apply the requested PHY
 * selection. BR/EDR PHY bits are translated into the ACL packet-type
 * mask; LE PHY bits are sent to the controller via LE Set Default PHY.
 */
3463 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3464 void *data, u16 len)
3466 struct mgmt_cp_set_phy_configuration *cp = data;
3467 struct hci_cp_le_set_default_phy cp_phy;
3468 struct mgmt_pending_cmd *cmd;
3469 struct hci_request req;
3470 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
/* Basic rate 1-slot packets are always part of the packet type mask */
3471 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3472 bool changed = false;
3475 bt_dev_dbg(hdev, "sock %p", sk);
3477 configurable_phys = get_configurable_phys(hdev);
3478 supported_phys = get_supported_phys(hdev);
3479 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject selections outside the controller's capabilities */
3481 if (selected_phys & ~supported_phys)
3482 return mgmt_cmd_status(sk, hdev->id,
3483 MGMT_OP_SET_PHY_CONFIGURATION,
3484 MGMT_STATUS_INVALID_PARAMS);
3486 unconfigure_phys = supported_phys & ~configurable_phys;
/* Non-configurable PHYs must always remain selected */
3488 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3489 return mgmt_cmd_status(sk, hdev->id,
3490 MGMT_OP_SET_PHY_CONFIGURATION,
3491 MGMT_STATUS_INVALID_PARAMS);
3493 if (selected_phys == get_selected_phys(hdev))
3494 return mgmt_cmd_complete(sk, hdev->id,
3495 MGMT_OP_SET_PHY_CONFIGURATION,
3500 if (!hdev_is_powered(hdev)) {
3501 err = mgmt_cmd_status(sk, hdev->id,
3502 MGMT_OP_SET_PHY_CONFIGURATION,
3503 MGMT_STATUS_REJECTED);
3507 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3508 err = mgmt_cmd_status(sk, hdev->id,
3509 MGMT_OP_SET_PHY_CONFIGURATION,
/* BR multi-slot bits are set when selected... */
3514 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3515 pkt_type |= (HCI_DH3 | HCI_DM3);
3517 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3519 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3520 pkt_type |= (HCI_DH5 | HCI_DM5);
3522 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* ...while EDR bits are cleared when selected — the sense is inverted
 * for the EDR packet-type bits (NOTE(review): per the HCI packet-type
 * definition these are "shall not use" bits — confirm against spec).
 */
3524 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3525 pkt_type &= ~HCI_2DH1;
3527 pkt_type |= HCI_2DH1;
3529 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3530 pkt_type &= ~HCI_2DH3;
3532 pkt_type |= HCI_2DH3;
3534 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3535 pkt_type &= ~HCI_2DH5;
3537 pkt_type |= HCI_2DH5;
3539 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3540 pkt_type &= ~HCI_3DH1;
3542 pkt_type |= HCI_3DH1;
3544 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3545 pkt_type &= ~HCI_3DH3;
3547 pkt_type |= HCI_3DH3;
3549 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3550 pkt_type &= ~HCI_3DH5;
3552 pkt_type |= HCI_3DH5;
3554 if (pkt_type != hdev->pkt_type) {
3555 hdev->pkt_type = pkt_type;
/* If the LE part did not change, no HCI round-trip is needed */
3559 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3560 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3562 mgmt_phy_configuration_changed(hdev, sk);
3564 err = mgmt_cmd_complete(sk, hdev->id,
3565 MGMT_OP_SET_PHY_CONFIGURATION,
3571 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3578 hci_req_init(&req, hdev);
3580 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller we have no TX/RX preference */
3582 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3583 cp_phy.all_phys |= 0x01;
3585 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3586 cp_phy.all_phys |= 0x02;
3588 if (selected_phys & MGMT_PHY_LE_1M_TX)
3589 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3591 if (selected_phys & MGMT_PHY_LE_2M_TX)
3592 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3594 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3595 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3597 if (selected_phys & MGMT_PHY_LE_1M_RX)
3598 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3600 if (selected_phys & MGMT_PHY_LE_2M_RX)
3601 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3603 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3604 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3606 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3608 err = hci_req_run_skb(&req, set_default_phy_complete);
3610 mgmt_pending_remove(cmd);
3613 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BLOCKED_KEYS: replace the device's blocked
 * key list with the one supplied by userspace.
 */
3618 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
/* Note: err holds a MGMT_STATUS_* code here, not a negative errno */
3621 int err = MGMT_STATUS_SUCCESS;
3622 struct mgmt_cp_set_blocked_keys *keys = data;
/* Largest key_count whose encoded size still fits in a u16 length */
3623 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3624 sizeof(struct mgmt_blocked_key_info));
3625 u16 key_count, expected_len;
3628 bt_dev_dbg(hdev, "sock %p", sk);
3630 key_count = __le16_to_cpu(keys->key_count);
3631 if (key_count > max_key_count) {
3632 bt_dev_err(hdev, "too big key_count value %u", key_count);
3633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3634 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared key count exactly */
3637 expected_len = struct_size(keys, keys, key_count);
3638 if (expected_len != len) {
3639 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3642 MGMT_STATUS_INVALID_PARAMS);
/* Replace the whole list: clear, then re-add each entry */
3647 hci_blocked_keys_clear(hdev);
3649 for (i = 0; i < keys->key_count; ++i) {
3650 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3653 err = MGMT_STATUS_NO_RESOURCES;
3657 b->type = keys->keys[i].type;
3658 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3659 list_add_rcu(&b->list, &hdev->blocked_keys);
3661 hci_dev_unlock(hdev);
3663 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* Handler for MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech
 * setting on controllers that declare support via quirk.
 */
3667 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3668 void *data, u16 len)
3670 struct mgmt_mode *cp = data;
3672 bool changed = false;
3674 bt_dev_dbg(hdev, "sock %p", sk);
3676 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3677 return mgmt_cmd_status(sk, hdev->id,
3678 MGMT_OP_SET_WIDEBAND_SPEECH,
3679 MGMT_STATUS_NOT_SUPPORTED);
/* Only boolean on/off is accepted */
3681 if (cp->val != 0x00 && cp->val != 0x01)
3682 return mgmt_cmd_status(sk, hdev->id,
3683 MGMT_OP_SET_WIDEBAND_SPEECH,
3684 MGMT_STATUS_INVALID_PARAMS);
3688 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3689 err = mgmt_cmd_status(sk, hdev->id,
3690 MGMT_OP_SET_WIDEBAND_SPEECH,
/* The value can only be changed while the adapter is powered off */
3695 if (hdev_is_powered(hdev) &&
3696 !!cp->val != hci_dev_test_flag(hdev,
3697 HCI_WIDEBAND_SPEECH_ENABLED)) {
3698 err = mgmt_cmd_status(sk, hdev->id,
3699 MGMT_OP_SET_WIDEBAND_SPEECH,
3700 MGMT_STATUS_REJECTED);
/* test_and_set/clear report whether the flag actually changed */
3705 changed = !hci_dev_test_and_set_flag(hdev,
3706 HCI_WIDEBAND_SPEECH_ENABLED);
3708 changed = hci_dev_test_and_clear_flag(hdev,
3709 HCI_WIDEBAND_SPEECH_ENABLED);
3711 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3716 err = new_settings(hdev, sk);
3719 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_READ_CONTROLLER_CAP: build an EIR-style list of
 * capability entries (security flags, max encryption key sizes, LE TX
 * power range) in a stack buffer aliased by @rp.
 */
3723 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3724 void *data, u16 data_len)
/* rp points into buf: fixed header followed by variable cap[] data */
3727 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3730 u8 tx_power_range[2];
3732 bt_dev_dbg(hdev, "sock %p", sk);
3734 memset(&buf, 0, sizeof(buf));
3738 /* When the Read Simple Pairing Options command is supported, then
3739 * the remote public key validation is supported.
3741 * Alternatively, when Microsoft extensions are available, they can
3742 * indicate support for public key validation as well.
/* commands[41] bit 3 = Read Simple Pairing Options supported */
3744 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3745 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3747 flags |= 0x02; /* Remote public key validation (LE) */
3749 /* When the Read Encryption Key Size command is supported, then the
3750 * encryption key size is enforced.
3752 if (hdev->commands[20] & 0x10)
3753 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3755 flags |= 0x08; /* Encryption key size enforcement (LE) */
3757 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3760 /* When the Read Simple Pairing Options command is supported, then
3761 * also max encryption key size information is provided.
3763 if (hdev->commands[41] & 0x08)
3764 cap_len = eir_append_le16(rp->cap, cap_len,
3765 MGMT_CAP_MAX_ENC_KEY_SIZE,
3766 hdev->max_enc_key_size);
3768 cap_len = eir_append_le16(rp->cap, cap_len,
3769 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3770 SMP_MAX_ENC_KEY_SIZE);
3772 /* Append the min/max LE tx power parameters if we were able to fetch
3773 * it from the controller
/* commands[38] bit 7 = LE Read Transmit Power supported */
3775 if (hdev->commands[38] & 0x80) {
3776 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3777 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3778 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3782 rp->cap_len = cpu_to_le16(cap_len);
3784 hci_dev_unlock(hdev);
3786 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3787 rp, sizeof(*rp) + cap_len);
/* Experimental-feature UUIDs. The byte arrays store each UUID in
 * reversed (little-endian/bluetooth wire) byte order relative to the
 * canonical string in the comment above each one.
 */
3790 #ifdef CONFIG_BT_FEATURE_DEBUG
3791 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3792 static const u8 debug_uuid[16] = {
3793 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3794 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3798 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3799 static const u8 simult_central_periph_uuid[16] = {
3800 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3801 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3804 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3805 static const u8 rpa_resolution_uuid[16] = {
3806 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3807 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental
 * features currently exposed (debug, simultaneous central/peripheral,
 * LL privacy / RPA resolution) with their flag bits.
 */
3810 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3811 void *data, u16 data_len)
3813 char buf[62]; /* Enough space for 3 features */
/* rp aliases buf: feature_count header plus 20-byte feature entries */
3814 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3818 bt_dev_dbg(hdev, "sock %p", sk);
3820 memset(&buf, 0, sizeof(buf));
3822 #ifdef CONFIG_BT_FEATURE_DEBUG
/* BIT(0) reports whether the debug feature is currently enabled */
3824 flags = bt_dbg_get() ? BIT(0) : 0;
3826 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3827 rp->features[idx].flags = cpu_to_le32(flags);
/* Advertise simultaneous central+peripheral only when the controller's
 * LE supported-states declare all three roles usable together.
 */
3833 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3834 (hdev->le_states[4] & 0x08) && /* Central */
3835 (hdev->le_states[4] & 0x40) && /* Peripheral */
3836 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3841 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3842 rp->features[idx].flags = cpu_to_le32(flags);
3846 if (hdev && use_ll_privacy(hdev)) {
3847 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3848 flags = BIT(0) | BIT(1);
3852 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3853 rp->features[idx].flags = cpu_to_le32(flags);
3857 rp->feature_count = cpu_to_le16(idx);
3859 /* After reading the experimental features information, enable
3860 * the events to update client on any future change.
3862 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
/* hdev may be NULL: the command is also valid on the non-controller index */
3864 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3865 MGMT_OP_READ_EXP_FEATURES_INFO,
3866 0, rp, sizeof(*rp) + (20 * idx));
/* Emit MGMT_EV_EXP_FEATURE_CHANGED for the LL privacy feature. BIT(0)
 * reflects the enabled state; BIT(1) is always set (the same
 * "supported settings changed" flag used in set_rpa_resolution_func).
 */
3869 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3872 struct mgmt_ev_exp_feature_changed ev;
3874 memset(&ev, 0, sizeof(ev));
3875 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3876 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3878 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3880 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3884 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Emit MGMT_EV_EXP_FEATURE_CHANGED for the debug feature; sent with a
 * NULL hdev since the debug toggle is global, not per-controller.
 */
3885 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3887 struct mgmt_ev_exp_feature_changed ev;
3889 memset(&ev, 0, sizeof(ev));
3890 memcpy(ev.uuid, debug_uuid, 16);
3891 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3893 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3895 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Build one entry of the exp_features[] table, pairing a feature UUID
 * with its set handler (the .uuid initializer line is elided in this view).
 */
3899 #define EXP_FEAT(_uuid, _set_func) \
3902 .set_func = _set_func, \
3905 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the zero UUID disables every experimental feature at once:
 * the debug toggle (when built in) and LL privacy (when supported and
 * the controller is powered off).
 */
3906 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
3907 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3909 struct mgmt_rp_set_exp_feature rp;
/* Reply with the zero UUID and cleared flags */
3911 memset(rp.uuid, 0, 16);
3912 rp.flags = cpu_to_le32(0);
3914 #ifdef CONFIG_BT_FEATURE_DEBUG
3916 bool changed = bt_dbg_get();
/* Notify listeners only if the debug state actually changed */
3921 exp_debug_feature_changed(false, sk);
3925 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3928 changed = hci_dev_test_and_clear_flag(hdev,
3929 HCI_ENABLE_LL_PRIVACY);
3931 exp_ll_privacy_feature_changed(false, hdev, sk);
3934 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3936 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3937 MGMT_OP_SET_EXP_FEATURE, 0,
3941 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Set handler for the debug experimental feature: toggles the global
 * bt_dbg state. Only valid on the non-controller (NONE) index.
 */
3942 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
3943 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3945 struct mgmt_rp_set_exp_feature rp;
3950 /* Command requires to use the non-controller index */
3952 return mgmt_cmd_status(sk, hdev->id,
3953 MGMT_OP_SET_EXP_FEATURE,
3954 MGMT_STATUS_INVALID_INDEX);
3956 /* Parameters are limited to a single octet */
3957 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3958 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3959 MGMT_OP_SET_EXP_FEATURE,
3960 MGMT_STATUS_INVALID_PARAMS);
3962 /* Only boolean on/off is supported */
3963 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3964 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3965 MGMT_OP_SET_EXP_FEATURE,
3966 MGMT_STATUS_INVALID_PARAMS);
3968 val = !!cp->param[0];
/* changed is true when the requested value differs from current state */
3969 changed = val ? !bt_dbg_get() : bt_dbg_get();
3972 memcpy(rp.uuid, debug_uuid, 16);
3973 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3975 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3977 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3978 MGMT_OP_SET_EXP_FEATURE, 0,
/* Broadcast the change to other sockets (guard on changed is elided) */
3982 exp_debug_feature_changed(val, sk);
/* Set handler for the LL privacy / RPA resolution experimental feature.
 * Requires a controller index and a powered-off controller.
 */
3988 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
3989 struct mgmt_cp_set_exp_feature *cp,
3992 struct mgmt_rp_set_exp_feature rp;
3997 /* Command requires to use the controller index */
3999 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4000 MGMT_OP_SET_EXP_FEATURE,
4001 MGMT_STATUS_INVALID_INDEX);
4003 /* Changes can only be made when controller is powered down */
4004 if (hdev_is_powered(hdev))
4005 return mgmt_cmd_status(sk, hdev->id,
4006 MGMT_OP_SET_EXP_FEATURE,
4007 MGMT_STATUS_REJECTED);
4009 /* Parameters are limited to a single octet */
4010 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4011 return mgmt_cmd_status(sk, hdev->id,
4012 MGMT_OP_SET_EXP_FEATURE,
4013 MGMT_STATUS_INVALID_PARAMS);
4015 /* Only boolean on/off is supported */
4016 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4017 return mgmt_cmd_status(sk, hdev->id,
4018 MGMT_OP_SET_EXP_FEATURE,
4019 MGMT_STATUS_INVALID_PARAMS);
4021 val = !!cp->param[0];
4024 changed = !hci_dev_test_and_set_flag(hdev,
4025 HCI_ENABLE_LL_PRIVACY);
/* Software-based advertising rotation is incompatible with LL privacy */
4026 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4028 /* Enable LL privacy + supported settings changed */
4029 flags = BIT(0) | BIT(1);
4031 changed = hci_dev_test_and_clear_flag(hdev,
4032 HCI_ENABLE_LL_PRIVACY);
4034 /* Disable LL privacy + supported settings changed */
4038 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4039 rp.flags = cpu_to_le32(flags);
4041 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4043 err = mgmt_cmd_complete(sk, hdev->id,
4044 MGMT_OP_SET_EXP_FEATURE, 0,
/* Broadcast the change to other sockets (guard on changed is elided) */
4048 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Dispatch table mapping experimental-feature UUIDs to their set
 * handlers; terminated by an all-NULL sentinel entry.
 */
4053 static const struct mgmt_exp_feature {
4055 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4056 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4057 } exp_features[] = {
4058 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4059 #ifdef CONFIG_BT_FEATURE_DEBUG
4060 EXP_FEAT(debug_uuid, set_debug_func),
4062 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4064 /* end with a null feature */
4065 EXP_FEAT(NULL, NULL)
4068 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4069 void *data, u16 data_len)
4071 struct mgmt_cp_set_exp_feature *cp = data;
4074 bt_dev_dbg(hdev, "sock %p", sk);
4076 for (i = 0; exp_features[i].uuid; i++) {
4077 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4078 return exp_features[i].set_func(sk, hdev, cp, data_len);
4081 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4082 MGMT_OP_SET_EXP_FEATURE,
4083 MGMT_STATUS_NOT_SUPPORTED);
4086 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* Handler for MGMT_OP_GET_DEVICE_FLAGS: report supported and current
 * flags for a device on the accept list (BR/EDR) or in the LE
 * connection parameters.
 */
4088 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4091 struct mgmt_cp_get_device_flags *cp = data;
4092 struct mgmt_rp_get_device_flags rp;
4093 struct bdaddr_list_with_flags *br_params;
4094 struct hci_conn_params *params;
4095 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4096 u32 current_flags = 0;
/* Assume failure until the device is found */
4097 u8 status = MGMT_STATUS_INVALID_PARAMS;
4099 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4100 &cp->addr.bdaddr, cp->addr.type);
4104 memset(&rp, 0, sizeof(rp));
/* BR/EDR devices keep their flags on the accept list entry */
4106 if (cp->addr.type == BDADDR_BREDR) {
4107 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4113 current_flags = br_params->current_flags;
/* LE devices keep theirs in the stored connection parameters */
4115 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4116 le_addr_type(cp->addr.type));
4121 current_flags = params->current_flags;
4124 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4125 rp.addr.type = cp->addr.type;
4126 rp.supported_flags = cpu_to_le32(supported_flags);
4127 rp.current_flags = cpu_to_le32(current_flags);
4129 status = MGMT_STATUS_SUCCESS;
4132 hci_dev_unlock(hdev);
4134 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Emit an MGMT_EV_DEVICE_FLAGS_CHANGED event for the given device.
 * sk is the originating socket: passed as skip_sk so the requester does
 * not receive the event it triggered.
 */
4138 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4139 bdaddr_t *bdaddr, u8 bdaddr_type,
4140 u32 supported_flags, u32 current_flags)
4142 struct mgmt_ev_device_flags_changed ev;
4144 bacpy(&ev.addr.bdaddr, bdaddr);
4145 ev.addr.type = bdaddr_type;
4146 ev.supported_flags = cpu_to_le32(supported_flags);
4147 ev.current_flags = cpu_to_le32(current_flags);
4149 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: update the per-device flags for one
 * remote device.  Flags outside SUPPORTED_DEVICE_FLAGS() are rejected
 * up front.  On success a DEVICE_FLAGS_CHANGED event is emitted (after
 * dropping the dev lock) and the reply echoes the address back.
 */
4152 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4155 struct mgmt_cp_set_device_flags *cp = data;
4156 struct bdaddr_list_with_flags *br_params;
4157 struct hci_conn_params *params;
4158 u8 status = MGMT_STATUS_INVALID_PARAMS;
4159 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4160 u32 current_flags = __le32_to_cpu(cp->current_flags);
4162 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4163 &cp->addr.bdaddr, cp->addr.type,
4164 __le32_to_cpu(current_flags));
/* Reject any requested flag bit that is not in the supported mask. */
4166 if ((supported_flags | current_flags) != supported_flags) {
4167 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4168 current_flags, supported_flags);
4174 if (cp->addr.type == BDADDR_BREDR) {
4175 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4180 br_params->current_flags = current_flags;
4181 status = MGMT_STATUS_SUCCESS;
4183 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4184 &cp->addr.bdaddr, cp->addr.type);
/* LE case: update flags on the connection-parameter entry. */
4187 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4188 le_addr_type(cp->addr.type));
4190 params->current_flags = current_flags;
4191 status = MGMT_STATUS_SUCCESS;
4193 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4195 le_addr_type(cp->addr.type));
4200 hci_dev_unlock(hdev);
/* Broadcast the change only when a flag update actually happened. */
4202 if (status == MGMT_STATUS_SUCCESS)
4203 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4204 supported_flags, current_flags);
4206 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4207 &cp->addr, sizeof(cp->addr));
/* Emit MGMT_EV_ADV_MONITOR_ADDED for a newly registered advertisement
 * monitor handle; sk is skipped as the originator of the change.
 */
4210 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4213 struct mgmt_ev_adv_monitor_added ev;
4215 ev.monitor_handle = cpu_to_le16(handle);
4217 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADV_MONITOR_REMOVED for a removed monitor handle.
 * If a REMOVE_ADV_MONITOR command is pending for this controller, the
 * requesting socket is determined from it (sk_skip) so the requester is
 * not notified of its own removal; the cp->monitor_handle check appears
 * to distinguish single-handle removal from remove-all (lines elided
 * here — confirm against full source).
 */
4220 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4222 struct mgmt_ev_adv_monitor_removed ev;
4223 struct mgmt_pending_cmd *cmd;
4224 struct sock *sk_skip = NULL;
4225 struct mgmt_cp_remove_adv_monitor *cp;
4227 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4231 if (cp->monitor_handle)
4235 ev.monitor_handle = cpu_to_le16(handle);
4237 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * monitor features plus the handles of all currently registered
 * monitors.  The handle list is snapshotted under the dev lock into a
 * fixed-size stack array, then copied into a kmalloc'd reply sized for
 * the actual number of handles.
 */
4240 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4241 void *data, u16 len)
4243 struct adv_monitor *monitor = NULL;
4244 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4247 __u32 supported = 0;
4249 __u16 num_handles = 0;
4250 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4252 BT_DBG("request for %s", hdev->name);
/* MSFT extension is currently the only monitor backend. */
4256 if (msft_monitor_supported(hdev))
4257 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4259 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4260 handles[num_handles++] = monitor->handle;
4262 hci_dev_unlock(hdev);
4264 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4265 rp = kmalloc(rp_size, GFP_KERNEL);
4269 /* All supported features are currently enabled */
4270 enabled = supported;
4272 rp->supported_features = cpu_to_le32(supported);
4273 rp->enabled_features = cpu_to_le32(enabled);
4274 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4275 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4276 rp->num_handles = cpu_to_le16(num_handles);
4278 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4280 err = mgmt_cmd_complete(sk, hdev->id,
4281 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4282 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for ADD_ADV_PATTERNS_MONITOR[_RSSI]: finds the
 * pending command (RSSI variant first), and on controller success marks
 * the monitor registered, emits ADV_MONITOR_ADDED, bumps the monitor
 * count and kicks background scanning.  The mgmt reply carries the
 * assigned monitor handle.
 */
4289 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4291 struct mgmt_rp_add_adv_patterns_monitor rp;
4292 struct mgmt_pending_cmd *cmd;
4293 struct adv_monitor *monitor;
4298 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4300 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
/* The monitor was stashed on the pending command when it was queued. */
4305 monitor = cmd->user_data;
4306 rp.monitor_handle = cpu_to_le16(monitor->handle);
4309 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4310 hdev->adv_monitors_cnt++;
4311 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4312 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4313 hci_update_background_scan(hdev);
4316 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4317 mgmt_status(status), &rp, sizeof(rp));
4318 mgmt_pending_remove(cmd);
4321 hci_dev_unlock(hdev);
4322 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4323 rp.monitor_handle, status);
/* Common tail for both ADD_ADV_PATTERNS_MONITOR variants.  Rejects the
 * request as busy when any conflicting monitor/LE command is pending,
 * then queues a pending command and hands the monitor to the core via
 * hci_add_adv_monitor().  If the core does not need to forward anything
 * to the controller (pending == false), the command completes
 * synchronously here; otherwise the completion callback above replies.
 * On any failure the monitor m is freed before returning the status.
 */
4328 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4329 struct adv_monitor *m, u8 status,
4330 void *data, u16 len, u16 op)
4332 struct mgmt_rp_add_adv_patterns_monitor rp;
4333 struct mgmt_pending_cmd *cmd;
4342 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4343 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4344 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4345 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4346 status = MGMT_STATUS_BUSY;
4350 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4352 status = MGMT_STATUS_NO_RESOURCES;
4357 pending = hci_add_adv_monitor(hdev, m, &err);
/* Map the core's errno to a mgmt status code. */
4359 if (err == -ENOSPC || err == -ENOMEM)
4360 status = MGMT_STATUS_NO_RESOURCES;
4361 else if (err == -EINVAL)
4362 status = MGMT_STATUS_INVALID_PARAMS;
4364 status = MGMT_STATUS_FAILED;
4366 mgmt_pending_remove(cmd);
/* Synchronous path: nothing sent to the controller, reply directly. */
4371 mgmt_pending_remove(cmd);
4372 rp.monitor_handle = cpu_to_le16(m->handle);
4373 mgmt_adv_monitor_added(sk, hdev, m->handle);
4374 m->state = ADV_MONITOR_STATE_REGISTERED;
4375 hdev->adv_monitors_cnt++;
4377 hci_dev_unlock(hdev);
4378 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4382 hci_dev_unlock(hdev);
/* Error path: the monitor is owned by us until handed off — free it. */
4387 hci_free_adv_monitor(hdev, m);
4388 hci_dev_unlock(hdev);
4389 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Fill in the monitor's RSSI parameters from the request, or apply
 * permissive defaults when rssi is NULL (the non-RSSI command variant).
 */
4392 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4393 struct mgmt_adv_rssi_thresholds *rssi)
4396 m->rssi.low_threshold = rssi->low_threshold;
4397 m->rssi.low_threshold_timeout =
4398 __le16_to_cpu(rssi->low_threshold_timeout);
4399 m->rssi.high_threshold = rssi->high_threshold;
4400 m->rssi.high_threshold_timeout =
4401 __le16_to_cpu(rssi->high_threshold_timeout);
4402 m->rssi.sampling_period = rssi->sampling_period;
4404 /* Default values. These numbers are the least constricting
4405 * parameters for MSFT API to work, so it behaves as if there
4406 * are no rssi parameter to consider. May need to be changed
4407 * if other API are to be supported.
4409 m->rssi.low_threshold = -127;
4410 m->rssi.low_threshold_timeout = 60;
4411 m->rssi.high_threshold = -127;
4412 m->rssi.high_threshold_timeout = 0;
4413 m->rssi.sampling_period = 0;
/* Validate and copy pattern_count advertisement patterns into the
 * monitor's pattern list.  Each pattern's offset/length must fit within
 * HCI_MAX_AD_LENGTH bytes of AD data (u8 operands promote to int, so the
 * offset+length sum cannot wrap).  Returns a MGMT_STATUS_* code;
 * patterns already added remain on m->patterns for the caller's cleanup
 * path to free on failure.
 */
4417 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4418 struct mgmt_adv_pattern *patterns)
4420 u8 offset = 0, length = 0;
4421 struct adv_pattern *p = NULL;
4424 for (i = 0; i < pattern_count; i++) {
4425 offset = patterns[i].offset;
4426 length = patterns[i].length;
4427 if (offset >= HCI_MAX_AD_LENGTH ||
4428 length > HCI_MAX_AD_LENGTH ||
4429 (offset + length) > HCI_MAX_AD_LENGTH)
4430 return MGMT_STATUS_INVALID_PARAMS;
4432 p = kmalloc(sizeof(*p), GFP_KERNEL);
4434 return MGMT_STATUS_NO_RESOURCES;
4436 p->ad_type = patterns[i].ad_type;
4437 p->offset = patterns[i].offset;
4438 p->length = patterns[i].length;
4439 memcpy(p->value, patterns[i].value, p->length);
4441 INIT_LIST_HEAD(&p->list);
4442 list_add(&p->list, &m->patterns);
4445 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds).
 * Validates that len matches header + pattern_count patterns exactly,
 * allocates the monitor, applies default RSSI values, parses patterns,
 * and defers to __add_adv_patterns_monitor() which also handles any
 * non-success status accumulated here.
 */
4448 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4449 void *data, u16 len)
4451 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4452 struct adv_monitor *m = NULL;
4453 u8 status = MGMT_STATUS_SUCCESS;
4454 size_t expected_size = sizeof(*cp);
4456 BT_DBG("request for %s", hdev->name);
/* Must carry at least one pattern beyond the fixed header. */
4458 if (len <= sizeof(*cp)) {
4459 status = MGMT_STATUS_INVALID_PARAMS;
4463 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4464 if (len != expected_size) {
4465 status = MGMT_STATUS_INVALID_PARAMS;
4469 m = kzalloc(sizeof(*m), GFP_KERNEL);
4471 status = MGMT_STATUS_NO_RESOURCES;
4475 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi selects the permissive defaults. */
4477 parse_adv_monitor_rssi(m, NULL);
4478 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4481 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4482 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: identical flow to
 * add_adv_patterns_monitor() except the request carries explicit RSSI
 * thresholds which are parsed into the monitor before the patterns.
 */
4485 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4486 void *data, u16 len)
4488 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4489 struct adv_monitor *m = NULL;
4490 u8 status = MGMT_STATUS_SUCCESS;
4491 size_t expected_size = sizeof(*cp);
4493 BT_DBG("request for %s", hdev->name);
4495 if (len <= sizeof(*cp)) {
4496 status = MGMT_STATUS_INVALID_PARAMS;
4500 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4501 if (len != expected_size) {
4502 status = MGMT_STATUS_INVALID_PARAMS;
4506 m = kzalloc(sizeof(*m), GFP_KERNEL);
4508 status = MGMT_STATUS_NO_RESOURCES;
4512 INIT_LIST_HEAD(&m->patterns);
4514 parse_adv_monitor_rssi(m, &cp->rssi);
4515 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4518 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4519 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for REMOVE_ADV_MONITOR: replies to the pending
 * command with the handle taken from the original request and refreshes
 * background scanning now that the monitor set changed.
 */
4522 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4524 struct mgmt_rp_remove_adv_monitor rp;
4525 struct mgmt_cp_remove_adv_monitor *cp;
4526 struct mgmt_pending_cmd *cmd;
4531 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* Echo back the (already little-endian) handle from the request. */
4536 rp.monitor_handle = cp->monitor_handle;
4539 hci_update_background_scan(hdev);
4541 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4542 mgmt_status(status), &rp, sizeof(rp));
4543 mgmt_pending_remove(cmd);
4546 hci_dev_unlock(hdev);
4547 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4548 rp.monitor_handle, status);
/* MGMT_OP_REMOVE_ADV_MONITOR handler.  A handle of 0 removes all
 * monitors, otherwise the single named one.  Busy-rejects when a
 * conflicting monitor/LE command is pending.  When the core can remove
 * the monitor without contacting the controller, the command completes
 * synchronously here; otherwise mgmt_remove_adv_monitor_complete()
 * replies later.  Errors map -ENOENT-style lookups to INVALID_INDEX
 * (exact errno check elided here — confirm against full source).
 */
4553 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4554 void *data, u16 len)
4556 struct mgmt_cp_remove_adv_monitor *cp = data;
4557 struct mgmt_rp_remove_adv_monitor rp;
4558 struct mgmt_pending_cmd *cmd;
4559 u16 handle = __le16_to_cpu(cp->monitor_handle);
4563 BT_DBG("request for %s", hdev->name);
4564 rp.monitor_handle = cp->monitor_handle;
4568 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4569 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4570 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4571 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4572 status = MGMT_STATUS_BUSY;
4576 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4578 status = MGMT_STATUS_NO_RESOURCES;
/* handle == 0 means "remove all monitors". */
4583 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4585 pending = hci_remove_all_adv_monitor(hdev, &err);
4588 mgmt_pending_remove(cmd);
4591 status = MGMT_STATUS_INVALID_INDEX;
4593 status = MGMT_STATUS_FAILED;
4598 /* monitor can be removed without forwarding request to controller */
4600 mgmt_pending_remove(cmd);
4601 hci_dev_unlock(hdev);
4603 return mgmt_cmd_complete(sk, hdev->id,
4604 MGMT_OP_REMOVE_ADV_MONITOR,
4605 MGMT_STATUS_SUCCESS,
4609 hci_dev_unlock(hdev);
4613 hci_dev_unlock(hdev);
4614 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* HCI completion handler for the two local-OOB read commands.  Copies
 * the controller's hash/randomizer values into the mgmt reply: the
 * legacy command supplies only the P-192 pair (the reply is shrunk so
 * the absent P-256 fields are not sent); the extended command supplies
 * both P-192 and P-256.  Short skbs are rejected before reading the
 * response struct.
 */
4618 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4619 u16 opcode, struct sk_buff *skb)
4621 struct mgmt_rp_read_local_oob_data mgmt_rp;
4622 size_t rp_size = sizeof(mgmt_rp);
4623 struct mgmt_pending_cmd *cmd;
4625 bt_dev_dbg(hdev, "status %u", status);
4627 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4631 if (status || !skb) {
4632 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4633 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4637 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4639 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4640 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a truncated controller response. */
4642 if (skb->len < sizeof(*rp)) {
4643 mgmt_cmd_status(cmd->sk, hdev->id,
4644 MGMT_OP_READ_LOCAL_OOB_DATA,
4645 MGMT_STATUS_FAILED);
4649 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4650 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy command: trim the unused P-256 fields from the reply. */
4652 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4654 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4656 if (skb->len < sizeof(*rp)) {
4657 mgmt_cmd_status(cmd->sk, hdev->id,
4658 MGMT_OP_READ_LOCAL_OOB_DATA,
4659 MGMT_STATUS_FAILED);
4663 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4664 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4666 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4667 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4670 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4671 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4674 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller and no duplicate pending request.  Issues the extended OOB
 * read when BR/EDR Secure Connections is enabled, else the legacy read,
 * and completes asynchronously in read_local_oob_data_complete().
 */
4677 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4678 void *data, u16 data_len)
4680 struct mgmt_pending_cmd *cmd;
4681 struct hci_request req;
4684 bt_dev_dbg(hdev, "sock %p", sk);
4688 if (!hdev_is_powered(hdev)) {
4689 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4690 MGMT_STATUS_NOT_POWERED);
4694 if (!lmp_ssp_capable(hdev)) {
4695 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4696 MGMT_STATUS_NOT_SUPPORTED);
4700 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4701 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4706 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4712 hci_req_init(&req, hdev);
4714 if (bredr_sc_enabled(hdev))
4715 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4717 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4719 err = hci_req_run_skb(&req, read_local_oob_data_complete);
/* If the request could not be queued, drop the pending command. */
4721 mgmt_pending_remove(cmd);
4724 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Accepts two wire formats,
 * distinguished by length: the short form carries only P-192 hash/rand
 * (BR/EDR only), the extended form carries both P-192 and P-256.  For
 * LE addresses the P-192 values must be zero (legacy SMP OOB is not
 * implemented); all-zero P-192 or P-256 pairs disable OOB data for that
 * curve by passing NULL pointers to hci_add_remote_oob_data().
 */
4728 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4729 void *data, u16 len)
4731 struct mgmt_addr_info *addr = data;
4734 bt_dev_dbg(hdev, "sock %p", sk);
4736 if (!bdaddr_type_is_valid(addr->type))
4737 return mgmt_cmd_complete(sk, hdev->id,
4738 MGMT_OP_ADD_REMOTE_OOB_DATA,
4739 MGMT_STATUS_INVALID_PARAMS,
4740 addr, sizeof(*addr));
/* Short (P-192 only) form. */
4744 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4745 struct mgmt_cp_add_remote_oob_data *cp = data;
4748 if (cp->addr.type != BDADDR_BREDR) {
4749 err = mgmt_cmd_complete(sk, hdev->id,
4750 MGMT_OP_ADD_REMOTE_OOB_DATA,
4751 MGMT_STATUS_INVALID_PARAMS,
4752 &cp->addr, sizeof(cp->addr));
4756 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4757 cp->addr.type, cp->hash,
4758 cp->rand, NULL, NULL);
4760 status = MGMT_STATUS_FAILED;
4762 status = MGMT_STATUS_SUCCESS;
4764 err = mgmt_cmd_complete(sk, hdev->id,
4765 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4766 &cp->addr, sizeof(cp->addr));
4767 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4768 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4769 u8 *rand192, *hash192, *rand256, *hash256;
4772 if (bdaddr_type_is_le(cp->addr.type)) {
4773 /* Enforce zero-valued 192-bit parameters as
4774 * long as legacy SMP OOB isn't implemented.
4776 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4777 memcmp(cp->hash192, ZERO_KEY, 16)) {
4778 err = mgmt_cmd_complete(sk, hdev->id,
4779 MGMT_OP_ADD_REMOTE_OOB_DATA,
4780 MGMT_STATUS_INVALID_PARAMS,
4781 addr, sizeof(*addr));
4788 /* In case one of the P-192 values is set to zero,
4789 * then just disable OOB data for P-192.
4791 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4792 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4796 rand192 = cp->rand192;
4797 hash192 = cp->hash192;
4801 /* In case one of the P-256 values is set to zero, then just
4802 * disable OOB data for P-256.
4804 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4805 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4809 rand256 = cp->rand256;
4810 hash256 = cp->hash256;
4813 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4814 cp->addr.type, hash192, rand192,
4817 status = MGMT_STATUS_FAILED;
4819 status = MGMT_STATUS_SUCCESS;
4821 err = mgmt_cmd_complete(sk, hdev->id,
4822 MGMT_OP_ADD_REMOTE_OOB_DATA,
4823 status, &cp->addr, sizeof(cp->addr));
/* Any other length is a malformed request. */
4825 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4827 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4828 MGMT_STATUS_INVALID_PARAMS);
4832 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BR/EDR only.  BDADDR_ANY
 * clears all stored remote OOB data; otherwise the single entry for the
 * given address is removed (an unknown address maps to INVALID_PARAMS).
 */
4836 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4837 void *data, u16 len)
4839 struct mgmt_cp_remove_remote_oob_data *cp = data;
4843 bt_dev_dbg(hdev, "sock %p", sk);
4845 if (cp->addr.type != BDADDR_BREDR)
4846 return mgmt_cmd_complete(sk, hdev->id,
4847 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4848 MGMT_STATUS_INVALID_PARAMS,
4849 &cp->addr, sizeof(cp->addr));
4853 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4854 hci_remote_oob_data_clear(hdev);
4855 status = MGMT_STATUS_SUCCESS;
4859 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4861 status = MGMT_STATUS_INVALID_PARAMS;
4863 status = MGMT_STATUS_SUCCESS;
4866 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4867 status, &cp->addr, sizeof(cp->addr));
4869 hci_dev_unlock(hdev);
/* Completion hook for the three discovery-start commands: replies to
 * whichever variant is pending, then wakes the suspend machinery if it
 * was waiting for discovery to be unpaused.
 */
4873 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4875 struct mgmt_pending_cmd *cmd;
4877 bt_dev_dbg(hdev, "status %u", status);
/* Try each discovery-start opcode in turn; only one can be pending. */
4881 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4883 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4886 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4889 cmd->cmd_complete(cmd, mgmt_status(status));
4890 mgmt_pending_remove(cmd);
4893 hci_dev_unlock(hdev);
4895 /* Handle suspend notifier */
4896 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4897 hdev->suspend_tasks)) {
4898 bt_dev_dbg(hdev, "Unpaused discovery");
4899 wake_up(&hdev->suspend_wait_q);
/* Check whether the controller supports the requested discovery type.
 * On failure *mgmt_status receives the reason (LE/BR-EDR support check
 * result, or MGMT_STATUS_INVALID_PARAMS for unknown types); the bool
 * return tells the caller whether to proceed.
 */
4903 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4904 uint8_t *mgmt_status)
4907 case DISCOV_TYPE_LE:
4908 *mgmt_status = mgmt_le_support(hdev);
4912 case DISCOV_TYPE_INTERLEAVED:
4913 *mgmt_status = mgmt_le_support(hdev);
4917 case DISCOV_TYPE_BREDR:
4918 *mgmt_status = mgmt_bredr_support(hdev);
4923 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for START_DISCOVERY and START_LIMITED_DISCOVERY.
 * Validates power state, idle discovery state, discovery type and the
 * suspend pause flag, then records the parameters, queues a pending
 * command and kicks the discov_update worker; the actual reply is sent
 * from mgmt_start_discovery_complete().
 */
4930 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4931 u16 op, void *data, u16 len)
4933 struct mgmt_cp_start_discovery *cp = data;
4934 struct mgmt_pending_cmd *cmd;
4938 bt_dev_dbg(hdev, "sock %p", sk);
4942 if (!hdev_is_powered(hdev)) {
4943 err = mgmt_cmd_complete(sk, hdev->id, op,
4944 MGMT_STATUS_NOT_POWERED,
4945 &cp->type, sizeof(cp->type));
/* Busy while discovery is already active or periodic inquiry runs. */
4949 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4950 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4951 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4952 &cp->type, sizeof(cp->type));
4956 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4957 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4958 &cp->type, sizeof(cp->type));
4962 /* Can't start discovery when it is paused */
4963 if (hdev->discovery_paused) {
4964 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4965 &cp->type, sizeof(cp->type));
4969 /* Clear the discovery filter first to free any previously
4970 * allocated memory for the UUID list.
4972 hci_discovery_filter_clear(hdev);
4974 hdev->discovery.type = cp->type;
4975 hdev->discovery.report_invalid_rssi = false;
4976 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4977 hdev->discovery.limited = true;
4979 hdev->discovery.limited = false;
4981 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4987 cmd->cmd_complete = generic_cmd_complete;
4989 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4990 queue_work(hdev->req_workqueue, &hdev->discov_update);
4994 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY: thin wrapper over the shared implementation. */
4998 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4999 void *data, u16 len)
5001 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY: wrapper selecting the limited variant. */
5005 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5006 void *data, u16 len)
5008 return start_discovery_internal(sk, hdev,
5009 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete callback for START_SERVICE_DISCOVERY: replies with the
 * command-specific payload stored on the pending command.
 */
5013 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
5016 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * an RSSI threshold and an optional UUID filter list.  uuid_count is
 * bounded by max_uuid_count so that sizeof(*cp) + uuid_count * 16 cannot
 * overflow u16, and len must match that size exactly.  The UUID list is
 * duplicated into hdev->discovery before the discov_update worker runs.
 */
5020 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5021 void *data, u16 len)
5023 struct mgmt_cp_start_service_discovery *cp = data;
5024 struct mgmt_pending_cmd *cmd;
/* Upper bound keeping the expected_len computation within U16_MAX. */
5025 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5026 u16 uuid_count, expected_len;
5030 bt_dev_dbg(hdev, "sock %p", sk);
5034 if (!hdev_is_powered(hdev)) {
5035 err = mgmt_cmd_complete(sk, hdev->id,
5036 MGMT_OP_START_SERVICE_DISCOVERY,
5037 MGMT_STATUS_NOT_POWERED,
5038 &cp->type, sizeof(cp->type));
5042 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5043 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5044 err = mgmt_cmd_complete(sk, hdev->id,
5045 MGMT_OP_START_SERVICE_DISCOVERY,
5046 MGMT_STATUS_BUSY, &cp->type,
5051 if (hdev->discovery_paused) {
5052 err = mgmt_cmd_complete(sk, hdev->id,
5053 MGMT_OP_START_SERVICE_DISCOVERY,
5054 MGMT_STATUS_BUSY, &cp->type,
5059 uuid_count = __le16_to_cpu(cp->uuid_count);
5060 if (uuid_count > max_uuid_count) {
5061 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5063 err = mgmt_cmd_complete(sk, hdev->id,
5064 MGMT_OP_START_SERVICE_DISCOVERY,
5065 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Each UUID on the wire is 16 bytes (128-bit form). */
5070 expected_len = sizeof(*cp) + uuid_count * 16;
5071 if (expected_len != len) {
5072 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5074 err = mgmt_cmd_complete(sk, hdev->id,
5075 MGMT_OP_START_SERVICE_DISCOVERY,
5076 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5081 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5082 err = mgmt_cmd_complete(sk, hdev->id,
5083 MGMT_OP_START_SERVICE_DISCOVERY,
5084 status, &cp->type, sizeof(cp->type));
5088 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5095 cmd->cmd_complete = service_discovery_cmd_complete;
5097 /* Clear the discovery filter first to free any previously
5098 * allocated memory for the UUID list.
5100 hci_discovery_filter_clear(hdev);
5102 hdev->discovery.result_filtering = true;
5103 hdev->discovery.type = cp->type;
5104 hdev->discovery.rssi = cp->rssi;
5105 hdev->discovery.uuid_count = uuid_count;
5107 if (uuid_count > 0) {
5108 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5110 if (!hdev->discovery.uuids) {
5111 err = mgmt_cmd_complete(sk, hdev->id,
5112 MGMT_OP_START_SERVICE_DISCOVERY,
5114 &cp->type, sizeof(cp->type));
5115 mgmt_pending_remove(cmd);
5120 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5121 queue_work(hdev->req_workqueue, &hdev->discov_update);
5125 hci_dev_unlock(hdev);
/* Completion hook for STOP_DISCOVERY: replies to the pending command
 * and wakes the suspend machinery if it was waiting for discovery to
 * be paused.
 */
5129 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5131 struct mgmt_pending_cmd *cmd;
5133 bt_dev_dbg(hdev, "status %u", status);
5137 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5139 cmd->cmd_complete(cmd, mgmt_status(status));
5140 mgmt_pending_remove(cmd);
5143 hci_dev_unlock(hdev);
5145 /* Handle suspend notifier */
5146 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5147 bt_dev_dbg(hdev, "Paused discovery");
5148 wake_up(&hdev->suspend_wait_q);
/* MGMT_OP_STOP_DISCOVERY handler: rejects when no discovery is active
 * or the requested type does not match the running one, then queues a
 * pending command and lets the discov_update worker drive the stop; the
 * reply comes from mgmt_stop_discovery_complete().
 */
5152 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5155 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5156 struct mgmt_pending_cmd *cmd;
5159 bt_dev_dbg(hdev, "sock %p", sk);
5163 if (!hci_discovery_active(hdev)) {
5164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5165 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5166 sizeof(mgmt_cp->type));
5170 if (hdev->discovery.type != mgmt_cp->type) {
5171 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5172 MGMT_STATUS_INVALID_PARAMS,
5173 &mgmt_cp->type, sizeof(mgmt_cp->type));
5177 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5183 cmd->cmd_complete = generic_cmd_complete;
5185 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5186 queue_work(hdev->req_workqueue, &hdev->discov_update);
5190 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, user space confirms
 * whether a device's name is already known.  Updates the inquiry-cache
 * entry's name_state; NAME_NEEDED entries are queued for resolution via
 * hci_inquiry_cache_update_resolve().
 */
5194 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5197 struct mgmt_cp_confirm_name *cp = data;
5198 struct inquiry_entry *e;
5201 bt_dev_dbg(hdev, "sock %p", sk);
5205 if (!hci_discovery_active(hdev)) {
5206 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5207 MGMT_STATUS_FAILED, &cp->addr,
5212 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5214 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5215 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5220 if (cp->name_known) {
5221 e->name_state = NAME_KNOWN;
5224 e->name_state = NAME_NEEDED;
5225 hci_inquiry_cache_update_resolve(hdev, e);
5228 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5229 &cp->addr, sizeof(cp->addr));
5232 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: adds the address to hdev->reject_list
 * and broadcasts MGMT_EV_DEVICE_BLOCKED (skipping the requester) on
 * success.  A failed list add maps to MGMT_STATUS_FAILED.
 */
5236 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5239 struct mgmt_cp_block_device *cp = data;
5243 bt_dev_dbg(hdev, "sock %p", sk);
5245 if (!bdaddr_type_is_valid(cp->addr.type))
5246 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5247 MGMT_STATUS_INVALID_PARAMS,
5248 &cp->addr, sizeof(cp->addr));
5252 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5255 status = MGMT_STATUS_FAILED;
5259 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5261 status = MGMT_STATUS_SUCCESS;
5264 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5265 &cp->addr, sizeof(cp->addr));
5267 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: removes the address from
 * hdev->reject_list and broadcasts MGMT_EV_DEVICE_UNBLOCKED on success.
 * A failed delete (not on the list) maps to INVALID_PARAMS.
 */
5272 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5275 struct mgmt_cp_unblock_device *cp = data;
5279 bt_dev_dbg(hdev, "sock %p", sk);
5281 if (!bdaddr_type_is_valid(cp->addr.type))
5282 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5283 MGMT_STATUS_INVALID_PARAMS,
5284 &cp->addr, sizeof(cp->addr));
5288 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5291 status = MGMT_STATUS_INVALID_PARAMS;
5295 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5297 status = MGMT_STATUS_SUCCESS;
5300 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5301 &cp->addr, sizeof(cp->addr));
5303 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: stores the Device ID record fields
 * (source must be 0x0000-0x0002 per the DI profile) and refreshes the
 * EIR data so the new DID is advertised.
 */
5308 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5311 struct mgmt_cp_set_device_id *cp = data;
5312 struct hci_request req;
5316 bt_dev_dbg(hdev, "sock %p", sk);
5318 source = __le16_to_cpu(cp->source);
5320 if (source > 0x0002)
5321 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5322 MGMT_STATUS_INVALID_PARAMS);
5326 hdev->devid_source = source;
5327 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5328 hdev->devid_product = __le16_to_cpu(cp->product);
5329 hdev->devid_version = __le16_to_cpu(cp->version);
5331 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Push the updated Device ID into the EIR data. */
5334 hci_req_init(&req, hdev);
5335 __hci_req_update_eir(&req);
5336 hci_req_run(&req, NULL);
5338 hci_dev_unlock(hdev);
/* HCI request callback used when re-enabling instance advertising:
 * logging only, no further action needed.
 */
5343 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5346 bt_dev_dbg(hdev, "status %u", status);
/* HCI completion for SET_ADVERTISING.  On error, all pending
 * SET_ADVERTISING commands get a status reply.  On success the
 * HCI_ADVERTISING flag is synced to the actual HCI_LE_ADV state, the
 * pending commands are answered and new_settings broadcast; suspend
 * waiters are woken for both pause and unpause transitions.  Finally,
 * if Set Advertising was just turned off while advertising instances
 * exist, instance advertising is rescheduled (current instance if set,
 * else the first configured one).
 */
5349 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5352 struct cmd_lookup match = { NULL, hdev };
5353 struct hci_request req;
5355 struct adv_info *adv_instance;
5361 u8 mgmt_err = mgmt_status(status);
5363 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5364 cmd_status_rsp, &mgmt_err);
5368 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5369 hci_dev_set_flag(hdev, HCI_ADVERTISING)
5371 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5373 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5376 new_settings(hdev, match.sk);
5381 /* Handle suspend notifier */
5382 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5383 hdev->suspend_tasks)) {
5384 bt_dev_dbg(hdev, "Paused advertising");
5385 wake_up(&hdev->suspend_wait_q);
5386 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5387 hdev->suspend_tasks)) {
5388 bt_dev_dbg(hdev, "Unpaused advertising");
5389 wake_up(&hdev->suspend_wait_q);
5392 /* If "Set Advertising" was just disabled and instance advertising was
5393 * set up earlier, then re-enable multi-instance advertising.
5395 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5396 list_empty(&hdev->adv_instances))
5399 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one. */
5401 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5402 struct adv_info, list);
5406 instance = adv_instance->instance;
5409 hci_req_init(&req, hdev);
5411 err = __hci_req_schedule_adv_instance(&req, instance, true);
5414 err = hci_req_run(&req, enable_advertising_instance);
5417 bt_dev_err(hdev, "failed to re-configure advertising");
5420 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler.  val: 0x00 off, 0x01 on, 0x02 on and
 * connectable.  Rejected while LL privacy is enabled or advertising is
 * paused for suspend.  When no HCI traffic is needed (powered off,
 * state already matches, LE link active, or active LE scan running) the
 * flags are toggled directly and the reply is sent synchronously;
 * otherwise a pending command is queued and an HCI request is built to
 * (re)enable or disable advertising on instance 0, completing in
 * set_advertising_complete().
 */
5423 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5426 struct mgmt_mode *cp = data;
5427 struct mgmt_pending_cmd *cmd;
5428 struct hci_request req;
5432 bt_dev_dbg(hdev, "sock %p", sk);
5434 status = mgmt_le_support(hdev);
5436 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5439 /* Enabling the experimental LL Privay support disables support for
5442 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5444 MGMT_STATUS_NOT_SUPPORTED);
5446 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5448 MGMT_STATUS_INVALID_PARAMS);
5450 if (hdev->advertising_paused)
5451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5458 /* The following conditions are ones which mean that we should
5459 * not do any HCI communication but directly send a mgmt
5460 * response to user space (after toggling the flag if
5463 if (!hdev_is_powered(hdev) ||
5464 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5465 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5466 hci_conn_num(hdev, LE_LINK) > 0 ||
5467 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5468 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5472 hdev->cur_adv_instance = 0x00;
5473 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5474 if (cp->val == 0x02)
5475 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5477 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5479 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5480 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5483 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Broadcast new settings only if a flag actually changed. */
5488 err = new_settings(hdev, sk);
5493 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5494 pending_find(MGMT_OP_SET_LE, hdev)) {
5495 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5500 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5506 hci_req_init(&req, hdev);
5508 if (cp->val == 0x02)
5509 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5511 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5513 cancel_adv_timeout(hdev);
5516 /* Switch to instance "0" for the Set Advertising setting.
5517 * We cannot use update_[adv|scan_rsp]_data() here as the
5518 * HCI_ADVERTISING flag is not yet set.
5520 hdev->cur_adv_instance = 0x00;
5522 if (ext_adv_capable(hdev)) {
5523 __hci_req_start_ext_adv(&req, 0x00);
5525 __hci_req_update_adv_data(&req, 0x00);
5526 __hci_req_update_scan_rsp_data(&req, 0x00);
5527 __hci_req_enable_advertising(&req);
5530 __hci_req_disable_advertising(&req);
5533 err = hci_req_run(&req, set_advertising_complete);
5535 mgmt_pending_remove(cmd);
5538 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only valid on LE-capable,
 * powered-off controllers.  A non-ANY address must not be BDADDR_NONE
 * and must have its two most significant bits set, as the Core spec
 * requires for static random addresses.
 */
5542 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5543 void *data, u16 len)
5545 struct mgmt_cp_set_static_address *cp = data;
5548 bt_dev_dbg(hdev, "sock %p", sk);
5550 if (!lmp_le_capable(hdev))
5551 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5552 MGMT_STATUS_NOT_SUPPORTED);
5554 if (hdev_is_powered(hdev))
5555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5556 MGMT_STATUS_REJECTED);
5558 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5559 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5560 return mgmt_cmd_status(sk, hdev->id,
5561 MGMT_OP_SET_STATIC_ADDRESS,
5562 MGMT_STATUS_INVALID_PARAMS);
5564 /* Two most significant bits shall be set */
5565 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5566 return mgmt_cmd_status(sk, hdev->id,
5567 MGMT_OP_SET_STATIC_ADDRESS,
5568 MGMT_STATUS_INVALID_PARAMS);
5573 bacpy(&hdev->static_addr, &cp->bdaddr);
5575 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5579 err = new_settings(hdev, sk);
5582 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate LE scan interval/window
 * (range 0x0004-0x4000, window must not exceed interval), store them
 * on hdev, and restart background passive scanning so the new values
 * take effect immediately.
 */
5586 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5587 void *data, u16 len)
5589 struct mgmt_cp_set_scan_params *cp = data;
5590 __u16 interval, window;
5593 bt_dev_dbg(hdev, "sock %p", sk);
5595 if (!lmp_le_capable(hdev))
5596 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5597 MGMT_STATUS_NOT_SUPPORTED);
/* Parameters arrive little-endian on the wire. */
5599 interval = __le16_to_cpu(cp->interval)
5601 if (interval < 0x0004 || interval > 0x4000)
5602 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5603 MGMT_STATUS_INVALID_PARAMS);
5605 window = __le16_to_cpu(cp->window);
5607 if (window < 0x0004 || window > 0x4000)
5608 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5609 MGMT_STATUS_INVALID_PARAMS);
5611 if (window > interval)
5612 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5613 MGMT_STATUS_INVALID_PARAMS);
5617 hdev->le_scan_interval = interval;
5618 hdev->le_scan_window = window;
5620 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5623 /* If background scan is running, restart it so new parameters are
5626 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5627 hdev->discovery.state == DISCOVERY_STOPPED) {
5628 struct hci_request req;
5630 hci_req_init(&req, hdev);
/* Stop the current scan and re-arm passive scan with new params. */
5632 hci_req_add_le_scan_disable(&req, false);
5633 hci_req_add_le_passive_scan(&req);
5635 hci_req_run(&req, NULL);
5638 hci_dev_unlock(hdev);
/* HCI request completion callback for set_fast_connectable(): on
 * failure reply with a translated status, on success sync the
 * HCI_FAST_CONNECTABLE flag with the requested mode and broadcast
 * New Settings. Always drops the pending command.
 */
5643 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5646 struct mgmt_pending_cmd *cmd;
5648 bt_dev_dbg(hdev, "status 0x%02x", status);
5652 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5657 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5658 mgmt_status(status));
/* Success path: mirror the requested mode into the dev flag. */
5660 struct mgmt_mode *cp = cmd->param;
5663 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5665 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5667 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5668 new_settings(hdev, cmd->sk);
5671 mgmt_pending_remove(cmd);
5674 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled and
 * HCI version >= 1.2. When powered, issues the page-scan parameter
 * write asynchronously and completes in fast_connectable_complete();
 * when powered off, just toggles the flag.
 */
5677 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5678 void *data, u16 len)
5680 struct mgmt_mode *cp = data;
5681 struct mgmt_pending_cmd *cmd;
5682 struct hci_request req;
5685 bt_dev_dbg(hdev, "sock %p", sk);
5687 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5688 hdev->hci_ver < BLUETOOTH_VER_1_2)
5689 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5690 MGMT_STATUS_NOT_SUPPORTED);
5692 if (cp->val != 0x00 && cp->val != 0x01)
5693 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5694 MGMT_STATUS_INVALID_PARAMS);
/* Only one Set Fast Connectable may be in flight at a time. */
5698 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5699 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested state already matches the flag. */
5704 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5705 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
/* Powered-off: flip the flag now, apply to hardware at power on. */
5710 if (!hdev_is_powered(hdev)) {
5711 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5712 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5714 new_settings(hdev, sk);
5718 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5725 hci_req_init(&req, hdev);
5727 __hci_req_write_fast_connectable(&req, cp->val);
5729 err = hci_req_run(&req, fast_connectable_complete);
5731 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5732 MGMT_STATUS_FAILED);
5733 mgmt_pending_remove(cmd);
5737 hci_dev_unlock(hdev);
/* HCI request completion callback for set_bredr(): on failure the
 * HCI_BREDR_ENABLED flag (set optimistically before the request) is
 * rolled back; on success the settings response and New Settings
 * event are emitted.
 */
5742 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5744 struct mgmt_pending_cmd *cmd;
5746 bt_dev_dbg(hdev, "status 0x%02x", status);
5750 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5755 u8 mgmt_err = mgmt_status(status);
5757 /* We need to restore the flag if related HCI commands
5760 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5762 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5764 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5765 new_settings(hdev, cmd->sk);
5768 mgmt_pending_remove(cmd);
5771 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: toggle BR/EDR support on a dual-mode
 * controller. LE must stay enabled; disabling BR/EDR while powered is
 * rejected, as is re-enabling it when a static address or Secure
 * Connections would make the configuration invalid. On the powered
 * path the flag is flipped first and rolled back by
 * set_bredr_complete() if the HCI request fails.
 */
5774 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5776 struct mgmt_mode *cp = data;
5777 struct mgmt_pending_cmd *cmd;
5778 struct hci_request req;
5781 bt_dev_dbg(hdev, "sock %p", sk);
5783 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5785 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR can only be toggled while LE remains the enabled transport. */
5787 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5789 MGMT_STATUS_REJECTED);
5791 if (cp->val != 0x00 && cp->val != 0x01)
5792 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5793 MGMT_STATUS_INVALID_PARAMS);
5797 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5798 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered-off path: clear all BR/EDR-only settings when disabling. */
5802 if (!hdev_is_powered(hdev)) {
5804 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5805 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5806 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5807 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5808 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5811 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5813 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5817 err = new_settings(hdev, sk);
5821 /* Reject disabling when powered on */
5823 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5824 MGMT_STATUS_REJECTED);
5827 /* When configuring a dual-mode controller to operate
5828 * with LE only and using a static address, then switching
5829 * BR/EDR back on is not allowed.
5831 * Dual-mode controllers shall operate with the public
5832 * address as its identity address for BR/EDR and LE. So
5833 * reject the attempt to create an invalid configuration.
5835 * The same restrictions applies when secure connections
5836 * has been enabled. For BR/EDR this is a controller feature
5837 * while for LE it is a host stack feature. This means that
5838 * switching BR/EDR back on when secure connections has been
5839 * enabled is not a supported transaction.
5841 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5842 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5843 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5844 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5845 MGMT_STATUS_REJECTED);
5850 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5851 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5856 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5862 /* We need to flip the bit already here so that
5863 * hci_req_update_adv_data generates the correct flags.
5865 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5867 hci_req_init(&req, hdev);
5869 __hci_req_write_fast_connectable(&req, false);
5870 __hci_req_update_scan(&req);
5872 /* Since only the advertising data flags will change, there
5873 * is no need to update the scan response data.
5875 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5877 err = hci_req_run(&req, set_bredr_complete);
5879 mgmt_pending_remove(cmd);
5882 hci_dev_unlock(hdev);
/* HCI Write SC Support completion callback for set_secure_conn():
 * maps the written value onto the HCI_SC_ENABLED / HCI_SC_ONLY flag
 * pair (off / enabled / SC-only) and answers the pending command.
 */
5886 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5888 struct mgmt_pending_cmd *cmd;
5889 struct mgmt_mode *cp;
5891 bt_dev_dbg(hdev, "status %u", status);
5895 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5900 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5901 mgmt_status(status));
/* Three outcomes (branches elided in this view): disabled,
 * enabled (0x01), and SC-only mode (0x02). */
5909 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5910 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5913 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5914 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5917 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5918 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5922 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5923 new_settings(hdev, cmd->sk);
5926 mgmt_pending_remove(cmd);
5928 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: val 0x00 = off, 0x01 = enabled,
 * 0x02 = SC-only. When the controller is unpowered, not SC-capable,
 * or BR/EDR is off, only the host flags are updated; otherwise
 * HCI_OP_WRITE_SC_SUPPORT is sent and sc_enable_complete() finishes.
 */
5931 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5932 void *data, u16 len)
5934 struct mgmt_mode *cp = data;
5935 struct mgmt_pending_cmd *cmd;
5936 struct hci_request req;
5940 bt_dev_dbg(hdev, "sock %p", sk);
5942 if (!lmp_sc_capable(hdev) &&
5943 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5945 MGMT_STATUS_NOT_SUPPORTED);
/* SC on BR/EDR requires SSP to be enabled first. */
5947 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5948 lmp_sc_capable(hdev) &&
5949 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5950 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5951 MGMT_STATUS_REJECTED);
5953 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5954 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5955 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI traffic needed/possible. */
5959 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5960 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5964 changed = !hci_dev_test_and_set_flag(hdev,
5966 if (cp->val == 0x02)
5967 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5969 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5971 changed = hci_dev_test_and_clear_flag(hdev,
5973 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5976 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5981 err = new_settings(hdev, sk);
5986 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5987 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state: reply without HCI round-trip. */
5994 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5995 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5996 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6000 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6006 hci_req_init(&req, hdev);
6007 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
6008 err = hci_req_run(&req, sc_enable_complete);
6010 mgmt_pending_remove(cmd);
6015 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: val 0x00 = discard debug keys,
 * 0x01 = keep them, 0x02 = keep and actively use debug keys (enables
 * SSP debug mode on the controller when powered and SSP is on).
 */
6019 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6020 void *data, u16 len)
6022 struct mgmt_mode *cp = data;
6023 bool changed, use_changed;
6026 bt_dev_dbg(hdev, "sock %p", sk);
6028 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6030 MGMT_STATUS_INVALID_PARAMS);
/* Track whether the "keep" flag actually flipped. */
6035 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6037 changed = hci_dev_test_and_clear_flag(hdev,
6038 HCI_KEEP_DEBUG_KEYS);
/* 0x02 additionally turns on active use of debug keys. */
6040 if (cp->val == 0x02)
6041 use_changed = !hci_dev_test_and_set_flag(hdev,
6042 HCI_USE_DEBUG_KEYS);
6044 use_changed = hci_dev_test_and_clear_flag(hdev,
6045 HCI_USE_DEBUG_KEYS);
/* Push SSP debug mode to the controller only when it changed. */
6047 if (hdev_is_powered(hdev) && use_changed &&
6048 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6049 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6050 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6051 sizeof(mode), &mode);
6054 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6059 err = new_settings(hdev, sk);
6062 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (RPA usage).
 * privacy 0x01 = full privacy, 0x02 = limited privacy. Stores the
 * IRK, marks the RPA expired so a fresh one gets generated, and is
 * only allowed while powered off.
 */
6066 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6069 struct mgmt_cp_set_privacy *cp = cp_data;
6073 bt_dev_dbg(hdev, "sock %p", sk);
6075 if (!lmp_le_capable(hdev))
6076 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6077 MGMT_STATUS_NOT_SUPPORTED);
6079 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6080 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6081 MGMT_STATUS_INVALID_PARAMS);
6083 if (hdev_is_powered(hdev))
6084 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6085 MGMT_STATUS_REJECTED);
6089 /* If user space supports this command it is also expected to
6090 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6092 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: store IRK and force RPA regeneration. */
6095 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6096 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6097 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6098 hci_adv_instances_set_rpa_expired(hdev, true);
6099 if (cp->privacy == 0x02)
6100 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6102 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: wipe the IRK and all privacy-related flags. */
6104 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6105 memset(hdev->irk, 0, sizeof(hdev->irk));
6106 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6107 hci_adv_instances_set_rpa_expired(hdev, false);
6108 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6111 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6116 err = new_settings(hdev, sk);
6119 hci_dev_unlock(hdev);
/* Validate an IRK entry's address type: public LE addresses are
 * accepted as-is, random LE addresses must be static (two MSBs set).
 */
6123 static bool irk_is_valid(struct mgmt_irk_info *irk)
6125 switch (irk->addr.type) {
6126 case BDADDR_LE_PUBLIC:
6129 case BDADDR_LE_RANDOM:
6130 /* Two most significant bits shall be set */
6131 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the SMP IRK store with the
 * supplied list. Validates the count against the payload length and
 * each entry's address type, skips administratively blocked keys,
 * and enables RPA resolving once keys are loaded.
 */
6139 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6142 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound keeps irk_count * entry size within a u16 length. */
6143 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6144 sizeof(struct mgmt_irk_info));
6145 u16 irk_count, expected_len;
6148 bt_dev_dbg(hdev, "sock %p", sk);
6150 if (!lmp_le_capable(hdev))
6151 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6152 MGMT_STATUS_NOT_SUPPORTED);
6154 irk_count = __le16_to_cpu(cp->irk_count);
6155 if (irk_count > max_irk_count) {
6156 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6158 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6159 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must match the actual payload size exactly. */
6162 expected_len = struct_size(cp, irks, irk_count);
6163 if (expected_len != len) {
6164 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6166 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6167 MGMT_STATUS_INVALID_PARAMS);
6170 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate all entries before touching the existing store. */
6172 for (i = 0; i < irk_count; i++) {
6173 struct mgmt_irk_info *key = &cp->irks[i];
6175 if (!irk_is_valid(key))
6176 return mgmt_cmd_status(sk, hdev->id,
6178 MGMT_STATUS_INVALID_PARAMS);
6183 hci_smp_irks_clear(hdev);
6185 for (i = 0; i < irk_count; i++) {
6186 struct mgmt_irk_info *irk = &cp->irks[i];
6188 if (hci_is_blocked_key(hdev,
6189 HCI_BLOCKED_KEY_TYPE_IRK,
6191 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6196 hci_add_irk(hdev, &irk->addr.bdaddr,
6197 le_addr_type(irk->addr.type), irk->val,
6201 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6203 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6205 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_ADVERTISING_PARAMS handler: store LE advertising
 * interval bounds, filter policy and advertising type on hdev. Only
 * allowed while the Set Advertising setting is off; the values are
 * picked up when advertising is next enabled.
 */
6211 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
6212 void *data, u16 len)
6214 struct mgmt_cp_set_advertising_params *cp = data;
6219 BT_DBG("%s", hdev->name);
6221 if (!lmp_le_capable(hdev))
6222 return mgmt_cmd_status(sk, hdev->id,
6223 MGMT_OP_SET_ADVERTISING_PARAMS,
6224 MGMT_STATUS_NOT_SUPPORTED);
/* Parameters cannot be changed while advertising is active. */
6226 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6227 return mgmt_cmd_status(sk, hdev->id,
6228 MGMT_OP_SET_ADVERTISING_PARAMS,
6231 min_interval = __le16_to_cpu(cp->interval_min);
6232 max_interval = __le16_to_cpu(cp->interval_max);
/* Core spec range check: min >= 0x0020, max <= 0x4000, min <= max. */
6234 if (min_interval > max_interval ||
6235 min_interval < 0x0020 || max_interval > 0x4000)
6236 return mgmt_cmd_status(sk, hdev->id,
6237 MGMT_OP_SET_ADVERTISING_PARAMS,
6238 MGMT_STATUS_INVALID_PARAMS);
6242 hdev->le_adv_min_interval = min_interval;
6243 hdev->le_adv_max_interval = max_interval;
6244 hdev->adv_filter_policy = cp->filter_policy;
6245 hdev->adv_type = cp->type;
6247 err = mgmt_cmd_complete(sk, hdev->id,
6248 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
6250 hci_dev_unlock(hdev);
/* HCI completion callback for set_advertising_data(): report the
 * translated status (or success) to the pending command's socket and
 * drop the pending entry.
 */
6255 static void set_advertising_data_complete(struct hci_dev *hdev,
6256 u8 status, u16 opcode)
6258 struct mgmt_cp_set_advertising_data *cp;
6259 struct mgmt_pending_cmd *cmd;
6261 BT_DBG("status 0x%02x", status);
6265 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
6272 mgmt_cmd_status(cmd->sk, hdev->id,
6273 MGMT_OP_SET_ADVERTISING_DATA,
6274 mgmt_status(status));
6276 mgmt_cmd_complete(cmd->sk, hdev->id,
6277 MGMT_OP_SET_ADVERTISING_DATA, 0,
6280 mgmt_pending_remove(cmd);
6283 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_ADVERTISING_DATA handler: push raw advertising
 * data (bounded by HCI_MAX_AD_LENGTH) to the controller via
 * HCI_OP_LE_SET_ADV_DATA; completion is handled in
 * set_advertising_data_complete().
 */
6286 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6287 void *data, u16 len)
6289 struct mgmt_pending_cmd *cmd;
6290 struct hci_request req;
6291 struct mgmt_cp_set_advertising_data *cp = data;
6292 struct hci_cp_le_set_adv_data adv;
6295 BT_DBG("%s", hdev->name);
6297 if (!lmp_le_capable(hdev)) {
6298 return mgmt_cmd_status(sk, hdev->id,
6299 MGMT_OP_SET_ADVERTISING_DATA,
6300 MGMT_STATUS_NOT_SUPPORTED);
6305 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6306 err = mgmt_cmd_status(sk, hdev->id,
6307 MGMT_OP_SET_ADVERTISING_DATA,
/* len here is the whole mgmt payload, used directly as AD length. */
6312 if (len > HCI_MAX_AD_LENGTH) {
6313 err = mgmt_cmd_status(sk, hdev->id,
6314 MGMT_OP_SET_ADVERTISING_DATA,
6315 MGMT_STATUS_INVALID_PARAMS);
6319 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6326 hci_req_init(&req, hdev);
6328 memset(&adv, 0, sizeof(adv));
6329 memcpy(adv.data, cp->data, len);
/* NOTE(review): the line setting adv.length is elided in this
 * view; it is assumed to be set to len before the request. */
6332 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6334 err = hci_req_run(&req, set_advertising_data_complete);
6336 mgmt_pending_remove(cmd);
6339 hci_dev_unlock(hdev);
/* HCI completion callback for set_scan_rsp_data(): answer the pending
 * command with the translated status and remove it.
 */
6344 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6347 struct mgmt_cp_set_scan_rsp_data *cp;
6348 struct mgmt_pending_cmd *cmd;
6350 BT_DBG("status 0x%02x", status);
6354 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6361 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6362 mgmt_status(status));
6364 mgmt_cmd_complete(cmd->sk, hdev->id,
6365 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6368 mgmt_pending_remove(cmd);
6371 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_SCAN_RSP_DATA handler: push raw scan response
 * data (bounded by HCI_MAX_AD_LENGTH) to the controller via
 * HCI_OP_LE_SET_SCAN_RSP_DATA; mirrors set_advertising_data().
 */
6374 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6377 struct mgmt_pending_cmd *cmd;
6378 struct hci_request req;
6379 struct mgmt_cp_set_scan_rsp_data *cp = data;
6380 struct hci_cp_le_set_scan_rsp_data rsp;
6383 BT_DBG("%s", hdev->name);
6385 if (!lmp_le_capable(hdev))
6386 return mgmt_cmd_status(sk, hdev->id,
6387 MGMT_OP_SET_SCAN_RSP_DATA,
6388 MGMT_STATUS_NOT_SUPPORTED);
6392 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6393 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6398 if (len > HCI_MAX_AD_LENGTH) {
6399 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6400 MGMT_STATUS_INVALID_PARAMS);
6404 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6410 hci_req_init(&req, hdev);
6412 memset(&rsp, 0, sizeof(rsp));
6413 memcpy(rsp.data, cp->data, len);
6416 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6418 err = hci_req_run(&req, set_scan_rsp_data_complete);
6420 mgmt_pending_remove(cmd);
6423 hci_dev_unlock(hdev);
6428 /* Adv White List feature: Tizen-specific mgmt handlers for adding to,
 * removing from and clearing the controller's LE white list. */
/* HCI completion callback for add_white_list(): echo the request
 * parameters back on success, otherwise report the translated status.
 */
6429 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6431 struct mgmt_cp_add_dev_white_list *cp;
6432 struct mgmt_pending_cmd *cmd;
6434 BT_DBG("status 0x%02x", status);
6438 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
6445 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6446 mgmt_status(status));
6448 mgmt_cmd_complete(cmd->sk, hdev->id,
6449 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
6451 mgmt_pending_remove(cmd);
6454 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_ADD_DEV_WHITE_LIST handler: forward the device
 * address to HCI_OP_LE_ADD_TO_WHITE_LIST. Requires LE support and a
 * powered controller; only one request may be pending at a time.
 */
6457 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
6458 void *data, u16 len)
6460 struct mgmt_pending_cmd *cmd;
6461 struct mgmt_cp_add_dev_white_list *cp = data;
6462 struct hci_request req;
6465 BT_DBG("%s", hdev->name);
6467 if (!lmp_le_capable(hdev))
6468 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6469 MGMT_STATUS_NOT_SUPPORTED);
6471 if (!hdev_is_powered(hdev))
6472 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6473 MGMT_STATUS_REJECTED);
6477 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
6478 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6483 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
6489 hci_req_init(&req, hdev);
/* The mgmt parameter layout is passed through to HCI unchanged. */
6491 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
6493 err = hci_req_run(&req, add_white_list_complete);
6495 mgmt_pending_remove(cmd);
6500 hci_dev_unlock(hdev);
/* HCI completion callback for remove_from_white_list(): answer the
 * pending command with the translated status or success payload.
 */
6505 static void remove_from_white_list_complete(struct hci_dev *hdev,
6506 u8 status, u16 opcode)
6508 struct mgmt_cp_remove_dev_from_white_list *cp;
6509 struct mgmt_pending_cmd *cmd;
6511 BT_DBG("status 0x%02x", status);
6515 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
6522 mgmt_cmd_status(cmd->sk, hdev->id,
6523 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6524 mgmt_status(status));
6526 mgmt_cmd_complete(cmd->sk, hdev->id,
6527 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
6530 mgmt_pending_remove(cmd);
6533 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST handler: forward the
 * device address to HCI_OP_LE_DEL_FROM_WHITE_LIST; same precondition
 * checks as add_white_list().
 */
6536 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
6537 void *data, u16 len)
6539 struct mgmt_pending_cmd *cmd;
6540 struct mgmt_cp_remove_dev_from_white_list *cp = data;
6541 struct hci_request req;
6544 BT_DBG("%s", hdev->name);
6546 if (!lmp_le_capable(hdev))
6547 return mgmt_cmd_status(sk, hdev->id,
6548 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6549 MGMT_STATUS_NOT_SUPPORTED);
6551 if (!hdev_is_powered(hdev))
6552 return mgmt_cmd_status(sk, hdev->id,
6553 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6554 MGMT_STATUS_REJECTED);
6558 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
6559 err = mgmt_cmd_status(sk, hdev->id,
6560 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6565 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6572 hci_req_init(&req, hdev);
6574 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
6576 err = hci_req_run(&req, remove_from_white_list_complete);
6578 mgmt_pending_remove(cmd);
6583 hci_dev_unlock(hdev);
/* HCI completion callback for clear_white_list(): report the
 * translated status (or success) and drop the pending command.
 */
6588 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
6591 struct mgmt_pending_cmd *cmd;
6593 BT_DBG("status 0x%02x", status);
6597 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
6602 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6603 mgmt_status(status));
6605 mgmt_cmd_complete(cmd->sk, hdev->id,
6606 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6609 mgmt_pending_remove(cmd);
6612 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_CLEAR_DEV_WHITE_LIST handler: issue
 * HCI_OP_LE_CLEAR_WHITE_LIST (no parameters) to empty the
 * controller's LE white list; same precondition checks as the other
 * white-list handlers.
 */
6615 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
6616 void *data, u16 len)
6618 struct mgmt_pending_cmd *cmd;
6619 struct hci_request req;
6622 BT_DBG("%s", hdev->name);
6624 if (!lmp_le_capable(hdev))
6625 return mgmt_cmd_status(sk, hdev->id,
6626 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6627 MGMT_STATUS_NOT_SUPPORTED);
6629 if (!hdev_is_powered(hdev))
6630 return mgmt_cmd_status(sk, hdev->id,
6631 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6632 MGMT_STATUS_REJECTED);
6636 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
6637 err = mgmt_cmd_status(sk, hdev->id,
6638 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6643 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6650 hci_req_init(&req, hdev);
6652 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
6654 err = hci_req_run(&req, clear_white_list_complete);
6656 mgmt_pending_remove(cmd);
6661 hci_dev_unlock(hdev);
/* HCI completion callback for the RSSI-threshold vendor request
 * issued by mgmt_set_rssi_threshold(): answers the pending
 * MGMT_OP_SET_RSSI_ENABLE command with the translated status.
 */
6666 static void set_rssi_threshold_complete(struct hci_dev *hdev,
6667 u8 status, u16 opcode)
6669 struct mgmt_pending_cmd *cmd;
6671 BT_DBG("status 0x%02x", status);
6675 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6680 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6681 mgmt_status(status));
6683 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6686 mgmt_pending_remove(cmd);
6689 hci_dev_unlock(hdev);
/* HCI completion callback for mgmt_set_disable_rssi(): answers the
 * pending MGMT_OP_SET_RSSI_DISABLE command with the translated
 * status.
 */
6692 static void set_rssi_disable_complete(struct hci_dev *hdev,
6693 u8 status, u16 opcode)
6695 struct mgmt_pending_cmd *cmd;
6697 BT_DBG("status 0x%02x", status);
6701 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6706 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6707 mgmt_status(status));
6709 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6712 mgmt_pending_remove(cmd);
6715 hci_dev_unlock(hdev);
/* Tizen RSSI monitoring: send the vendor "set RSSI threshold" HCI
 * command (ext opcode 0x0B) for the connection matching the caller's
 * address/link type, using the low/in-range/high thresholds from the
 * MGMT_OP_SET_RSSI_ENABLE parameters. Expects a pending
 * SET_RSSI_ENABLE command to already exist (added by
 * set_enable_rssi() or via mgmt_enable_rssi_cc()).
 */
6718 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6719 void *data, u16 len)
6722 struct hci_cp_set_rssi_threshold th = { 0, };
6723 struct mgmt_cp_set_enable_rssi *cp = data;
6724 struct hci_conn *conn;
6725 struct mgmt_pending_cmd *cmd;
6726 struct hci_request req;
6731 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6733 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6734 MGMT_STATUS_FAILED);
6738 if (!lmp_le_capable(hdev)) {
6739 mgmt_pending_remove(cmd);
6740 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6741 MGMT_STATUS_NOT_SUPPORTED);
6745 if (!hdev_is_powered(hdev)) {
6746 BT_DBG("%s", hdev->name);
6747 mgmt_pending_remove(cmd);
6748 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6749 MGMT_STATUS_NOT_POWERED);
/* link_type 0x01 selects the LE connection, otherwise BR/EDR. */
6753 if (cp->link_type == 0x01)
6754 dest_type = LE_LINK;
6756 dest_type = ACL_LINK;
6758 /* Get LE/ACL link handle info */
6759 conn = hci_conn_hash_lookup_ba(hdev,
6760 dest_type, &cp->bdaddr);
6763 err = mgmt_cmd_complete(sk, hdev->id,
6764 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
6765 mgmt_pending_remove(cmd);
6769 hci_req_init(&req, hdev);
/* Vendor sub-opcode 0x0B = set RSSI threshold for one handle. */
6771 th.hci_le_ext_opcode = 0x0B;
6773 th.conn_handle = conn->handle;
6774 th.alert_mask = 0x07;
6775 th.low_th = cp->low_th;
6776 th.in_range_th = cp->in_range_th;
6777 th.high_th = cp->high_th;
6779 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6780 err = hci_req_run(&req, set_rssi_threshold_complete);
6783 mgmt_pending_remove(cmd);
6784 BT_ERR("Error in requesting hci_req_run");
6789 hci_dev_unlock(hdev);
/* Report successful RSSI enabling to userspace: complete the mgmt
 * command, emit MGMT_EV_RSSI_ENABLED, and mark the connection's RSSI
 * monitoring state (clearing it on any other link of the same type
 * first, per hci_conn_rssi_unset_all).
 */
6793 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6794 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6796 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6797 struct mgmt_cp_set_enable_rssi *cp = data;
6798 struct mgmt_pending_cmd *cmd;
6803 mgmt_rp.status = rp->status;
6804 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6805 mgmt_rp.bt_address = cp->bdaddr;
6806 mgmt_rp.link_type = cp->link_type;
6808 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6809 MGMT_STATUS_SUCCESS, &mgmt_rp,
6810 sizeof(struct mgmt_cc_rsp_enable_rssi));
6812 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6813 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
/* Only one link per type is monitored at a time. */
6815 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6816 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6817 &mgmt_rp.bt_address, true);
6821 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6823 mgmt_pending_remove(cmd);
6825 hci_dev_unlock(hdev);
/* Report successful RSSI disabling to userspace: complete the mgmt
 * command, emit MGMT_EV_RSSI_DISABLED, and clear the connection's
 * RSSI monitoring state.
 */
6828 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6829 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6831 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6832 struct mgmt_cp_disable_rssi *cp = data;
6833 struct mgmt_pending_cmd *cmd;
6838 mgmt_rp.status = rp->status;
6839 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6840 mgmt_rp.bt_address = cp->bdaddr;
6841 mgmt_rp.link_type = cp->link_type;
/* NOTE(review): both sizes below use the *enable* reply struct
 * although mgmt_rp is a struct mgmt_cc_rp_disable_rssi — confirm
 * the two layouts are identical, otherwise this over/under-reads. */
6843 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6844 MGMT_STATUS_SUCCESS, &mgmt_rp,
6845 sizeof(struct mgmt_cc_rsp_enable_rssi));
6847 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6848 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6850 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6851 &mgmt_rp.bt_address, false);
6855 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6857 mgmt_pending_remove(cmd);
6859 hci_dev_unlock(hdev);
/* Tizen RSSI monitoring: send the vendor "enable RSSI" HCI command
 * with the CS-features byte cleared (0x00), i.e. turn RSSI reporting
 * off at the controller. Expects a pending MGMT_OP_SET_RSSI_DISABLE
 * command; completion is handled in set_rssi_disable_complete().
 */
6862 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6863 void *data, u16 len)
6865 struct mgmt_pending_cmd *cmd;
6866 struct hci_request req;
6867 struct hci_cp_set_enable_rssi cp_en = { 0, };
6870 BT_DBG("Set Disable RSSI.");
/* ext opcode 0x01 = enable/disable; features 0x00 disables. */
6872 cp_en.hci_le_ext_opcode = 0x01;
6873 cp_en.le_enable_cs_Features = 0x00;
6874 cp_en.data[0] = 0x00;
6875 cp_en.data[1] = 0x00;
6876 cp_en.data[2] = 0x00;
6880 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6882 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6883 MGMT_STATUS_FAILED);
6887 if (!lmp_le_capable(hdev)) {
6888 mgmt_pending_remove(cmd);
6889 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6890 MGMT_STATUS_NOT_SUPPORTED);
6894 if (!hdev_is_powered(hdev)) {
6895 BT_DBG("%s", hdev->name);
6896 mgmt_pending_remove(cmd);
6897 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6898 MGMT_STATUS_NOT_POWERED);
6902 hci_req_init(&req, hdev);
6904 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6905 sizeof(struct hci_cp_set_enable_rssi),
6906 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6907 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6909 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6910 err = hci_req_run(&req, set_rssi_disable_complete);
6913 mgmt_pending_remove(cmd);
6914 BT_ERR("Error in requesting hci_req_run");
6919 hci_dev_unlock(hdev);
/* Dispatch a vendor RSSI command-complete event: depending on whether
 * an enable or disable mgmt command is pending and on the returned
 * le_ext_opcode, either proceed to setting thresholds, report enable
 * success, report disable success, or fully disable RSSI when no
 * other links are still being monitored.
 */
6923 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6925 struct hci_cc_rsp_enable_rssi *rp = response;
6926 struct mgmt_pending_cmd *cmd_enable = NULL;
6927 struct mgmt_pending_cmd *cmd_disable = NULL;
6928 struct mgmt_cp_set_enable_rssi *cp_en;
6929 struct mgmt_cp_disable_rssi *cp_dis;
6932 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6933 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6934 hci_dev_unlock(hdev);
6937 BT_DBG("Enable Request");
6940 BT_DBG("Disable Request");
6943 cp_en = cmd_enable->param;
/* Enable flow: after the enable CC, program the thresholds;
 * after the threshold CC, report success to userspace. */
6948 switch (rp->le_ext_opcode) {
6950 BT_DBG("RSSI enabled.. Setting Threshold...");
6951 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6952 cp_en, sizeof(*cp_en));
6956 BT_DBG("Sending RSSI enable success");
6957 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6958 cp_en, rp, rp->status);
6962 } else if (cmd_disable) {
6963 cp_dis = cmd_disable->param;
6968 switch (rp->le_ext_opcode) {
6970 BT_DBG("Sending RSSI disable success");
6971 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6972 cp_dis, rp, rp->status);
6977 * Only unset RSSI Threshold values for the Link if
6978 * RSSI is monitored for other BREDR or LE Links
6980 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6981 BT_DBG("Unset Threshold. Other links being monitored");
6982 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6983 cp_dis, rp, rp->status);
6985 BT_DBG("Unset Threshold. Disabling...");
6986 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6987 cp_dis, sizeof(*cp_dis));
/* HCI completion callback for set_enable_rssi(): answers the pending
 * MGMT_OP_SET_RSSI_ENABLE command with the translated status.
 */
6994 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
6997 struct mgmt_pending_cmd *cmd;
6999 BT_DBG("status 0x%02x", status);
7003 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7008 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7009 mgmt_status(status));
7011 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7014 mgmt_pending_remove(cmd);
7017 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_RSSI_ENABLE handler: issue the vendor "enable
 * RSSI" HCI command (CS features 0x04). If RSSI reporting is already
 * active on some link, skip the enable and program thresholds
 * directly via mgmt_set_rssi_threshold(); otherwise completion
 * continues in set_rssi_enable_complete()/mgmt_enable_rssi_cc().
 */
7020 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7021 void *data, u16 len)
7023 struct mgmt_pending_cmd *cmd;
7024 struct hci_request req;
7025 struct mgmt_cp_set_enable_rssi *cp = data;
7026 struct hci_cp_set_enable_rssi cp_en = { 0, };
7029 BT_DBG("Set Enable RSSI.");
/* ext opcode 0x01 = enable/disable; features 0x04 enables RSSI. */
7031 cp_en.hci_le_ext_opcode = 0x01;
7032 cp_en.le_enable_cs_Features = 0x04;
7033 cp_en.data[0] = 0x00;
7034 cp_en.data[1] = 0x00;
7035 cp_en.data[2] = 0x00;
7039 if (!lmp_le_capable(hdev)) {
7040 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7041 MGMT_STATUS_NOT_SUPPORTED);
7045 if (!hdev_is_powered(hdev)) {
7046 BT_DBG("%s", hdev->name);
7047 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7048 MGMT_STATUS_NOT_POWERED);
7052 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
7053 BT_DBG("%s", hdev->name);
7054 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7059 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
7062 BT_DBG("%s", hdev->name);
7067 /* If RSSI is already enabled directly set Threshold values */
7068 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
7069 hci_dev_unlock(hdev);
7070 BT_DBG("RSSI Enabled. Directly set Threshold");
7071 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
7075 hci_req_init(&req, hdev);
7077 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7078 sizeof(struct hci_cp_set_enable_rssi),
7079 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7080 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7082 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7083 err = hci_req_run(&req, set_rssi_enable_complete);
7086 mgmt_pending_remove(cmd);
7087 BT_ERR("Error in requesting hci_req_run");
7092 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_GET_RAW_RSSI: completes the
 * pending command with the (1-byte) status and removes it. The actual RSSI
 * value is delivered separately via mgmt_raw_rssi_response().
 * NOTE(review): locking and early-return lines elided in this listing.
 */
7097 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7099 struct mgmt_pending_cmd *cmd;
7101 BT_DBG("status 0x%02x", status);
7105 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
7109 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7110 MGMT_STATUS_SUCCESS, &status, 1);
7112 mgmt_pending_remove(cmd);
7115 hci_dev_unlock(hdev);
/* MGMT_OP_GET_RAW_RSSI handler: resolves the LE/BR-EDR connection for the
 * given address, then issues the vendor HCI_OP_GET_RAW_RSSI command with the
 * connection handle. Fails with NOT_SUPPORTED / NOT_CONNECTED / NOT_POWERED /
 * (busy) as the visible checks show.
 * NOTE(review): listing gaps hide goto labels, unlock calls and return
 * statements; the powered check visibly runs after the connection lookup.
 */
7118 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
7121 struct mgmt_pending_cmd *cmd;
7122 struct hci_request req;
7123 struct mgmt_cp_get_raw_rssi *cp = data;
7124 struct hci_cp_get_raw_rssi hci_cp;
7126 struct hci_conn *conn;
7130 BT_DBG("Get Raw RSSI.");
7134 if (!lmp_le_capable(hdev)) {
7135 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7136 MGMT_STATUS_NOT_SUPPORTED);
/* link_type 0x01 selects LE, anything else BR/EDR (ACL). */
7140 if (cp->link_type == 0x01)
7141 dest_type = LE_LINK;
7143 dest_type = ACL_LINK;
7145 /* Get LE/BREDR link handle info */
7146 conn = hci_conn_hash_lookup_ba(hdev,
7147 dest_type, &cp->bt_address);
7149 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7150 MGMT_STATUS_NOT_CONNECTED);
7153 hci_cp.conn_handle = conn->handle;
7155 if (!hdev_is_powered(hdev)) {
7156 BT_DBG("%s", hdev->name);
7157 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7158 MGMT_STATUS_NOT_POWERED);
7162 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
7163 BT_DBG("%s", hdev->name);
7164 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7169 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
7171 BT_DBG("%s", hdev->name);
7176 hci_req_init(&req, hdev);
7178 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
7179 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
7180 err = hci_req_run(&req, get_raw_rssi_complete);
7183 mgmt_pending_remove(cmd);
7184 BT_ERR("Error in requesting hci_req_run");
7188 hci_dev_unlock(hdev);
/* Translates a vendor raw-RSSI HCI command-complete event into the
 * MGMT_EV_RAW_RSSI mgmt event: copies status/RSSI, resolves the connection
 * handle to a peer address, and tags link_type (0x01 = LE, 0x00 = BR/EDR).
 * NOTE(review): the !conn early-return line is elided in this listing —
 * presumably the function bails before bacpy; confirm in full source.
 */
7193 void mgmt_raw_rssi_response(struct hci_dev *hdev,
7194 struct hci_cc_rp_get_raw_rssi *rp, int success)
7196 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
7197 struct hci_conn *conn;
7199 mgmt_rp.status = rp->status;
7200 mgmt_rp.rssi_dbm = rp->rssi_dbm;
7202 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
7206 bacpy(&mgmt_rp.bt_address, &conn->dst);
7207 if (conn->type == LE_LINK)
7208 mgmt_rp.link_type = 0x01;
7210 mgmt_rp.link_type = 0x00;
7212 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
7213 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* HCI request completion callback for MGMT_OP_SET_RSSI_DISABLE: completes
 * the pending command with the 1-byte status and removes it.
 * NOTE(review): opening brace/locking lines elided in this listing.
 */
7216 static void set_disable_threshold_complete(struct hci_dev *hdev,
7217 u8 status, u16 opcode)
7219 struct mgmt_pending_cmd *cmd;
7221 BT_DBG("status 0x%02x", status);
7225 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7229 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7230 MGMT_STATUS_SUCCESS, &status, 1);
7232 mgmt_pending_remove(cmd);
7235 hci_dev_unlock(hdev);
7238 /** Removes monitoring for a link*/
/* MGMT_OP_SET_RSSI_DISABLE handler: removes RSSI monitoring for one link by
 * sending the vendor threshold sub-opcode (0x0B) with an all-zero alert mask
 * and thresholds for the connection's handle.
 * If the address is not connected it visibly replies "complete" with status 1
 * rather than an error status — presumably treated as already-disabled.
 * NOTE(review): elided lines include goto labels, unlock/return paths and
 * part of the threshold field initialisation (original 7277/7279 missing).
 */
7239 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
7240 void *data, u16 len)
7243 struct hci_cp_set_rssi_threshold th = { 0, };
7244 struct mgmt_cp_disable_rssi *cp = data;
7245 struct hci_conn *conn;
7246 struct mgmt_pending_cmd *cmd;
7247 struct hci_request req;
7250 BT_DBG("Set Disable RSSI.");
7254 if (!lmp_le_capable(hdev)) {
7255 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7256 MGMT_STATUS_NOT_SUPPORTED);
7260 /* Get LE/ACL link handle info*/
7261 if (cp->link_type == 0x01)
7262 dest_type = LE_LINK;
7264 dest_type = ACL_LINK;
7266 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
7268 err = mgmt_cmd_complete(sk, hdev->id,
7269 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
/* Sub-opcode 0x0B = set RSSI threshold; zeroed mask disables alerts. */
7273 th.hci_le_ext_opcode = 0x0B;
7275 th.conn_handle = conn->handle;
7276 th.alert_mask = 0x00;
7278 th.in_range_th = 0x00;
7281 if (!hdev_is_powered(hdev)) {
7282 BT_DBG("%s", hdev->name);
7283 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7288 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
7289 BT_DBG("%s", hdev->name);
7290 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7295 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
7298 BT_DBG("%s", hdev->name);
7303 hci_req_init(&req, hdev);
/* Threshold update is carried on the same vendor opcode as enable. */
7305 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7306 err = hci_req_run(&req, set_disable_threshold_complete);
7308 mgmt_pending_remove(cmd);
7309 BT_ERR("Error in requesting hci_req_run");
7314 hci_dev_unlock(hdev);
/* Forwards a vendor RSSI-alert HCI event to userspace as MGMT_EV_RSSI_ALERT:
 * resolves the handle to a peer address, tags link_type (0x01 = LE,
 * 0x00 = BR/EDR) and copies alert type and RSSI through unchanged.
 * NOTE(review): the !conn return after the BT_ERR is elided in this listing.
 */
7319 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
7320 s8 alert_type, s8 rssi_dbm)
7322 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
7323 struct hci_conn *conn;
7325 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
7326 conn_handle, alert_type, rssi_dbm);
7328 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
7331 BT_ERR("RSSI alert Error: Device not found for handle");
7334 bacpy(&mgmt_ev.bdaddr, &conn->dst);
7336 if (conn->type == LE_LINK)
7337 mgmt_ev.link_type = 0x01;
7339 mgmt_ev.link_type = 0x00;
7341 mgmt_ev.alert_type = alert_type;
7342 mgmt_ev.rssi_dbm = rssi_dbm;
7344 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
7345 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
/* Failure path for Start LE Discovery: resets LE discovery state to STOPPED
 * and completes the pending MGMT_OP_START_LE_DISCOVERY command with the
 * mapped HCI status plus the discovery type.
 * NOTE(review): the !cmd early return and final return are elided here.
 */
7349 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
7351 struct mgmt_pending_cmd *cmd;
7355 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7357 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7361 type = hdev->le_discovery.type;
7363 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7364 mgmt_status(status), &type, sizeof(type));
7365 mgmt_pending_remove(cmd);
/* HCI request completion for Start LE Discovery: on failure reports via
 * mgmt_start_le_discovery_failed(); on success moves LE discovery to
 * FINDING and schedules the le_scan_disable delayed work.
 * NOTE(review): `timeout` is initialised to 0 and the elided lines
 * (7390-7393) presumably assign the real timeout — confirm in full source.
 */
7370 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
7373 unsigned long timeout = 0;
7375 BT_DBG("status %d", status);
7379 mgmt_start_le_discovery_failed(hdev, status);
7380 hci_dev_unlock(hdev);
7385 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
7386 hci_dev_unlock(hdev);
7388 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
7389 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
7394 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_LE_DISCOVERY handler: validates powered state, discovery
 * state (must be STOPPED) and type (must be DISCOV_TYPE_LE), then builds an
 * HCI request that (optionally) stops background scanning, updates the
 * random address, sets scan parameters from hdev and enables active LE scan
 * without duplicate filtering.
 * NOTE(review): goto labels, unlock/return lines and some branch closers are
 * elided in this listing.
 */
7397 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
7398 void *data, u16 len)
7400 struct mgmt_cp_start_le_discovery *cp = data;
7401 struct mgmt_pending_cmd *cmd;
7402 struct hci_cp_le_set_scan_param param_cp;
7403 struct hci_cp_le_set_scan_enable enable_cp;
7404 struct hci_request req;
7405 u8 status, own_addr_type;
7408 BT_DBG("%s", hdev->name);
7412 if (!hdev_is_powered(hdev)) {
7413 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7414 MGMT_STATUS_NOT_POWERED);
7418 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
7419 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7424 if (cp->type != DISCOV_TYPE_LE) {
7425 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7426 MGMT_STATUS_INVALID_PARAMS);
7430 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
7436 hdev->le_discovery.type = cp->type;
7438 hci_req_init(&req, hdev);
/* mgmt_le_support() returns a non-zero mgmt status when LE is unavailable. */
7440 status = mgmt_le_support(hdev);
7442 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7444 mgmt_pending_remove(cmd);
7448 /* If controller is scanning, it means the background scanning
7449 * is running. Thus, we should temporarily stop it in order to
7450 * set the discovery scanning parameters.
7452 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
7453 hci_req_add_le_scan_disable(&req, false);
7455 memset(&param_cp, 0, sizeof(param_cp));
7457 /* All active scans will be done with either a resolvable
7458 * private address (when privacy feature has been enabled)
7459 * or unresolvable private address.
7461 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
7463 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7464 MGMT_STATUS_FAILED);
7465 mgmt_pending_remove(cmd);
7469 param_cp.type = hdev->le_scan_type;
7470 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
7471 param_cp.window = cpu_to_le16(hdev->le_scan_window);
7472 param_cp.own_address_type = own_addr_type;
7473 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
7476 memset(&enable_cp, 0, sizeof(enable_cp));
7477 enable_cp.enable = LE_SCAN_ENABLE;
7478 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
7480 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
7483 err = hci_req_run(&req, start_le_discovery_complete);
7485 mgmt_pending_remove(cmd);
7487 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
7490 hci_dev_unlock(hdev);
/* Failure path for Stop LE Discovery: completes the pending command with the
 * mapped HCI status and the current discovery type, then removes it.
 * NOTE(review): !cmd early return and final return are elided here.
 */
7494 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
7496 struct mgmt_pending_cmd *cmd;
7499 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7503 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7504 mgmt_status(status), &hdev->le_discovery.type,
7505 sizeof(hdev->le_discovery.type));
7506 mgmt_pending_remove(cmd);
/* HCI request completion for Stop LE Discovery: reports failure via
 * mgmt_stop_le_discovery_failed(), otherwise marks discovery STOPPED.
 * NOTE(review): locking and branch lines elided in this listing.
 */
7511 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
7514 BT_DBG("status %d", status);
7519 mgmt_stop_le_discovery_failed(hdev, status);
7523 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7526 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_LE_DISCOVERY handler: rejects if LE discovery is not active
 * or the requested type mismatches, otherwise cancels the scheduled
 * le_scan_disable work, queues an LE scan-disable HCI command and moves the
 * state to STOPPING on success.
 * NOTE(review): goto labels and unlock/return lines elided in this listing.
 */
7529 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
7530 void *data, u16 len)
7532 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
7533 struct mgmt_pending_cmd *cmd;
7534 struct hci_request req;
7537 BT_DBG("%s", hdev->name);
7541 if (!hci_le_discovery_active(hdev)) {
7542 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7543 MGMT_STATUS_REJECTED, &mgmt_cp->type,
7544 sizeof(mgmt_cp->type));
7548 if (hdev->le_discovery.type != mgmt_cp->type) {
7549 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7550 MGMT_STATUS_INVALID_PARAMS,
7551 &mgmt_cp->type, sizeof(mgmt_cp->type));
7555 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
7561 hci_req_init(&req, hdev);
/* Only FINDING is expected here; any other state fails the request. */
7563 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
7564 BT_DBG("unknown le discovery state %u",
7565 hdev->le_discovery.state);
7567 mgmt_pending_remove(cmd);
7568 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7569 MGMT_STATUS_FAILED, &mgmt_cp->type,
7570 sizeof(mgmt_cp->type));
7574 cancel_delayed_work(&hdev->le_scan_disable);
7575 hci_req_add_le_scan_disable(&req, false);
7577 err = hci_req_run(&req, stop_le_discovery_complete);
7579 mgmt_pending_remove(cmd);
7581 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
7584 hci_dev_unlock(hdev);
7588 /* Separate LE discovery */
/* Emits MGMT_EV_DISCOVERING for the separate LE discovery state machine.
 * Also completes whichever Start/Stop LE Discovery command is still pending
 * (looked up in that order) with the current discovery type.
 * NOTE(review): the if/else wiring between the two pending_find calls is
 * partially elided in this listing.
 */
7589 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
7591 struct mgmt_ev_discovering ev;
7592 struct mgmt_pending_cmd *cmd;
7594 BT_DBG("%s le discovering %u", hdev->name, discovering);
7597 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7599 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7602 u8 type = hdev->le_discovery.type;
7604 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
7606 mgmt_pending_remove(cmd);
7609 memset(&ev, 0, sizeof(ev));
7610 ev.type = hdev->le_discovery.type;
7611 ev.discovering = discovering;
7613 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* MGMT handler that cancels a pending LE connection attempt by sending
 * HCI_OP_LE_CREATE_CONN_CANCEL directly (no pending mgmt bookkeeping).
 * `data`/`len` are unused in the visible code.
 */
7616 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
7617 void *data, u16 len)
7621 BT_DBG("%s", hdev->name);
7625 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
7627 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
7629 hci_dev_unlock(hdev);
/* Validates LE connection-update parameters against the spec ranges:
 * interval 6..3200 with min <= max, supervision timeout 10..3200 (10 ms
 * units), timeout strictly greater than max interval * 8, and slave latency
 * capped both at 499 and at (timeout*8/max)-1.
 * NOTE(review): the error-return lines between checks are elided here.
 */
7634 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
7639 if (min > max || min < 6 || max > 3200)
7642 if (to_multiplier < 10 || to_multiplier > 3200)
7645 if (max >= to_multiplier * 8)
7648 max_latency = (to_multiplier * 8 / max) - 1;
7650 if (latency > 499 || latency > max_latency)
/* MGMT_OP_LE_CONN_UPDATE handler: validates parameters with
 * check_le_conn_update_param(), looks up the LE connection under hdev lock,
 * then issues hci_le_conn_update() after dropping the lock and replies
 * with success immediately (the update itself completes asynchronously).
 */
7656 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
7659 struct mgmt_cp_le_conn_update *cp = data;
7661 struct hci_conn *conn;
7662 u16 min, max, latency, supervision_timeout;
7665 if (!hdev_is_powered(hdev))
7666 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7667 MGMT_STATUS_NOT_POWERED);
7669 min = __le16_to_cpu(cp->conn_interval_min);
7670 max = __le16_to_cpu(cp->conn_interval_max);
7671 latency = __le16_to_cpu(cp->conn_latency);
7672 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
7674 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
7675 min, max, latency, supervision_timeout);
7677 err = check_le_conn_update_param(min, max, latency,
7678 supervision_timeout);
7681 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7682 MGMT_STATUS_INVALID_PARAMS);
7686 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
7688 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7689 MGMT_STATUS_NOT_CONNECTED);
7690 hci_dev_unlock(hdev);
7694 hci_dev_unlock(hdev);
7696 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
7698 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
/* HCI request completion for MGMT_OP_SET_MANUFACTURER_DATA: replies with
 * failure status or completes with the original command parameters.
 * NOTE(review): the cp assignment from cmd->param and the status branch
 * lines are partially elided in this listing.
 */
7702 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
7705 struct mgmt_cp_set_manufacturer_data *cp;
7706 struct mgmt_pending_cmd *cmd;
7708 BT_DBG("status 0x%02x", status);
7712 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
7719 mgmt_cmd_status(cmd->sk, hdev->id,
7720 MGMT_OP_SET_MANUFACTURER_DATA,
7721 mgmt_status(status));
7723 mgmt_cmd_complete(cmd->sk, hdev->id,
7724 MGMT_OP_SET_MANUFACTURER_DATA, 0,
7727 mgmt_pending_remove(cmd);
7730 hci_dev_unlock(hdev);
/* MGMT_OP_SET_MANUFACTURER_DATA handler: stores new manufacturer-specific
 * EIR data (cp->data is EIR-framed: data[0] = length, data[1] must be the
 * 0xFF manufacturer-specific type, payload from data[2]) and refreshes the
 * EIR via an HCI request. On request failure the old data is restored
 * (visible in the trailing rollback lines).
 * NOTE(review): goto labels and several unlock/return lines are elided in
 * this listing; the rollback block's label (original ~7812) is not visible.
 */
7733 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
7734 void *data, u16 len)
7736 struct mgmt_pending_cmd *cmd;
7737 struct hci_request req;
7738 struct mgmt_cp_set_manufacturer_data *cp = data;
7739 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
7743 BT_DBG("%s", hdev->name);
7745 if (!lmp_bredr_capable(hdev))
7746 return mgmt_cmd_status(sk, hdev->id,
7747 MGMT_OP_SET_MANUFACTURER_DATA,
7748 MGMT_STATUS_NOT_SUPPORTED);
/* EIR length byte counts the type byte, so payload is data[0] - 1 bytes. */
7750 if (cp->data[0] == 0 ||
7751 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
7752 return mgmt_cmd_status(sk, hdev->id,
7753 MGMT_OP_SET_MANUFACTURER_DATA,
7754 MGMT_STATUS_INVALID_PARAMS);
7756 if (cp->data[1] != 0xFF)
7757 return mgmt_cmd_status(sk, hdev->id,
7758 MGMT_OP_SET_MANUFACTURER_DATA,
7759 MGMT_STATUS_NOT_SUPPORTED);
7763 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
7764 err = mgmt_cmd_status(sk, hdev->id,
7765 MGMT_OP_SET_MANUFACTURER_DATA,
7770 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
7777 hci_req_init(&req, hdev);
7779 /* if new data is same as previous data then return command
7782 if (hdev->manufacturer_len == cp->data[0] - 1 &&
7783 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
7784 mgmt_pending_remove(cmd);
7785 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
7786 0, cp, sizeof(*cp));
/* Snapshot current data so it can be restored if the request fails. */
7791 old_len = hdev->manufacturer_len;
7793 memcpy(old_data, hdev->manufacturer_data, old_len);
7795 hdev->manufacturer_len = cp->data[0] - 1;
7796 if (hdev->manufacturer_len > 0)
7797 memcpy(hdev->manufacturer_data, cp->data + 2,
7798 hdev->manufacturer_len);
7800 __hci_req_update_eir(&req);
7802 err = hci_req_run(&req, set_manufacturer_data_complete);
7804 mgmt_pending_remove(cmd);
7809 hci_dev_unlock(hdev);
/* Rollback path: restore the previous manufacturer data. */
7814 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
7815 hdev->manufacturer_len = old_len;
7816 if (hdev->manufacturer_len > 0)
7817 memcpy(hdev->manufacturer_data, old_data,
7818 hdev->manufacturer_len);
7819 hci_dev_unlock(hdev);
/* MGMT_OP_LE_SET_SCAN_PARAMS handler: validates interval and window against
 * the spec range 0x0004..0x4000 (and window <= interval), stores them in
 * hdev, replies, and restarts passive background scanning if it is running
 * and no discovery is in progress so the new parameters take effect.
 */
7823 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
7824 void *data, u16 len)
7826 struct mgmt_cp_le_set_scan_params *cp = data;
7827 __u16 interval, window;
7830 BT_DBG("%s", hdev->name);
7832 if (!lmp_le_capable(hdev))
7833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7834 MGMT_STATUS_NOT_SUPPORTED);
7836 interval = __le16_to_cpu(cp->interval);
7838 if (interval < 0x0004 || interval > 0x4000)
7839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7840 MGMT_STATUS_INVALID_PARAMS);
7842 window = __le16_to_cpu(cp->window);
7844 if (window < 0x0004 || window > 0x4000)
7845 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7846 MGMT_STATUS_INVALID_PARAMS);
7848 if (window > interval)
7849 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7850 MGMT_STATUS_INVALID_PARAMS);
7854 hdev->le_scan_type = cp->type;
7855 hdev->le_scan_interval = interval;
7856 hdev->le_scan_window = window;
7858 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
7861 /* If background scan is running, restart it so new parameters are
7864 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
7865 hdev->discovery.state == DISCOVERY_STOPPED) {
7866 struct hci_request req;
7868 hci_req_init(&req, hdev);
7870 hci_req_add_le_scan_disable(&req, false);
7871 hci_req_add_le_passive_scan(&req);
7873 hci_req_run(&req, NULL);
7876 hci_dev_unlock(hdev);
/* MGMT_OP_SET_VOICE_SETTING handler: records voice setting and SCO role on
 * the ACL connection, then selects wide-band (voice_setting 0x0063) or
 * narrow-band SCO configuration, in handsfree or gateway variant depending
 * on sco_role. Refuses to reconfigure while another SCO connection to a
 * different peer exists.
 * NOTE(review): goto labels, else lines and returns are elided here.
 */
7881 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
7882 void *data, u16 len)
7884 struct mgmt_cp_set_voice_setting *cp = data;
7885 struct hci_conn *conn;
7886 struct hci_conn *sco_conn;
7890 BT_DBG("%s", hdev->name);
7892 if (!lmp_bredr_capable(hdev)) {
7893 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
7894 MGMT_STATUS_NOT_SUPPORTED);
7899 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
7901 err = mgmt_cmd_complete(sk, hdev->id,
7902 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
7906 conn->voice_setting = cp->voice_setting;
7907 conn->sco_role = cp->sco_role;
7909 sco_conn = hci_conn_hash_lookup_sco(hdev);
7910 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
7911 BT_ERR("There is other SCO connection.");
/* 0x0063 selects wide-band codec settings; anything else narrow-band. */
7915 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
7916 if (conn->voice_setting == 0x0063)
7917 sco_connect_set_wbc(hdev);
7919 sco_connect_set_nbc(hdev);
7921 if (conn->voice_setting == 0x0063)
7922 sco_connect_set_gw_wbc(hdev);
7924 sco_connect_set_gw_nbc(hdev);
7928 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
7932 hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_TX_POWER handler: replies with the cached advertising TX
 * power from hdev. The reply struct is heap-allocated.
 * NOTE(review): the NULL-check after kmalloc and the kfree are elided in
 * this listing — confirm both exist in the full source.
 */
7936 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
7937 void *data, u16 len)
7939 struct mgmt_rp_get_adv_tx_power *rp;
7943 BT_DBG("%s", hdev->name);
7947 rp_len = sizeof(*rp);
7948 rp = kmalloc(rp_len, GFP_KERNEL);
7954 rp->adv_tx_power = hdev->adv_tx_power;
7956 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
7962 hci_dev_unlock(hdev);
/* Forwards an HCI Hardware Error event code to userspace as
 * MGMT_EV_HARDWARE_ERROR. */
7967 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
7969 struct mgmt_ev_hardware_error ev;
7971 ev.error_code = err_code;
7972 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
/* Emits the parameter-less MGMT_EV_TX_TIMEOUT_ERROR event. */
7975 void mgmt_tx_timeout_error(struct hci_dev *hdev)
7977 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
/* Forwards a vendor multi-advertising state-change HCI event to userspace
 * as MGMT_EV_MULTI_ADV_STATE_CHANGED (instance, reason, conn handle copied
 * through verbatim). */
7980 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
7981 u8 state_change_reason, u16 connection_handle)
7983 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
7985 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
7986 adv_instance, state_change_reason, connection_handle);
7988 mgmt_ev.adv_instance = adv_instance;
7989 mgmt_ev.state_change_reason = state_change_reason;
7990 mgmt_ev.connection_handle = connection_handle;
7992 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
7993 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
7996 #endif /* TIZEN_BT */
/* Validates one LTK entry from Load Long Term Keys: initiator must be 0/1,
 * address type LE public or LE random; a random (static) address must have
 * its two most significant bits set per the Core spec.
 * NOTE(review): the return false/true lines and switch default are elided
 * in this listing.
 */
7998 static bool ltk_is_valid(struct mgmt_ltk_info *key)
8000 if (key->initiator != 0x00 && key->initiator != 0x01)
8003 switch (key->addr.type) {
8004 case BDADDR_LE_PUBLIC:
8007 case BDADDR_LE_RANDOM:
8008 /* Two most significant bits shall be set */
8009 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: validates key_count against the
 * overflow-safe maximum and the exact struct_size() payload length,
 * validates each key with ltk_is_valid(), clears the existing SMP LTK list,
 * then adds every non-blocked key with type/authenticated derived from the
 * mgmt key type. Replies with success once all keys are stored.
 * NOTE(review): break statements between switch cases, the default case and
 * several closing braces are elided in this listing.
 */
8017 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
8018 void *cp_data, u16 len)
8020 struct mgmt_cp_load_long_term_keys *cp = cp_data;
8021 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
8022 sizeof(struct mgmt_ltk_info));
8023 u16 key_count, expected_len;
8026 bt_dev_dbg(hdev, "sock %p", sk);
8028 if (!lmp_le_capable(hdev))
8029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8030 MGMT_STATUS_NOT_SUPPORTED);
8032 key_count = __le16_to_cpu(cp->key_count);
8033 if (key_count > max_key_count) {
8034 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
8036 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8037 MGMT_STATUS_INVALID_PARAMS);
8040 expected_len = struct_size(cp, keys, key_count);
8041 if (expected_len != len) {
8042 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
8044 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8045 MGMT_STATUS_INVALID_PARAMS);
8048 bt_dev_dbg(hdev, "key_count %u", key_count);
/* First pass: reject the whole load if any entry is malformed. */
8050 for (i = 0; i < key_count; i++) {
8051 struct mgmt_ltk_info *key = &cp->keys[i];
8053 if (!ltk_is_valid(key))
8054 return mgmt_cmd_status(sk, hdev->id,
8055 MGMT_OP_LOAD_LONG_TERM_KEYS,
8056 MGMT_STATUS_INVALID_PARAMS);
8061 hci_smp_ltks_clear(hdev);
8063 for (i = 0; i < key_count; i++) {
8064 struct mgmt_ltk_info *key = &cp->keys[i];
8065 u8 type, authenticated;
8067 if (hci_is_blocked_key(hdev,
8068 HCI_BLOCKED_KEY_TYPE_LTK,
8070 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map mgmt LTK type to SMP key type + authenticated flag. */
8075 switch (key->type) {
8076 case MGMT_LTK_UNAUTHENTICATED:
8077 authenticated = 0x00;
8078 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8080 case MGMT_LTK_AUTHENTICATED:
8081 authenticated = 0x01;
8082 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8084 case MGMT_LTK_P256_UNAUTH:
8085 authenticated = 0x00;
8086 type = SMP_LTK_P256;
8088 case MGMT_LTK_P256_AUTH:
8089 authenticated = 0x01;
8090 type = SMP_LTK_P256;
8092 case MGMT_LTK_P256_DEBUG:
8093 authenticated = 0x00;
8094 type = SMP_LTK_P256_DEBUG;
8100 hci_add_ltk(hdev, &key->addr.bdaddr,
8101 le_addr_type(key->addr.type), type, authenticated,
8102 key->val, key->enc_size, key->ediv, key->rand);
8105 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
8108 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Connection Information: replies with the
 * connection's cached rssi/tx_power/max_tx_power on success or the INVALID
 * sentinels on failure, then drops the held connection reference.
 * NOTE(review): memset of rp and hci_conn_put are among the elided lines.
 */
8113 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8115 struct hci_conn *conn = cmd->user_data;
8116 struct mgmt_rp_get_conn_info rp;
8119 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8121 if (status == MGMT_STATUS_SUCCESS) {
8122 rp.rssi = conn->rssi;
8123 rp.tx_power = conn->tx_power;
8124 rp.max_tx_power = conn->max_tx_power;
8126 rp.rssi = HCI_RSSI_INVALID;
8127 rp.tx_power = HCI_TX_POWER_INVALID;
8128 rp.max_tx_power = HCI_TX_POWER_INVALID;
8131 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
8132 status, &rp, sizeof(rp));
8134 hci_conn_drop(conn);
/* HCI request completion for the RSSI/TX-power refresh issued by
 * get_conn_info(): recovers the connection handle from whichever command
 * (Read RSSI or Read TX Power) was sent last, finds the matching pending
 * mgmt command and finishes it via its cmd_complete callback.
 */
8140 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
8143 struct hci_cp_read_rssi *cp;
8144 struct mgmt_pending_cmd *cmd;
8145 struct hci_conn *conn;
8149 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
8153 /* Commands sent in request are either Read RSSI or Read Transmit Power
8154 * Level so we check which one was last sent to retrieve connection
8155 * handle. Both commands have handle as first parameter so it's safe to
8156 * cast data on the same command struct.
8158 * First command sent is always Read RSSI and we fail only if it fails.
8159 * In other case we simply override error to indicate success as we
8160 * already remembered if TX power value is actually valid.
8162 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
8164 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
8165 status = MGMT_STATUS_SUCCESS;
8167 status = mgmt_status(hci_status);
8171 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
8175 handle = __le16_to_cpu(cp->handle);
8176 conn = hci_conn_hash_lookup_handle(hdev, handle);
8178 bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
8183 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
8187 cmd->cmd_complete(cmd, status);
8188 mgmt_pending_remove(cmd);
8191 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: returns RSSI / TX power / max TX power for
 * a connected device. If the cached values are older than a randomized
 * min..max age window, builds an HCI request (Read RSSI, plus Read TX Power
 * for BR/EDR or LE links with unknown power, plus one-time max TX power
 * read) and defers the reply to conn_info_refresh_complete(); otherwise
 * replies straight from the cache.
 * NOTE(review): goto labels, unlock/return lines and the mgmt_pending_add
 * failure branch are elided in this listing.
 */
8194 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
8197 struct mgmt_cp_get_conn_info *cp = data;
8198 struct mgmt_rp_get_conn_info rp;
8199 struct hci_conn *conn;
8200 unsigned long conn_info_age;
8203 bt_dev_dbg(hdev, "sock %p", sk);
8205 memset(&rp, 0, sizeof(rp));
8206 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8207 rp.addr.type = cp->addr.type;
8209 if (!bdaddr_type_is_valid(cp->addr.type))
8210 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8211 MGMT_STATUS_INVALID_PARAMS,
8216 if (!hdev_is_powered(hdev)) {
8217 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8218 MGMT_STATUS_NOT_POWERED, &rp,
8223 if (cp->addr.type == BDADDR_BREDR)
8224 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8227 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8229 if (!conn || conn->state != BT_CONNECTED) {
8230 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8231 MGMT_STATUS_NOT_CONNECTED, &rp,
8236 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
8237 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8238 MGMT_STATUS_BUSY, &rp, sizeof(rp));
8242 /* To avoid client trying to guess when to poll again for information we
8243 * calculate conn info age as random value between min/max set in hdev.
8245 conn_info_age = hdev->conn_info_min_age +
8246 prandom_u32_max(hdev->conn_info_max_age -
8247 hdev->conn_info_min_age);
8249 /* Query controller to refresh cached values if they are too old or were
8252 if (time_after(jiffies, conn->conn_info_timestamp +
8253 msecs_to_jiffies(conn_info_age)) ||
8254 !conn->conn_info_timestamp) {
8255 struct hci_request req;
8256 struct hci_cp_read_tx_power req_txp_cp;
8257 struct hci_cp_read_rssi req_rssi_cp;
8258 struct mgmt_pending_cmd *cmd;
8260 hci_req_init(&req, hdev);
8261 req_rssi_cp.handle = cpu_to_le16(conn->handle);
8262 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
8265 /* For LE links TX power does not change thus we don't need to
8266 * query for it once value is known.
8268 if (!bdaddr_type_is_le(cp->addr.type) ||
8269 conn->tx_power == HCI_TX_POWER_INVALID) {
8270 req_txp_cp.handle = cpu_to_le16(conn->handle);
8271 req_txp_cp.type = 0x00;
8272 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8273 sizeof(req_txp_cp), &req_txp_cp);
8276 /* Max TX power needs to be read only once per connection */
8277 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
8278 req_txp_cp.handle = cpu_to_le16(conn->handle);
8279 req_txp_cp.type = 0x01;
8280 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8281 sizeof(req_txp_cp), &req_txp_cp);
8284 err = hci_req_run(&req, conn_info_refresh_complete);
8288 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection while the async refresh is in flight. */
8295 hci_conn_hold(conn);
8296 cmd->user_data = hci_conn_get(conn);
8297 cmd->cmd_complete = conn_info_cmd_complete;
8299 conn->conn_info_timestamp = jiffies;
8301 /* Cache is valid, just reply with values cached in hci_conn */
8302 rp.rssi = conn->rssi;
8303 rp.tx_power = conn->tx_power;
8304 rp.max_tx_power = conn->max_tx_power;
8306 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8307 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8311 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Clock Information: fills local clock (and
 * piconet clock/accuracy when a connection was supplied), replies, then
 * releases references.
 * NOTE(review): status check, hci_dev_put and the conn NULL-guard lines are
 * elided in this listing.
 */
8315 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8317 struct hci_conn *conn = cmd->user_data;
8318 struct mgmt_rp_get_clock_info rp;
8319 struct hci_dev *hdev;
8322 memset(&rp, 0, sizeof(rp));
8323 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8328 hdev = hci_dev_get(cmd->index);
8330 rp.local_clock = cpu_to_le32(hdev->clock);
8335 rp.piconet_clock = cpu_to_le32(conn->clock);
8336 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
8340 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
8344 hci_conn_drop(conn);
/* HCI request completion for Get Clock Information: if the last Read Clock
 * targeted a connection (which != 0), resolves its handle; then finds the
 * matching pending command and finishes it with the mapped status.
 */
8351 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8353 struct hci_cp_read_clock *hci_cp;
8354 struct mgmt_pending_cmd *cmd;
8355 struct hci_conn *conn;
8357 bt_dev_dbg(hdev, "status %u", status);
8361 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
8365 if (hci_cp->which) {
8366 u16 handle = __le16_to_cpu(hci_cp->handle);
8367 conn = hci_conn_hash_lookup_handle(hdev, handle);
8372 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
8376 cmd->cmd_complete(cmd, mgmt_status(status));
8377 mgmt_pending_remove(cmd);
8380 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only): always reads the local
 * clock (which = 0); when a non-ANY peer address is given and connected,
 * additionally reads that connection's piconet clock (which = 0x01) and
 * holds a connection reference for the async completion.
 * NOTE(review): goto labels, unlock/return lines and the pending_add
 * failure branch are elided in this listing.
 */
8383 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
8386 struct mgmt_cp_get_clock_info *cp = data;
8387 struct mgmt_rp_get_clock_info rp;
8388 struct hci_cp_read_clock hci_cp;
8389 struct mgmt_pending_cmd *cmd;
8390 struct hci_request req;
8391 struct hci_conn *conn;
8394 bt_dev_dbg(hdev, "sock %p", sk);
8396 memset(&rp, 0, sizeof(rp));
8397 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8398 rp.addr.type = cp->addr.type;
8400 if (cp->addr.type != BDADDR_BREDR)
8401 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8402 MGMT_STATUS_INVALID_PARAMS,
8407 if (!hdev_is_powered(hdev)) {
8408 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8409 MGMT_STATUS_NOT_POWERED, &rp,
8414 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8415 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8417 if (!conn || conn->state != BT_CONNECTED) {
8418 err = mgmt_cmd_complete(sk, hdev->id,
8419 MGMT_OP_GET_CLOCK_INFO,
8420 MGMT_STATUS_NOT_CONNECTED,
8428 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
8434 cmd->cmd_complete = clock_info_cmd_complete;
8436 hci_req_init(&req, hdev);
/* First Read Clock: which = 0 reads the local clock. */
8438 memset(&hci_cp, 0, sizeof(hci_cp));
8439 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8442 hci_conn_hold(conn);
8443 cmd->user_data = hci_conn_get(conn);
8445 hci_cp.handle = cpu_to_le16(conn->handle);
8446 hci_cp.which = 0x01; /* Piconet clock */
8447 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8450 err = hci_req_run(&req, get_clock_info_complete);
8452 mgmt_pending_remove(cmd);
8455 hci_dev_unlock(hdev);
/* Returns whether an LE connection to addr with matching dst_type exists
 * and is in BT_CONNECTED state.
 * NOTE(review): the return true/false lines are elided in this listing.
 */
8459 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8461 struct hci_conn *conn;
8463 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8467 if (conn->dst_type != type)
8470 if (conn->state != BT_CONNECTED)
8476 /* This function requires the caller holds hdev->lock */
/* This function requires the caller holds hdev->lock.
 * Creates (or finds) conn params for addr/addr_type and moves the entry
 * onto the correct action list for the requested auto_connect policy:
 * pend_le_conns for explicit/direct/always connects, pend_le_reports for
 * report mode. No-op if the policy is unchanged.
 * NOTE(review): break statements, the !params error return and the final
 * return are elided in this listing.
 */
8477 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8478 u8 addr_type, u8 auto_connect)
8480 struct hci_conn_params *params;
8482 params = hci_conn_params_add(hdev, addr, addr_type);
8486 if (params->auto_connect == auto_connect)
8489 list_del_init(&params->action);
8491 switch (auto_connect) {
8492 case HCI_AUTO_CONN_DISABLED:
8493 case HCI_AUTO_CONN_LINK_LOSS:
8494 /* If auto connect is being disabled when we're trying to
8495 * connect to device, keep connecting.
8497 if (params->explicit_connect)
8498 list_add(&params->action, &hdev->pend_le_conns);
8500 case HCI_AUTO_CONN_REPORT:
8501 if (params->explicit_connect)
8502 list_add(&params->action, &hdev->pend_le_conns);
8504 list_add(&params->action, &hdev->pend_le_reports);
8506 case HCI_AUTO_CONN_DIRECT:
8507 case HCI_AUTO_CONN_ALWAYS:
8508 if (!is_connected(hdev, addr, addr_type))
8509 list_add(&params->action, &hdev->pend_le_conns);
8513 params->auto_connect = auto_connect;
8515 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
8516 addr, addr_type, auto_connect);
/* Emits MGMT_EV_DEVICE_ADDED to all sockets except the originator sk.
 * NOTE(review): the ev.action assignment (original 8528) is elided in this
 * listing.
 */
8521 static void device_added(struct sock *sk, struct hci_dev *hdev,
8522 bdaddr_t *bdaddr, u8 type, u8 action)
8524 struct mgmt_ev_device_added ev;
8526 bacpy(&ev.addr.bdaddr, bdaddr);
8527 ev.addr.type = type;
8530 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler. Validates the address type and requested
 * action (0x00 background-scan/report, 0x01 allow-incoming/direct,
 * 0x02 auto-connect), then either adds a BR/EDR accept-list entry or
 * sets the LE auto-connect policy through hci_conn_params_set().
 * Finishes by emitting Device Added and Device Flags Changed events.
 * NOTE(review): several interior lines (lock/unlock, goto labels, else
 * branches) are elided in this listing.
 */
8533 static int add_device(struct sock *sk, struct hci_dev *hdev,
8534 void *data, u16 len)
8536 struct mgmt_cp_add_device *cp = data;
8537 u8 auto_conn, addr_type;
8538 struct hci_conn_params *params;
8540 u32 current_flags = 0;
8542 bt_dev_dbg(hdev, "sock %p", sk);
8544 if (!bdaddr_type_is_valid(cp->addr.type) ||
8545 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
8546 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8547 MGMT_STATUS_INVALID_PARAMS,
8548 &cp->addr, sizeof(cp->addr));
8550 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
8551 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8552 MGMT_STATUS_INVALID_PARAMS,
8553 &cp->addr, sizeof(cp->addr));
8557 if (cp->addr.type == BDADDR_BREDR) {
8558 /* Only incoming connections action is supported for now */
8559 if (cp->action != 0x01) {
8560 err = mgmt_cmd_complete(sk, hdev->id,
8562 MGMT_STATUS_INVALID_PARAMS,
8563 &cp->addr, sizeof(cp->addr));
8567 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
8573 hci_req_update_scan(hdev);
8578 addr_type = le_addr_type(cp->addr.type);
8580 if (cp->action == 0x02)
8581 auto_conn = HCI_AUTO_CONN_ALWAYS;
8582 else if (cp->action == 0x01)
8583 auto_conn = HCI_AUTO_CONN_DIRECT;
8585 auto_conn = HCI_AUTO_CONN_REPORT;
8587 /* Kernel internally uses conn_params with resolvable private
8588 * address, but Add Device allows only identity addresses.
8589 * Make sure it is enforced before calling
8590 * hci_conn_params_lookup.
8592 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8593 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8594 MGMT_STATUS_INVALID_PARAMS,
8595 &cp->addr, sizeof(cp->addr));
8599 /* If the connection parameters don't exist for this device,
8600 * they will be created and configured with defaults.
8602 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
8604 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8605 MGMT_STATUS_FAILED, &cp->addr,
8609 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8612 current_flags = params->current_flags;
8615 hci_update_background_scan(hdev);
8618 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
8619 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
8620 SUPPORTED_DEVICE_FLAGS(), current_flags);
8622 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8623 MGMT_STATUS_SUCCESS, &cp->addr,
8627 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event for @bdaddr/@type, skipping the
 * originating socket @sk.
 */
8631 static void device_removed(struct sock *sk, struct hci_dev *hdev,
8632 bdaddr_t *bdaddr, u8 type)
8634 struct mgmt_ev_device_removed ev;
8636 bacpy(&ev.addr.bdaddr, bdaddr);
8637 ev.addr.type = type;
8639 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_REMOVE_DEVICE handler. A specific address removes one BR/EDR
 * accept-list entry or one LE conn_params entry; BDADDR_ANY clears the
 * whole accept list and every non-disabled LE conn_params entry.
 * Fix: restored "&params" where HTML-entity corruption had turned the
 * leading "&pa" into the "¶" (pilcrow) character.
 * NOTE(review): interior lines (lock/unlock, goto labels, else arms)
 * are elided in this listing.
 */
8642 static int remove_device(struct sock *sk, struct hci_dev *hdev,
8643 void *data, u16 len)
8645 struct mgmt_cp_remove_device *cp = data;
8648 bt_dev_dbg(hdev, "sock %p", sk);
8652 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8653 struct hci_conn_params *params;
8656 if (!bdaddr_type_is_valid(cp->addr.type)) {
8657 err = mgmt_cmd_complete(sk, hdev->id,
8658 MGMT_OP_REMOVE_DEVICE,
8659 MGMT_STATUS_INVALID_PARAMS,
8660 &cp->addr, sizeof(cp->addr));
8664 if (cp->addr.type == BDADDR_BREDR) {
8665 err = hci_bdaddr_list_del(&hdev->accept_list,
8669 err = mgmt_cmd_complete(sk, hdev->id,
8670 MGMT_OP_REMOVE_DEVICE,
8671 MGMT_STATUS_INVALID_PARAMS,
8677 hci_req_update_scan(hdev);
8679 device_removed(sk, hdev, &cp->addr.bdaddr,
8684 addr_type = le_addr_type(cp->addr.type);
8686 /* Kernel internally uses conn_params with resolvable private
8687 * address, but Remove Device allows only identity addresses.
8688 * Make sure it is enforced before calling
8689 * hci_conn_params_lookup.
8691 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8692 err = mgmt_cmd_complete(sk, hdev->id,
8693 MGMT_OP_REMOVE_DEVICE,
8694 MGMT_STATUS_INVALID_PARAMS,
8695 &cp->addr, sizeof(cp->addr));
8699 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8702 err = mgmt_cmd_complete(sk, hdev->id,
8703 MGMT_OP_REMOVE_DEVICE,
8704 MGMT_STATUS_INVALID_PARAMS,
8705 &cp->addr, sizeof(cp->addr));
8709 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
8710 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
8711 err = mgmt_cmd_complete(sk, hdev->id,
8712 MGMT_OP_REMOVE_DEVICE,
8713 MGMT_STATUS_INVALID_PARAMS,
8714 &cp->addr, sizeof(cp->addr));
8718 list_del(&params->action);
8719 list_del(&params->list);
8721 hci_update_background_scan(hdev);
8723 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
8725 struct hci_conn_params *p, *tmp;
8726 struct bdaddr_list *b, *btmp;
8728 if (cp->addr.type) {
8729 err = mgmt_cmd_complete(sk, hdev->id,
8730 MGMT_OP_REMOVE_DEVICE,
8731 MGMT_STATUS_INVALID_PARAMS,
8732 &cp->addr, sizeof(cp->addr));
8736 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
8737 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
8742 hci_req_update_scan(hdev);
8744 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
8745 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
8747 device_removed(sk, hdev, &p->addr, p->addr_type);
8748 if (p->explicit_connect) {
8749 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
8752 list_del(&p->action);
8757 bt_dev_dbg(hdev, "All LE connection parameters were removed");
8759 hci_update_background_scan(hdev);
8763 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
8764 MGMT_STATUS_SUCCESS, &cp->addr,
8767 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler. Validates the parameter count and
 * total command length, clears previously disabled entries, then stores
 * each valid (min, max, latency, timeout) tuple via
 * hci_conn_params_add(); malformed entries are logged and skipped.
 * Fix: restored "&param" where HTML-entity corruption had turned the
 * leading "&pa" into the "¶" (pilcrow) character.
 * NOTE(review): interior lines (lock, continue statements, braces) are
 * elided in this listing.
 */
8771 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
8774 struct mgmt_cp_load_conn_param *cp = data;
8775 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
8776 sizeof(struct mgmt_conn_param));
8777 u16 param_count, expected_len;
8780 if (!lmp_le_capable(hdev))
8781 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8782 MGMT_STATUS_NOT_SUPPORTED);
8784 param_count = __le16_to_cpu(cp->param_count);
8785 if (param_count > max_param_count) {
8786 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
8788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8789 MGMT_STATUS_INVALID_PARAMS);
8792 expected_len = struct_size(cp, params, param_count);
8793 if (expected_len != len) {
8794 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
8796 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8797 MGMT_STATUS_INVALID_PARAMS);
8800 bt_dev_dbg(hdev, "param_count %u", param_count);
8804 hci_conn_params_clear_disabled(hdev);
8806 for (i = 0; i < param_count; i++) {
8807 struct mgmt_conn_param *param = &cp->params[i];
8808 struct hci_conn_params *hci_param;
8809 u16 min, max, latency, timeout;
8812 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
8815 if (param->addr.type == BDADDR_LE_PUBLIC) {
8816 addr_type = ADDR_LE_DEV_PUBLIC;
8817 } else if (param->addr.type == BDADDR_LE_RANDOM) {
8818 addr_type = ADDR_LE_DEV_RANDOM;
8820 bt_dev_err(hdev, "ignoring invalid connection parameters");
8824 min = le16_to_cpu(param->min_interval);
8825 max = le16_to_cpu(param->max_interval);
8826 latency = le16_to_cpu(param->latency);
8827 timeout = le16_to_cpu(param->timeout);
8829 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
8830 min, max, latency, timeout);
8832 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
8833 bt_dev_err(hdev, "ignoring invalid connection parameters");
8837 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
8840 bt_dev_err(hdev, "failed to add connection parameters");
8844 hci_param->conn_min_interval = min;
8845 hci_param->conn_max_interval = max;
8846 hci_param->conn_latency = latency;
8847 hci_param->supervision_timeout = timeout;
8850 hci_dev_unlock(hdev);
8852 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler. Only valid while powered off and
 * when the controller declares HCI_QUIRK_EXTERNAL_CONFIG; toggles the
 * HCI_EXT_CONFIGURED flag and, when the configured state changed,
 * moves the index between the configured and unconfigured lists.
 * NOTE(review): interior lines (lock, early "if (!changed) goto",
 * braces) are elided in this listing.
 */
8856 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8857 void *data, u16 len)
8859 struct mgmt_cp_set_external_config *cp = data;
8863 bt_dev_dbg(hdev, "sock %p", sk);
8865 if (hdev_is_powered(hdev))
8866 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8867 MGMT_STATUS_REJECTED);
8869 if (cp->config != 0x00 && cp->config != 0x01)
8870 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8871 MGMT_STATUS_INVALID_PARAMS);
8873 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8874 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8875 MGMT_STATUS_NOT_SUPPORTED);
8880 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8882 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8884 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8891 err = new_options(hdev, sk);
8893 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8894 mgmt_index_removed(hdev);
8896 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8897 hci_dev_set_flag(hdev, HCI_CONFIG);
8898 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8900 queue_work(hdev->req_workqueue, &hdev->power_on);
8902 set_bit(HCI_RAW, &hdev->flags);
8903 mgmt_index_added(hdev);
8908 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler. Only valid while powered off and
 * when the driver provides a set_bdaddr callback; stores the address in
 * hdev->public_addr and, if that completes configuration of an
 * unconfigured controller, re-registers the index and powers on.
 * NOTE(review): interior lines (lock, early "if (!changed) goto",
 * braces) are elided in this listing.
 */
8912 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8913 void *data, u16 len)
8915 struct mgmt_cp_set_public_address *cp = data;
8919 bt_dev_dbg(hdev, "sock %p", sk);
8921 if (hdev_is_powered(hdev))
8922 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8923 MGMT_STATUS_REJECTED);
8925 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8926 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8927 MGMT_STATUS_INVALID_PARAMS);
8929 if (!hdev->set_bdaddr)
8930 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8931 MGMT_STATUS_NOT_SUPPORTED);
8935 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8936 bacpy(&hdev->public_addr, &cp->bdaddr);
8938 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8945 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8946 err = new_options(hdev, sk);
8948 if (is_configured(hdev)) {
8949 mgmt_index_removed(hdev);
8951 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8953 hci_dev_set_flag(hdev, HCI_CONFIG);
8954 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8956 queue_work(hdev->req_workqueue, &hdev->power_on);
8960 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_NAME_UPDATE for a BR/EDR remote whose name
 * was (re)read; the name is packed as an EIR_NAME_COMPLETE field.
 * NOTE(review): the buf declaration and name_len handling are elided in
 * this listing.
 */
8965 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
8969 struct mgmt_ev_device_name_update *ev = (void *)buf;
8975 bacpy(&ev->addr.bdaddr, bdaddr);
8976 ev->addr.type = BDADDR_BREDR;
8978 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8981 ev->eir_len = cpu_to_le16(eir_len);
8983 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
8984 sizeof(*ev) + eir_len, NULL);
/* Broadcast MGMT_EV_CONN_UPDATE_FAILED (Tizen extension) after an LE
 * connection-parameter update failed with @status.
 */
8987 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8988 u8 link_type, u8 addr_type, u8 status)
8990 struct mgmt_ev_conn_update_failed ev;
8992 bacpy(&ev.addr.bdaddr, bdaddr);
8993 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8996 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
8997 &ev, sizeof(ev), NULL);
/* Broadcast MGMT_EV_CONN_UPDATED (Tizen extension) carrying the newly
 * negotiated LE connection interval, latency and supervision timeout.
 */
9000 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
9001 u8 link_type, u8 addr_type, u16 conn_interval,
9002 u16 conn_latency, u16 supervision_timeout)
9004 struct mgmt_ev_conn_updated ev;
9006 bacpy(&ev.addr.bdaddr, bdaddr);
9007 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9008 ev.conn_interval = cpu_to_le16(conn_interval);
9009 ev.conn_latency = cpu_to_le16(conn_latency);
9010 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
9012 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
9013 &ev, sizeof(ev), NULL);
9016 /* le device found event - Pass adv type */
/* Build and broadcast MGMT_EV_LE_DEVICE_FOUND: EIR data, optional
 * Class-of-Device field and scan-response data are concatenated into
 * one EIR blob. Dropped when no (LE) discovery is active or the event
 * would not fit in the stack buffer.
 * NOTE(review): the buf declaration, rssi assignment and some braces
 * are elided in this listing.
 */
9017 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9018 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
9019 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
9022 struct mgmt_ev_le_device_found *ev = (void *)buf;
9025 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
9028 /* Make sure that the buffer is big enough. The 5 extra bytes
9029 * are for the potential CoD field.
9031 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9034 memset(buf, 0, sizeof(buf));
9036 bacpy(&ev->addr.bdaddr, bdaddr);
9037 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9039 ev->flags = cpu_to_le32(flags);
9040 ev->adv_type = adv_type;
9043 memcpy(ev->eir, eir, eir_len);
9045 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
9046 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9049 if (scan_rsp_len > 0)
9050 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9052 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9053 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9055 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Completion callback for the Read Local OOB (Extended) Data HCI
 * request issued by read_local_ssp_oob_req(). Translates the HCI reply
 * into an EIR blob (CoD + C192/R192 and, when SC is available, C256/
 * R256), answers the pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command
 * and emits MGMT_EV_LOCAL_OOB_DATA_UPDATED to subscribed sockets.
 * NOTE(review): the assignments of h192/r192/h256/r256 and several
 * goto/brace lines are elided in this listing.
 */
9059 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
9060 u16 opcode, struct sk_buff *skb)
9062 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
9063 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
9064 u8 *h192, *r192, *h256, *r256;
9065 struct mgmt_pending_cmd *cmd;
9069 bt_dev_dbg(hdev, "status %u", status);
9071 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
9075 mgmt_cp = cmd->param;
9078 status = mgmt_status(status);
9085 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
9086 struct hci_rp_read_local_oob_data *rp;
9088 if (skb->len != sizeof(*rp)) {
9089 status = MGMT_STATUS_FAILED;
9092 status = MGMT_STATUS_SUCCESS;
9093 rp = (void *)skb->data;
/* 5 bytes CoD field + two 18-byte EIR fields (C192, R192). */
9095 eir_len = 5 + 18 + 18;
9102 struct hci_rp_read_local_oob_ext_data *rp;
9104 if (skb->len != sizeof(*rp)) {
9105 status = MGMT_STATUS_FAILED;
9108 status = MGMT_STATUS_SUCCESS;
9109 rp = (void *)skb->data;
9111 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
9112 eir_len = 5 + 18 + 18;
9116 eir_len = 5 + 18 + 18 + 18 + 18;
9126 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
9133 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
9134 hdev->dev_class, 3);
9137 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9138 EIR_SSP_HASH_C192, h192, 16);
9139 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9140 EIR_SSP_RAND_R192, r192, 16);
9144 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9145 EIR_SSP_HASH_C256, h256, 16);
9146 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9147 EIR_SSP_RAND_R256, r256, 16);
9151 mgmt_rp->type = mgmt_cp->type;
9152 mgmt_rp->eir_len = cpu_to_le16(eir_len);
9154 err = mgmt_cmd_complete(cmd->sk, hdev->id,
9155 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
9156 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
9157 if (err < 0 || status)
9160 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
9162 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9163 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
9164 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
9167 mgmt_pending_remove(cmd);
/* Queue the HCI Read Local OOB (Extended when BR/EDR SC is enabled)
 * Data request for a pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command;
 * completion is handled in read_local_oob_ext_data_complete().
 * NOTE(review): the mgmt_pending_add() NULL check and return lines are
 * elided in this listing.
 */
9170 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
9171 struct mgmt_cp_read_local_oob_ext_data *cp)
9173 struct mgmt_pending_cmd *cmd;
9174 struct hci_request req;
9177 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
9182 hci_req_init(&req, hdev);
9184 if (bredr_sc_enabled(hdev))
9185 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
9187 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
9189 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
9191 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler. For BR/EDR it defers to the
 * controller via read_local_ssp_oob_req() when SSP is enabled; for LE
 * it assembles the OOB EIR blob in place (address, role, SC confirm/
 * random values, flags). Responds to the caller and emits
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED to subscribed sockets.
 * NOTE(review): numerous interior lines (lock, goto labels, rand[]
 * cleanup, default branches) are elided in this listing.
 */
9198 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
9199 void *data, u16 data_len)
9201 struct mgmt_cp_read_local_oob_ext_data *cp = data;
9202 struct mgmt_rp_read_local_oob_ext_data *rp;
9205 u8 status, flags, role, addr[7], hash[16], rand[16];
9208 bt_dev_dbg(hdev, "sock %p", sk);
9210 if (hdev_is_powered(hdev)) {
9212 case BIT(BDADDR_BREDR):
9213 status = mgmt_bredr_support(hdev);
9219 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9220 status = mgmt_le_support(hdev);
/* LE blob: addr(9) + role(3) + confirm(18) + random(18) + flags(3). */
9224 eir_len = 9 + 3 + 18 + 18 + 3;
9227 status = MGMT_STATUS_INVALID_PARAMS;
9232 status = MGMT_STATUS_NOT_POWERED;
9236 rp_len = sizeof(*rp) + eir_len;
9237 rp = kmalloc(rp_len, GFP_ATOMIC);
9248 case BIT(BDADDR_BREDR):
9249 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9250 err = read_local_ssp_oob_req(hdev, sk, cp);
9251 hci_dev_unlock(hdev);
9255 status = MGMT_STATUS_FAILED;
9258 eir_len = eir_append_data(rp->eir, eir_len,
9260 hdev->dev_class, 3);
9263 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9264 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
9265 smp_generate_oob(hdev, hash, rand) < 0) {
9266 hci_dev_unlock(hdev);
9267 status = MGMT_STATUS_FAILED;
9271 /* This should return the active RPA, but since the RPA
9272 * is only programmed on demand, it is really hard to fill
9273 * this in at the moment. For now disallow retrieving
9274 * local out-of-band data when privacy is in use.
9276 * Returning the identity address will not help here since
9277 * pairing happens before the identity resolving key is
9278 * known and thus the connection establishment happens
9279 * based on the RPA and not the identity address.
9281 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
9282 hci_dev_unlock(hdev);
9283 status = MGMT_STATUS_REJECTED;
9287 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
9288 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
9289 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
9290 bacmp(&hdev->static_addr, BDADDR_ANY))) {
9291 memcpy(addr, &hdev->static_addr, 6);
9294 memcpy(addr, &hdev->bdaddr, 6);
9298 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
9299 addr, sizeof(addr));
9301 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
9306 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
9307 &role, sizeof(role));
9309 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
9310 eir_len = eir_append_data(rp->eir, eir_len,
9312 hash, sizeof(hash));
9314 eir_len = eir_append_data(rp->eir, eir_len,
9316 rand, sizeof(rand));
9319 flags = mgmt_get_adv_discov_flags(hdev);
9321 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
9322 flags |= LE_AD_NO_BREDR;
9324 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
9325 &flags, sizeof(flags));
9329 hci_dev_unlock(hdev);
9331 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
9333 status = MGMT_STATUS_SUCCESS;
9336 rp->type = cp->type;
9337 rp->eir_len = cpu_to_le16(eir_len);
9339 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
9340 status, rp, sizeof(*rp) + eir_len);
9341 if (err < 0 || status)
9344 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9345 rp, sizeof(*rp) + eir_len,
9346 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Return the bitmask of MGMT advertising flags this controller
 * supports; TX power and the secondary-PHY flags depend on extended
 * advertising capability and the controller's LE features.
 */
9354 static u32 get_supported_adv_flags(struct hci_dev *hdev)
9358 flags |= MGMT_ADV_FLAG_CONNECTABLE;
9359 flags |= MGMT_ADV_FLAG_DISCOV;
9360 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
9361 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
9362 flags |= MGMT_ADV_FLAG_APPEARANCE;
9363 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
9364 flags |= MGMT_ADV_PARAM_DURATION;
9365 flags |= MGMT_ADV_PARAM_TIMEOUT;
9366 flags |= MGMT_ADV_PARAM_INTERVALS;
9367 flags |= MGMT_ADV_PARAM_TX_POWER;
9368 flags |= MGMT_ADV_PARAM_SCAN_RSP;
9370 /* In extended adv TX_POWER returned from Set Adv Param
9371 * will be always valid.
9373 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
9374 ext_adv_capable(hdev))
9375 flags |= MGMT_ADV_FLAG_TX_POWER;
9377 if (ext_adv_capable(hdev)) {
9378 flags |= MGMT_ADV_FLAG_SEC_1M;
9379 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
9380 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
9382 if (hdev->le_features[1] & HCI_LE_PHY_2M)
9383 flags |= MGMT_ADV_FLAG_SEC_2M;
9385 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
9386 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler. Reports supported advertising
 * flags, data-length limits and the list of registered instance ids.
 * NOTE(review): the instance++ increment and kfree(rp) lines are elided
 * in this listing.
 */
9392 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
9393 void *data, u16 data_len)
9395 struct mgmt_rp_read_adv_features *rp;
9398 struct adv_info *adv_instance;
9399 u32 supported_flags;
9402 bt_dev_dbg(hdev, "sock %p", sk);
9404 if (!lmp_le_capable(hdev))
9405 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9406 MGMT_STATUS_REJECTED);
9408 /* Enabling the experimental LL Privacy support disables support for
9411 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9412 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9413 MGMT_STATUS_NOT_SUPPORTED);
9417 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
9418 rp = kmalloc(rp_len, GFP_ATOMIC);
9420 hci_dev_unlock(hdev);
9424 supported_flags = get_supported_adv_flags(hdev);
9426 rp->supported_flags = cpu_to_le32(supported_flags);
9427 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
9428 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
9429 rp->max_instances = hdev->le_num_of_adv_sets;
9430 rp->num_instances = hdev->adv_instance_cnt;
9432 instance = rp->instance;
9433 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
9434 *instance = adv_instance->instance;
9438 hci_dev_unlock(hdev);
9440 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9441 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Return the number of bytes the local name EIR field would occupy when
 * appended by append_local_name() (buffer sized for the short name plus
 * the 3-byte EIR header).
 */
9448 static u8 calculate_name_len(struct hci_dev *hdev)
9450 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
9452 return append_local_name(hdev, buf, 0);
/* Return how many bytes of caller-supplied TLV data fit in an adv (or
 * scan-response) PDU after reserving room for the kernel-managed
 * fields selected by @adv_flags (flags field, TX power, local name,
 * appearance).
 * NOTE(review): the per-field byte amounts subtracted for the first two
 * cases are on lines elided from this listing.
 */
9455 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
9458 u8 max_len = HCI_MAX_AD_LENGTH;
9461 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
9462 MGMT_ADV_FLAG_LIMITED_DISCOV |
9463 MGMT_ADV_FLAG_MANAGED_FLAGS))
9466 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
9469 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
9470 max_len -= calculate_name_len(hdev)
9472 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True when the kernel manages the Flags AD field for this instance
 * (any discoverable or managed-flags advertising flag set).
 */
9479 static bool flags_managed(u32 adv_flags)
9481 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
9482 MGMT_ADV_FLAG_LIMITED_DISCOV |
9483 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True when the kernel manages the TX Power AD field for this instance. */
9486 static bool tx_power_managed(u32 adv_flags)
9488 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True when the kernel manages the Local Name AD field for this instance. */
9491 static bool name_managed(u32 adv_flags)
9493 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True when the kernel manages the Appearance AD field for this instance. */
9496 static bool appearance_managed(u32 adv_flags)
9498 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate caller-supplied adv/scan-rsp TLV data: total length within
 * the per-PDU budget, well-formed length-type-value structure, and no
 * fields that the kernel itself manages (flags, TX power, name,
 * appearance) per @adv_flags.
 * NOTE(review): the cur_len assignment, the "return false" bodies and
 * the final "return true" are elided in this listing.
 */
9501 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
9502 u8 len, bool is_adv_data)
9507 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
9512 /* Make sure that the data is correctly formatted. */
9513 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
9519 if (data[i + 1] == EIR_FLAGS &&
9520 (!is_adv_data || flags_managed(adv_flags)))
9523 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
9526 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
9529 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
9532 if (data[i + 1] == EIR_APPEARANCE &&
9533 appearance_managed(adv_flags))
9536 /* If the current field length would exceed the total data
9537 * length, then it's invalid.
9539 if (i + cur_len >= len)
/* Reject advertising flags outside the supported set, and reject
 * requests selecting more than one secondary PHY (the
 * "phy_flags ^ (phy_flags & -phy_flags)" test is non-zero when more
 * than one bit of MGMT_ADV_FLAG_SEC_MASK is set).
 */
9546 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
9548 u32 supported_flags, phy_flags;
9550 /* The current implementation only supports a subset of the specified
9551 * flags. Also need to check mutual exclusiveness of sec flags.
9553 supported_flags = get_supported_adv_flags(hdev);
9554 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
9555 if (adv_flags & ~supported_flags ||
9556 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
/* True while another advertising-related mgmt command (or Set LE) is
 * still pending on this controller.
 */
9562 static bool adv_busy(struct hci_dev *hdev)
9564 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9565 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9566 pending_find(MGMT_OP_SET_LE, hdev) ||
9567 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
9568 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
/* HCI-request completion for Add Advertising / Add Ext Adv Data. On
 * failure, tears down any still-pending instances (cancelling the adv
 * timeout if the current instance is among them) and signals their
 * removal; then answers the pending mgmt command with the status.
 * NOTE(review): the "if (!cmd) goto unlock", cp assignment and some
 * braces are elided in this listing.
 */
9571 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
9574 struct mgmt_pending_cmd *cmd;
9575 struct mgmt_cp_add_advertising *cp;
9576 struct mgmt_rp_add_advertising rp;
9577 struct adv_info *adv_instance, *n;
9580 bt_dev_dbg(hdev, "status %u", status);
9584 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
9586 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
9588 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
9589 if (!adv_instance->pending)
9593 adv_instance->pending = false;
9597 instance = adv_instance->instance;
9599 if (hdev->cur_adv_instance == instance)
9600 cancel_adv_timeout(hdev);
9602 hci_remove_adv_instance(hdev, instance);
9603 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
9610 rp.instance = cp->instance;
9613 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9614 mgmt_status(status));
9616 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9617 mgmt_status(status), &rp, sizeof(rp));
9619 mgmt_pending_remove(cmd);
9622 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_ADVERTISING handler. Validates instance id, flags and
 * TLV payloads, registers/updates the instance via
 * hci_add_adv_instance(), then (when powered and not in legacy
 * HCI_ADVERTISING mode) schedules the instance through an HCI request
 * completed by add_advertising_complete().
 * NOTE(review): interior lines (lock, goto labels, else arms, some
 * braces) are elided in this listing.
 */
9625 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
9626 void *data, u16 data_len)
9628 struct mgmt_cp_add_advertising *cp = data;
9629 struct mgmt_rp_add_advertising rp;
9632 u16 timeout, duration;
9633 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
9634 u8 schedule_instance = 0;
9635 struct adv_info *next_instance;
9637 struct mgmt_pending_cmd *cmd;
9638 struct hci_request req;
9640 bt_dev_dbg(hdev, "sock %p", sk);
9642 status = mgmt_le_support(hdev);
9644 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9647 /* Enabling the experimental LL Privacy support disables support for
9650 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9651 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9652 MGMT_STATUS_NOT_SUPPORTED);
9654 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9655 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9656 MGMT_STATUS_INVALID_PARAMS);
9658 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
9659 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9660 MGMT_STATUS_INVALID_PARAMS);
9662 flags = __le32_to_cpu(cp->flags);
9663 timeout = __le16_to_cpu(cp->timeout);
9664 duration = __le16_to_cpu(cp->duration);
9666 if (!requested_adv_flags_are_valid(hdev, flags))
9667 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9668 MGMT_STATUS_INVALID_PARAMS);
9672 if (timeout && !hdev_is_powered(hdev)) {
9673 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9674 MGMT_STATUS_REJECTED);
9678 if (adv_busy(hdev)) {
9679 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9684 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
9685 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
9686 cp->scan_rsp_len, false)) {
9687 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9688 MGMT_STATUS_INVALID_PARAMS);
9692 err = hci_add_adv_instance(hdev, cp->instance, flags,
9693 cp->adv_data_len, cp->data,
9695 cp->data + cp->adv_data_len,
9697 HCI_ADV_TX_POWER_NO_PREFERENCE,
9698 hdev->le_adv_min_interval,
9699 hdev->le_adv_max_interval);
9701 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9702 MGMT_STATUS_FAILED);
9706 /* Only trigger an advertising added event if a new instance was
9709 if (hdev->adv_instance_cnt > prev_instance_cnt)
9710 mgmt_advertising_added(sk, hdev, cp->instance);
9712 if (hdev->cur_adv_instance == cp->instance) {
9713 /* If the currently advertised instance is being changed then
9714 * cancel the current advertising and schedule the next
9715 * instance. If there is only one instance then the overridden
9716 * advertising data will be visible right away.
9718 cancel_adv_timeout(hdev);
9720 next_instance = hci_get_next_instance(hdev, cp->instance);
9722 schedule_instance = next_instance->instance;
9723 } else if (!hdev->adv_instance_timeout) {
9724 /* Immediately advertise the new instance if no other
9725 * instance is currently being advertised.
9727 schedule_instance = cp->instance;
9730 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
9731 * there is no instance to be advertised then we have no HCI
9732 * communication to make. Simply return.
9734 if (!hdev_is_powered(hdev) ||
9735 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9736 !schedule_instance) {
9737 rp.instance = cp->instance;
9738 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9739 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9743 /* We're good to go, update advertising data, parameters, and start
9746 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
9753 hci_req_init(&req, hdev);
9755 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
9758 err = hci_req_run(&req, add_advertising_complete);
9761 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9762 MGMT_STATUS_FAILED);
9763 mgmt_pending_remove(cmd);
9767 hci_dev_unlock(hdev);
/* HCI-request completion for Add Extended Advertising Parameters.
 * On success, reports the selected TX power and the remaining adv/
 * scan-rsp data budget for the chosen flags; on failure, removes the
 * instance (signalling removal if it had been advertising) and returns
 * the error status.
 * NOTE(review): the "if (!cmd) goto unlock", cp assignment and status
 * branch lines are elided in this listing.
 */
9772 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
9775 struct mgmt_pending_cmd *cmd;
9776 struct mgmt_cp_add_ext_adv_params *cp;
9777 struct mgmt_rp_add_ext_adv_params rp;
9778 struct adv_info *adv_instance;
9781 BT_DBG("%s", hdev->name);
9785 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
9790 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9794 rp.instance = cp->instance;
9795 rp.tx_power = adv_instance->tx_power;
9797 /* While we're at it, inform userspace of the available space for this
9798 * advertisement, given the flags that will be used.
9800 flags = __le32_to_cpu(cp->flags);
9801 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9802 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9805 /* If this advertisement was previously advertising and we
9806 * failed to update it, we signal that it has been removed and
9807 * delete its structure
9809 if (!adv_instance->pending)
9810 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
9812 hci_remove_adv_instance(hdev, cp->instance);
9814 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9815 mgmt_status(status));
9818 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9819 mgmt_status(status), &rp, sizeof(rp));
9824 mgmt_pending_remove(cmd);
9826 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler. Validates instance and flags,
 * resolves each optional parameter (timeout, duration, intervals, TX
 * power) against its flag bit or a controller default, registers a
 * data-less instance, and for ext-adv capable controllers submits the
 * HCI parameter setup completed by add_ext_adv_params_complete().
 * Non-ext-adv controllers are answered immediately; data upload follows
 * in Add Ext Adv Data.
 * NOTE(review): interior lines (lock, goto labels, tx_power operand,
 * else arm for the non-ext-adv path) are elided in this listing.
 */
9829 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
9830 void *data, u16 data_len)
9832 struct mgmt_cp_add_ext_adv_params *cp = data;
9833 struct mgmt_rp_add_ext_adv_params rp;
9834 struct mgmt_pending_cmd *cmd = NULL;
9835 struct adv_info *adv_instance;
9836 struct hci_request req;
9837 u32 flags, min_interval, max_interval;
9838 u16 timeout, duration;
9843 BT_DBG("%s", hdev->name);
9845 status = mgmt_le_support(hdev);
9847 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9850 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9851 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9852 MGMT_STATUS_INVALID_PARAMS);
9854 /* The purpose of breaking add_advertising into two separate MGMT calls
9855 * for params and data is to allow more parameters to be added to this
9856 * structure in the future. For this reason, we verify that we have the
9857 * bare minimum structure we know of when the interface was defined. Any
9858 * extra parameters we don't know about will be ignored in this request.
9860 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
9861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9862 MGMT_STATUS_INVALID_PARAMS);
9864 flags = __le32_to_cpu(cp->flags);
9866 if (!requested_adv_flags_are_valid(hdev, flags))
9867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9868 MGMT_STATUS_INVALID_PARAMS);
9872 /* In new interface, we require that we are powered to register */
9873 if (!hdev_is_powered(hdev)) {
9874 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9875 MGMT_STATUS_REJECTED);
9879 if (adv_busy(hdev)) {
9880 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9885 /* Parse defined parameters from request, use defaults otherwise */
9886 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
9887 __le16_to_cpu(cp->timeout) : 0;
9889 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
9890 __le16_to_cpu(cp->duration) :
9891 hdev->def_multi_adv_rotation_duration;
9893 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9894 __le32_to_cpu(cp->min_interval) :
9895 hdev->le_adv_min_interval;
9897 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9898 __le32_to_cpu(cp->max_interval) :
9899 hdev->le_adv_max_interval;
9901 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
9903 HCI_ADV_TX_POWER_NO_PREFERENCE;
9905 /* Create advertising instance with no advertising or response data */
9906 err = hci_add_adv_instance(hdev, cp->instance, flags,
9907 0, NULL, 0, NULL, timeout, duration,
9908 tx_power, min_interval, max_interval);
9911 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9912 MGMT_STATUS_FAILED);
9916 /* Submit request for advertising params if ext adv available */
9917 if (ext_adv_capable(hdev)) {
9918 hci_req_init(&req, hdev);
9919 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9921 /* Updating parameters of an active instance will return a
9922 * Command Disallowed error, so we must first disable the
9923 * instance if it is active.
9925 if (!adv_instance->pending)
9926 __hci_req_disable_ext_adv_instance(&req, cp->instance);
9928 __hci_req_setup_ext_adv_instance(&req, cp->instance);
9930 err = hci_req_run(&req, add_ext_adv_params_complete);
9933 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
9934 hdev, data, data_len);
9937 hci_remove_adv_instance(hdev, cp->instance);
9942 rp.instance = cp->instance;
9943 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9944 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9945 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9946 err = mgmt_cmd_complete(sk, hdev->id,
9947 MGMT_OP_ADD_EXT_ADV_PARAMS,
9948 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9952 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: second half of the two-step
 * extended-advertising registration. Validates and stores the adv and
 * scan-response payloads for an instance previously created by
 * MGMT_OP_ADD_EXT_ADV_PARAMS, then updates the controller and starts
 * (or schedules) advertising. On any failure after the instance lookup
 * the half-configured instance is torn down via clear_new_instance.
 */
9957 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9960 struct mgmt_cp_add_ext_adv_data *cp = data;
9961 struct mgmt_rp_add_ext_adv_data rp;
9962 u8 schedule_instance = 0;
9963 struct adv_info *next_instance;
9964 struct adv_info *adv_instance;
9966 struct mgmt_pending_cmd *cmd;
9967 struct hci_request req;
9969 BT_DBG("%s", hdev->name);
/* The instance must already exist (created by ADD_EXT_ADV_PARAMS) */
9973 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9975 if (!adv_instance) {
9976 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9977 MGMT_STATUS_INVALID_PARAMS);
9981 /* In new interface, we require that we are powered to register */
9982 if (!hdev_is_powered(hdev)) {
9983 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9984 MGMT_STATUS_REJECTED);
9985 goto clear_new_instance;
/* Another advertising operation is in flight; do not interleave */
9988 if (adv_busy(hdev)) {
9989 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9991 goto clear_new_instance;
9994 /* Validate new data */
9995 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9996 cp->adv_data_len, true) ||
9997 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9998 cp->adv_data_len, cp->scan_rsp_len, false)) {
9999 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10000 MGMT_STATUS_INVALID_PARAMS);
10001 goto clear_new_instance;
10004 /* Set the data in the advertising instance */
10005 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
10006 cp->data, cp->scan_rsp_len,
10007 cp->data + cp->adv_data_len);
10009 /* We're good to go, update advertising data, parameters, and start
10013 hci_req_init(&req, hdev);
/* Dummy command so the request is never empty — see add_advertising */
10015 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
10017 if (ext_adv_capable(hdev)) {
10018 __hci_req_update_adv_data(&req, cp->instance);
10019 __hci_req_update_scan_rsp_data(&req, cp->instance);
10020 __hci_req_enable_ext_advertising(&req, cp->instance);
10023 /* If using software rotation, determine next instance to use */
10025 if (hdev->cur_adv_instance == cp->instance) {
10026 /* If the currently advertised instance is being changed
10027 * then cancel the current advertising and schedule the
10028 * next instance. If there is only one instance then the
10029 * overridden advertising data will be visible right
10032 cancel_adv_timeout(hdev);
10034 next_instance = hci_get_next_instance(hdev,
10037 schedule_instance = next_instance->instance;
10038 } else if (!hdev->adv_instance_timeout) {
10039 /* Immediately advertise the new instance if no other
10040 * instance is currently being advertised.
10042 schedule_instance = cp->instance;
10045 /* If the HCI_ADVERTISING flag is set or there is no instance to
10046 * be advertised then we have no HCI communication to make.
10049 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10050 !schedule_instance) {
10051 if (adv_instance->pending) {
10052 mgmt_advertising_added(sk, hdev, cp->instance);
10053 adv_instance->pending = false;
10055 rp.instance = cp->instance;
10056 err = mgmt_cmd_complete(sk, hdev->id,
10057 MGMT_OP_ADD_EXT_ADV_DATA,
10058 MGMT_STATUS_SUCCESS, &rp,
10063 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
10067 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
10071 goto clear_new_instance;
/* Async completion; reply is sent from add_advertising_complete */
10075 err = hci_req_run(&req, add_advertising_complete);
10078 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10079 MGMT_STATUS_FAILED);
10080 mgmt_pending_remove(cmd);
10081 goto clear_new_instance;
10084 /* We were successful in updating data, so trigger advertising_added
10085 * event if this is an instance that wasn't previously advertising. If
10086 * a failure occurs in the requests we initiated, we will remove the
10087 * instance again in add_advertising_complete
10089 if (adv_instance->pending)
10090 mgmt_advertising_added(sk, hdev, cp->instance);
10094 clear_new_instance:
10095 hci_remove_adv_instance(hdev, cp->instance);
10098 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_REMOVE_ADVERTISING.
 * Looks up the pending mgmt command and completes it; the instance
 * itself was already removed before the request ran.
 */
10103 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
10106 struct mgmt_pending_cmd *cmd;
10107 struct mgmt_cp_remove_advertising *cp;
10108 struct mgmt_rp_remove_advertising rp;
10110 bt_dev_dbg(hdev, "status %u", status);
10112 hci_dev_lock(hdev);
10114 /* A failure status here only means that we failed to disable
10115 * advertising. Otherwise, the advertising instance has been removed,
10116 * so report success.
10118 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
10123 rp.instance = cp->instance;
/* Unconditional SUCCESS — see the comment above on failure semantics */
10125 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
10127 mgmt_pending_remove(cmd);
10130 hci_dev_unlock(hdev);
10133 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
10134 void *data, u16 data_len)
10136 struct mgmt_cp_remove_advertising *cp = data;
10137 struct mgmt_rp_remove_advertising rp;
10138 struct mgmt_pending_cmd *cmd;
10139 struct hci_request req;
10142 bt_dev_dbg(hdev, "sock %p", sk);
10144 /* Enabling the experimental LL Privay support disables support for
10147 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
10148 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
10149 MGMT_STATUS_NOT_SUPPORTED);
10151 hci_dev_lock(hdev);
10153 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
10154 err = mgmt_cmd_status(sk, hdev->id,
10155 MGMT_OP_REMOVE_ADVERTISING,
10156 MGMT_STATUS_INVALID_PARAMS);
10160 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
10161 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
10162 pending_find(MGMT_OP_SET_LE, hdev)) {
10163 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10168 if (list_empty(&hdev->adv_instances)) {
10169 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10170 MGMT_STATUS_INVALID_PARAMS);
10174 hci_req_init(&req, hdev);
10176 /* If we use extended advertising, instance is disabled and removed */
10177 if (ext_adv_capable(hdev)) {
10178 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10179 __hci_req_remove_ext_adv_instance(&req, cp->instance);
10182 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
10184 if (list_empty(&hdev->adv_instances))
10185 __hci_req_disable_advertising(&req);
10187 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
10188 * flag is set or the device isn't powered then we have no HCI
10189 * communication to make. Simply return.
10191 if (skb_queue_empty(&req.cmd_q) ||
10192 !hdev_is_powered(hdev) ||
10193 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
10194 hci_req_purge(&req);
10195 rp.instance = cp->instance;
10196 err = mgmt_cmd_complete(sk, hdev->id,
10197 MGMT_OP_REMOVE_ADVERTISING,
10198 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10202 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
10209 err = hci_req_run(&req, remove_advertising_complete);
10211 mgmt_pending_remove(cmd);
10214 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: purely informational command
 * that reports, for a given instance number and flag set, how many
 * bytes of advertising and scan-response data would be available.
 * Makes no state changes and queues no HCI traffic.
 */
10219 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
10220 void *data, u16 data_len)
10222 struct mgmt_cp_get_adv_size_info *cp = data;
10223 struct mgmt_rp_get_adv_size_info rp;
10224 u32 flags, supported_flags;
10227 bt_dev_dbg(hdev, "sock %p", sk);
10229 if (!lmp_le_capable(hdev))
10230 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10231 MGMT_STATUS_REJECTED);
/* Instance numbers are 1-based, bounded by the controller's set count */
10233 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10234 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10235 MGMT_STATUS_INVALID_PARAMS);
10237 flags = __le32_to_cpu(cp->flags);
10239 /* The current implementation only supports a subset of the specified
10242 supported_flags = get_supported_adv_flags(hdev);
10243 if (flags & ~supported_flags)
10244 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10245 MGMT_STATUS_INVALID_PARAMS);
10247 rp.instance = cp->instance;
10248 rp.flags = cp->flags;
/* true = adv data budget, false = scan response budget */
10249 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10250 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10252 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10253 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for the standard mgmt commands: entry N handles
 * mgmt opcode N (see mgmt_commands[] above). Each entry gives the
 * handler, the expected (minimum) parameter size and optional flags
 * such as HCI_MGMT_VAR_LEN (params may be longer than the minimum)
 * and HCI_MGMT_UNTRUSTED (allowed from untrusted sockets); see
 * struct hci_mgmt_handler for the full flag semantics.
 */
10258 static const struct hci_mgmt_handler mgmt_handlers[] = {
10259 { NULL }, /* 0x0000 (no command) */
10260 { read_version, MGMT_READ_VERSION_SIZE,
10262 HCI_MGMT_UNTRUSTED },
10263 { read_commands, MGMT_READ_COMMANDS_SIZE,
10265 HCI_MGMT_UNTRUSTED },
10266 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
10268 HCI_MGMT_UNTRUSTED },
10269 { read_controller_info, MGMT_READ_INFO_SIZE,
10270 HCI_MGMT_UNTRUSTED },
10271 { set_powered, MGMT_SETTING_SIZE },
10272 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
10273 { set_connectable, MGMT_SETTING_SIZE },
10274 { set_fast_connectable, MGMT_SETTING_SIZE },
10275 { set_bondable, MGMT_SETTING_SIZE },
10276 { set_link_security, MGMT_SETTING_SIZE },
10277 { set_ssp, MGMT_SETTING_SIZE },
10278 { set_hs, MGMT_SETTING_SIZE },
10279 { set_le, MGMT_SETTING_SIZE },
10280 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
10281 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
10282 { add_uuid, MGMT_ADD_UUID_SIZE },
10283 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
10284 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
10285 HCI_MGMT_VAR_LEN },
10286 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
10287 HCI_MGMT_VAR_LEN },
10288 { disconnect, MGMT_DISCONNECT_SIZE },
10289 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
10290 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
10291 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
10292 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
10293 { pair_device, MGMT_PAIR_DEVICE_SIZE },
10294 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
10295 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
10296 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
10297 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
10298 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
10299 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
10300 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
10301 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
10302 HCI_MGMT_VAR_LEN },
10303 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
10304 { start_discovery, MGMT_START_DISCOVERY_SIZE },
10305 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
10306 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
10307 { block_device, MGMT_BLOCK_DEVICE_SIZE },
10308 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
10309 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
10310 { set_advertising, MGMT_SETTING_SIZE },
10311 { set_bredr, MGMT_SETTING_SIZE },
10312 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
10313 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
10314 { set_secure_conn, MGMT_SETTING_SIZE },
10315 { set_debug_keys, MGMT_SETTING_SIZE },
10316 { set_privacy, MGMT_SET_PRIVACY_SIZE },
10317 { load_irks, MGMT_LOAD_IRKS_SIZE,
10318 HCI_MGMT_VAR_LEN },
10319 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
10320 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
10321 { add_device, MGMT_ADD_DEVICE_SIZE },
10322 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
10323 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
10324 HCI_MGMT_VAR_LEN },
10325 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
10327 HCI_MGMT_UNTRUSTED },
10328 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
10329 HCI_MGMT_UNCONFIGURED |
10330 HCI_MGMT_UNTRUSTED },
10331 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
10332 HCI_MGMT_UNCONFIGURED },
10333 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
10334 HCI_MGMT_UNCONFIGURED },
10335 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
10336 HCI_MGMT_VAR_LEN },
10337 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
10338 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
10340 HCI_MGMT_UNTRUSTED },
10341 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
10342 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
10343 HCI_MGMT_VAR_LEN },
10344 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
10345 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
10346 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
10347 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
10348 HCI_MGMT_UNTRUSTED },
10349 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
10350 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
10351 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
10352 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
10353 HCI_MGMT_VAR_LEN },
10354 { set_wideband_speech, MGMT_SETTING_SIZE },
10355 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
10356 HCI_MGMT_UNTRUSTED },
10357 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
10358 HCI_MGMT_UNTRUSTED |
10359 HCI_MGMT_HDEV_OPTIONAL },
10360 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
10362 HCI_MGMT_HDEV_OPTIONAL },
10363 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
10364 HCI_MGMT_UNTRUSTED },
10365 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
10366 HCI_MGMT_VAR_LEN },
10367 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
10368 HCI_MGMT_UNTRUSTED },
10369 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
10370 HCI_MGMT_VAR_LEN },
10371 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
10372 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
10373 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
10374 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
10375 HCI_MGMT_VAR_LEN },
10376 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
10377 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
10378 HCI_MGMT_VAR_LEN },
10379 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
10380 HCI_MGMT_VAR_LEN },
10381 { add_adv_patterns_monitor_rssi,
10382 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
10383 HCI_MGMT_VAR_LEN },
/* Tizen-specific mgmt command dispatch table (opcodes from
 * mgmt_tizen.h), indexed the same way as mgmt_handlers[] above.
 * Covers vendor extensions: white-list management, RSSI monitoring,
 * LE discovery/connection tweaks, manufacturer data and voice setting.
 */
10387 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
10388 { NULL }, /* 0x0000 (no command) */
10389 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
10390 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
10391 HCI_MGMT_VAR_LEN },
10392 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
10393 HCI_MGMT_VAR_LEN },
10394 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
10395 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
10396 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
10397 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
10398 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
10399 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
10400 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
10401 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
10402 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
10403 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
10404 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
10405 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
10406 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
10407 { get_adv_tx_power, MGMT_GET_ADV_TX_POWER_SIZE },
/* Notify mgmt listeners that a new controller index appeared.
 * Raw (HCI_QUIRK_RAW_DEVICE) controllers are invisible to mgmt.
 * Unconfigured controllers get UNCONF_INDEX_ADDED, configured ones
 * INDEX_ADDED; in both cases an EXT_INDEX_ADDED event carrying the
 * bus type is also emitted for clients using the extended event set.
 */
10411 void mgmt_index_added(struct hci_dev *hdev)
10413 struct mgmt_ev_ext_index ev;
10415 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* NOTE(review): switch arms below depend on hdev->dev_type values not
 * visible in this extract — presumably HCI_PRIMARY vs HCI_AMP.
 */
10418 switch (hdev->dev_type) {
10420 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10421 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
10422 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10425 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
10426 HCI_MGMT_INDEX_EVENTS);
10437 ev.bus = hdev->bus;
10439 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
10440 HCI_MGMT_EXT_INDEX_EVENTS);
/* Notify mgmt listeners that a controller index disappeared.
 * All still-pending mgmt commands on this controller are completed
 * with MGMT_STATUS_INVALID_INDEX before the removal events are sent.
 * Mirrors mgmt_index_added(): raw devices are skipped, unconfigured
 * vs configured controllers get different event types, and an
 * EXT_INDEX_REMOVED event is emitted as well.
 */
10443 void mgmt_index_removed(struct hci_dev *hdev)
10445 struct mgmt_ev_ext_index ev;
10446 u8 status = MGMT_STATUS_INVALID_INDEX;
10448 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10451 switch (hdev->dev_type) {
/* opcode 0 = match every pending command */
10453 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10455 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10456 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
10457 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10460 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
10461 HCI_MGMT_INDEX_EVENTS);
10472 ev.bus = hdev->bus;
10474 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
10475 HCI_MGMT_EXT_INDEX_EVENTS);
10478 /* This function requires the caller holds hdev->lock */
/* Re-queue every stored LE connection parameter entry onto the proper
 * pending-action list after power-on, based on its auto_connect policy:
 * DIRECT/ALWAYS -> pend_le_conns, REPORT -> pend_le_reports.
 */
10479 static void restart_le_actions(struct hci_dev *hdev)
10481 struct hci_conn_params *p;
10483 list_for_each_entry(p, &hdev->le_conn_params, list) {
10484 /* Needed for AUTO_OFF case where might not "really"
10485 * have been powered off.
10487 list_del_init(&p->action);
10489 switch (p->auto_connect) {
10490 case HCI_AUTO_CONN_DIRECT:
10491 case HCI_AUTO_CONN_ALWAYS:
10492 list_add(&p->action, &hdev->pend_le_conns);
10494 case HCI_AUTO_CONN_REPORT:
10495 list_add(&p->action, &hdev->pend_le_reports);
/* Power-on completion hook called from the HCI core. Restores the LE
 * auto-connect action lists, restarts background scanning, completes
 * any pending MGMT_OP_SET_POWERED commands and broadcasts the new
 * settings to mgmt listeners.
 */
10503 void mgmt_power_on(struct hci_dev *hdev, int err)
10505 struct cmd_lookup match = { NULL, hdev };
10507 bt_dev_dbg(hdev, "err %d", err);
10509 hci_dev_lock(hdev);
10512 restart_le_actions(hdev);
10513 hci_update_background_scan(hdev);
/* settings_rsp records the requesting socket in match.sk */
10516 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10518 new_settings(hdev, match.sk);
10521 sock_put(match.sk);
10523 hci_dev_unlock(hdev);
/* Power-off hook (caller holds the necessary locks; see the matching
 * declaration in hci_core). Completes pending SET_POWERED commands,
 * fails every other pending command with NOT_POWERED (or INVALID_INDEX
 * when the hdev is being unregistered), announces a zeroed class of
 * device if one was set, and broadcasts the new settings.
 */
10526 void __mgmt_power_off(struct hci_dev *hdev)
10528 struct cmd_lookup match = { NULL, hdev };
10529 u8 status, zero_cod[] = { 0, 0, 0 };
10531 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10533 /* If the power off is because of hdev unregistration let
10534 * use the appropriate INVALID_INDEX status. Otherwise use
10535 * NOT_POWERED. We cover both scenarios here since later in
10536 * mgmt_index_removed() any hci_conn callbacks will have already
10537 * been triggered, potentially causing misleading DISCONNECTED
10538 * status responses.
10540 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
10541 status = MGMT_STATUS_INVALID_INDEX;
10543 status = MGMT_STATUS_NOT_POWERED;
/* opcode 0 = match every remaining pending command */
10545 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10547 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
10548 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
10549 zero_cod, sizeof(zero_cod),
10550 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10551 ext_info_changed(hdev, NULL);
10554 new_settings(hdev, match.sk);
10557 sock_put(match.sk);
/* Report failure of a pending MGMT_OP_SET_POWERED command, mapping
 * -ERFKILL to MGMT_STATUS_RFKILLED and everything else to FAILED.
 * No-op if no SET_POWERED command is pending.
 */
10560 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
10562 struct mgmt_pending_cmd *cmd;
10565 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10569 if (err == -ERFKILL)
10570 status = MGMT_STATUS_RFKILLED;
10572 status = MGMT_STATUS_FAILED;
10574 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
10576 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key so
 * userspace can persist it; store_hint tells the client whether the
 * key should be stored permanently.
 */
10579 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
10582 struct mgmt_ev_new_link_key ev;
10584 memset(&ev, 0, sizeof(ev));
10586 ev.store_hint = persistent;
10587 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10588 ev.key.addr.type = BDADDR_BREDR;
10589 ev.key.type = key->type;
10590 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
10591 ev.key.pin_len = key->pin_len;
10593 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated bit to the
 * corresponding MGMT_LTK_* constant reported to userspace.
 * NOTE(review): this extract is missing some case labels between the
 * visible arms (e.g. the legacy and P256 case lines) — verify against
 * the full file before relying on the exact mapping.
 */
10596 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
10598 switch (ltk->type) {
10600 case SMP_LTK_RESPONDER:
10601 if (ltk->authenticated)
10602 return MGMT_LTK_AUTHENTICATED;
10603 return MGMT_LTK_UNAUTHENTICATED;
10605 if (ltk->authenticated)
10606 return MGMT_LTK_P256_AUTH;
10607 return MGMT_LTK_P256_UNAUTH;
10608 case SMP_LTK_P256_DEBUG:
10609 return MGMT_LTK_P256_DEBUG;
/* Fallback for unrecognized types */
10612 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long term key. The
 * store hint is forced to 0 for random addresses that are not static
 * (top two bits != 0b11) since such addresses are not stable identity
 * addresses. Only key->enc_size significant bytes of the key value are
 * copied; the remainder is zeroed.
 */
10615 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
10617 struct mgmt_ev_new_long_term_key ev;
10619 memset(&ev, 0, sizeof(ev));
10621 /* Devices using resolvable or non-resolvable random addresses
10622 * without providing an identity resolving key don't require
10623 * to store long term keys. Their addresses will change the
10624 * next time around.
10626 * Only when a remote device provides an identity address
10627 * make sure the long term key is stored. If the remote
10628 * identity is known, the long term keys are internally
10629 * mapped to the identity address. So allow static random
10630 * and public addresses here.
10632 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10633 (key->bdaddr.b[5] & 0xc0) != 0xc0)
10634 ev.store_hint = 0x00;
10636 ev.store_hint = persistent;
10638 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10639 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
10640 ev.key.type = mgmt_ltk_type(key);
10641 ev.key.enc_size = key->enc_size;
10642 ev.key.ediv = key->ediv;
10643 ev.key.rand = key->rand;
/* SMP_LTK marks keys generated while we were the initiator */
10645 if (key->type == SMP_LTK)
10646 ev.key.initiator = 1;
10648 /* Make sure we copy only the significant bytes based on the
10649 * encryption key size, and set the rest of the value to zeroes.
10651 memcpy(ev.key.val, key->val, key->enc_size);
10652 memset(ev.key.val + key->enc_size, 0,
10653 sizeof(ev.key.val) - key->enc_size);
10655 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK carrying a remote device's identity resolving
 * key together with the resolvable private address it was seen under.
 */
10658 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
10660 struct mgmt_ev_new_irk ev;
10662 memset(&ev, 0, sizeof(ev));
10664 ev.store_hint = persistent;
10666 bacpy(&ev.rpa, &irk->rpa);
10667 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
10668 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
10669 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
10671 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key. As with
 * LTKs, the store hint is suppressed for non-static random addresses
 * because they are not stable identity addresses.
 */
10674 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
10677 struct mgmt_ev_new_csrk ev;
10679 memset(&ev, 0, sizeof(ev));
10681 /* Devices using resolvable or non-resolvable random addresses
10682 * without providing an identity resolving key don't require
10683 * to store signature resolving keys. Their addresses will change
10684 * the next time around.
10686 * Only when a remote device provides an identity address
10687 * make sure the signature resolving key is stored. So allow
10688 * static random and public addresses here.
10690 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10691 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
10692 ev.store_hint = 0x00;
10694 ev.store_hint = persistent;
10696 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
10697 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
10698 ev.key.type = csrk->type;
10699 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
10701 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with connection parameters suggested by
 * a remote LE device so userspace can decide whether to store them.
 * Silently ignored for addresses that are not identity addresses.
 */
10704 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
10705 u8 bdaddr_type, u8 store_hint, u16 min_interval,
10706 u16 max_interval, u16 latency, u16 timeout)
10708 struct mgmt_ev_new_conn_param ev;
10710 if (!hci_is_identity_address(bdaddr, bdaddr_type))
10713 memset(&ev, 0, sizeof(ev));
10714 bacpy(&ev.addr.bdaddr, bdaddr);
10715 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
10716 ev.store_hint = store_hint;
10717 ev.min_interval = cpu_to_le16(min_interval);
10718 ev.max_interval = cpu_to_le16(max_interval);
10719 ev.latency = cpu_to_le16(latency);
10720 ev.timeout = cpu_to_le16(timeout);
10722 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED for a newly established connection.
 * The event carries EIR-formatted data: either the LE advertising data
 * captured at connect time, or (for BR/EDR) the remote name and class
 * of device. NOTE(review): `buf`, `flags` and `eir_len` are declared
 * on lines dropped from this extract — presumably a stack buffer large
 * enough for the event plus EIR payload; confirm against the full file.
 */
10725 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
10726 u8 *name, u8 name_len)
10729 struct mgmt_ev_device_connected *ev = (void *) buf;
10733 bacpy(&ev->addr.bdaddr, &conn->dst);
10734 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10737 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
10739 ev->flags = __cpu_to_le32(flags);
10741 /* We must ensure that the EIR Data fields are ordered and
10742 * unique. Keep it simple for now and avoid the problem by not
10743 * adding any BR/EDR data to the LE adv.
10745 if (conn->le_adv_data_len > 0) {
10746 memcpy(&ev->eir[eir_len],
10747 conn->le_adv_data, conn->le_adv_data_len);
10748 eir_len = conn->le_adv_data_len;
10751 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Class of device appended only if it is non-zero */
10754 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
10755 eir_len = eir_append_data(ev->eir, eir_len,
10757 conn->dev_class, 3);
10760 ev->eir_len = cpu_to_le16(eir_len);
10762 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
10763 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command with success and hand its socket back through *data so the
 * caller can attribute the DEVICE_DISCONNECTED event to it.
 */
10766 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
10768 struct sock **sk = data;
10770 cmd->cmd_complete(cmd, 0);
10775 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command — emit the DEVICE_UNPAIRED event for its target address,
 * then complete and remove the command.
 */
10778 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
10780 struct hci_dev *hdev = data;
10781 struct mgmt_cp_unpair_device *cp = cmd->param;
10783 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
10785 cmd->cmd_complete(cmd, 0);
10786 mgmt_pending_remove(cmd);
/* Return true when a SET_POWERED(off) command is currently pending,
 * i.e. the controller is in the process of powering down.
 * NOTE(review): the body after the pending_find() call (the checks on
 * cmd and cp->val, and the returns) is missing from this extract.
 */
10789 bool mgmt_powering_down(struct hci_dev *hdev)
10791 struct mgmt_pending_cmd *cmd;
10792 struct mgmt_mode *cp;
10794 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED when an ACL/LE link goes down.
 * Also kicks the deferred power-off forward when this was the last
 * open connection during a power-down, and completes any pending
 * DISCONNECT / UNPAIR_DEVICE commands that match.
 */
10805 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
10806 u8 link_type, u8 addr_type, u8 reason,
10807 bool mgmt_connected)
10809 struct mgmt_ev_device_disconnected ev;
10810 struct sock *sk = NULL;
10812 /* The connection is still in hci_conn_hash so test for 1
10813 * instead of 0 to know if this is the last one.
10815 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10816 cancel_delayed_work(&hdev->power_off);
10817 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Only report links that were announced as connected to mgmt */
10820 if (!mgmt_connected)
10823 if (link_type != ACL_LINK && link_type != LE_LINK)
/* disconnect_rsp() stores the requesting socket in sk */
10826 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
10828 bacpy(&ev.addr.bdaddr, bdaddr);
10829 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10830 ev.reason = reason;
10832 /* Report disconnects due to suspend */
10833 if (hdev->suspended)
10834 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
10836 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
10841 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a pending DISCONNECT command with the (translated) HCI
 * failure status when the disconnect attempt failed. Pending
 * UNPAIR_DEVICE commands are answered first regardless, since the
 * unpair itself already happened.
 */
10845 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10846 u8 link_type, u8 addr_type, u8 status)
10848 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
10849 struct mgmt_cp_disconnect *cp;
10850 struct mgmt_pending_cmd *cmd;
10852 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10855 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only complete the command if it targets this exact address/type */
10861 if (bacmp(bdaddr, &cp->addr.bdaddr))
10864 if (cp->addr.type != bdaddr_type)
10867 cmd->cmd_complete(cmd, mgmt_status(status));
10868 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED for an outgoing connection attempt that
 * failed; also advances a deferred power-off when this was the last
 * connection left during power-down (same pattern as
 * mgmt_device_disconnected above).
 */
10871 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10872 u8 addr_type, u8 status)
10874 struct mgmt_ev_connect_failed ev;
10876 /* The connection is still in hci_conn_hash so test for 1
10877 * instead of 0 to know if this is the last one.
10879 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10880 cancel_delayed_work(&hdev->power_off);
10881 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10884 bacpy(&ev.addr.bdaddr, bdaddr);
10885 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10886 ev.status = mgmt_status(status);
10888 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST so an agent can supply a PIN for a
 * legacy BR/EDR pairing; `secure` indicates a 16-digit PIN is required.
 */
10891 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
10893 struct mgmt_ev_pin_code_request ev;
10895 bacpy(&ev.addr.bdaddr, bdaddr);
10896 ev.addr.type = BDADDR_BREDR;
10897 ev.secure = secure;
10899 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the translated HCI
 * status once the controller has acknowledged the reply.
 */
10902 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10905 struct mgmt_pending_cmd *cmd;
10907 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
10911 cmd->cmd_complete(cmd, mgmt_status(status));
10912 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the translated
 * HCI status; mirror of mgmt_pin_code_reply_complete().
 */
10915 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10918 struct mgmt_pending_cmd *cmd;
10920 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
10924 cmd->cmd_complete(cmd, mgmt_status(status));
10925 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking an agent to confirm a
 * numeric-comparison value during SSP/LE pairing. Returns the
 * mgmt_event() result.
 */
10928 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10929 u8 link_type, u8 addr_type, u32 value,
10932 struct mgmt_ev_user_confirm_request ev;
10934 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10936 bacpy(&ev.addr.bdaddr, bdaddr);
10937 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10938 ev.confirm_hint = confirm_hint;
10939 ev.value = cpu_to_le32(value);
10941 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking an agent to type a passkey
 * for the given peer. Returns the mgmt_event() result.
 */
10945 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10946 u8 link_type, u8 addr_type)
10948 struct mgmt_ev_user_passkey_request ev;
10950 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10952 bacpy(&ev.addr.bdaddr, bdaddr);
10953 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10955 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * opcodes: find the pending command for `opcode` and complete it with
 * the translated HCI status.
 */
10959 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10960 u8 link_type, u8 addr_type, u8 status,
10963 struct mgmt_pending_cmd *cmd;
10965 cmd = pending_find(opcode, hdev);
10969 cmd->cmd_complete(cmd, mgmt_status(status));
10970 mgmt_pending_remove(cmd);
/* Completion for MGMT_OP_USER_CONFIRM_REPLY — thin wrapper around
 * user_pairing_resp_complete().
 */
10975 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10976 u8 link_type, u8 addr_type, u8 status)
10978 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10979 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Completion for MGMT_OP_USER_CONFIRM_NEG_REPLY — thin wrapper around
 * user_pairing_resp_complete().
 */
10982 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10983 u8 link_type, u8 addr_type, u8 status)
10985 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10987 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Completion for MGMT_OP_USER_PASSKEY_REPLY — thin wrapper around
 * user_pairing_resp_complete().
 */
10990 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10991 u8 link_type, u8 addr_type, u8 status)
10993 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10994 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Completion for MGMT_OP_USER_PASSKEY_NEG_REPLY — thin wrapper around
 * user_pairing_resp_complete().
 */
10997 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10998 u8 link_type, u8 addr_type, u8 status)
11000 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11002 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so the UI can display the passkey being
 * used for pairing; `entered` counts digits the remote side has typed.
 * Returns the mgmt_event() result.
 */
11005 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
11006 u8 link_type, u8 addr_type, u32 passkey,
11009 struct mgmt_ev_passkey_notify ev;
11011 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11013 bacpy(&ev.addr.bdaddr, bdaddr);
11014 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11015 ev.passkey = __cpu_to_le32(passkey);
11016 ev.entered = entered;
11018 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure on `conn`: emit MGMT_EV_AUTH_FAILED
 * (skipping the socket of the pairing initiator, if any, so it does
 * not get both event and response) and complete any matching pending
 * pairing command with the translated status.
 */
11021 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
11023 struct mgmt_ev_auth_failed ev;
11024 struct mgmt_pending_cmd *cmd;
11025 u8 status = mgmt_status(hci_status);
11027 bacpy(&ev.addr.bdaddr, &conn->dst);
11028 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11029 ev.status = status;
11031 cmd = find_pairing(conn);
11033 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
11034 cmd ? cmd->sk : NULL);
11037 cmd->cmd_complete(cmd, status);
11038 mgmt_pending_remove(cmd);
11042 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
11044 struct cmd_lookup match = { NULL, hdev };
11048 u8 mgmt_err = mgmt_status(status);
11049 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
11050 cmd_status_rsp, &mgmt_err);
11054 if (test_bit(HCI_AUTH, &hdev->flags))
11055 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
11057 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
11059 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
11063 new_settings(hdev, match.sk);
11066 sock_put(match.sk);
11069 static void clear_eir(struct hci_request *req)
11071 struct hci_dev *hdev = req->hdev;
11072 struct hci_cp_write_eir cp;
11074 if (!lmp_ext_inq_capable(hdev))
11077 memset(hdev->eir, 0, sizeof(hdev->eir));
11079 memset(&cp, 0, sizeof(cp));
11081 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
11084 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
11086 struct cmd_lookup match = { NULL, hdev };
11087 struct hci_request req;
11088 bool changed = false;
11091 u8 mgmt_err = mgmt_status(status);
11093 if (enable && hci_dev_test_and_clear_flag(hdev,
11094 HCI_SSP_ENABLED)) {
11095 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11096 new_settings(hdev, NULL);
11099 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
11105 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
11107 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
11109 changed = hci_dev_test_and_clear_flag(hdev,
11112 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11115 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
11118 new_settings(hdev, match.sk);
11121 sock_put(match.sk);
11123 hci_req_init(&req, hdev);
11125 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
11126 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
11127 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
11128 sizeof(enable), &enable);
11129 __hci_req_update_eir(&req);
11134 hci_req_run(&req, NULL);
11137 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11139 struct cmd_lookup *match = data;
11141 if (match->sk == NULL) {
11142 match->sk = cmd->sk;
11143 sock_hold(match->sk);
11147 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11150 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11152 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11153 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11154 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11157 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11158 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11159 ext_info_changed(hdev, NULL);
11163 sock_put(match.sk);
11166 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11168 struct mgmt_cp_set_local_name ev;
11169 struct mgmt_pending_cmd *cmd;
11174 memset(&ev, 0, sizeof(ev));
11175 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11176 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11178 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11180 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11182 /* If this is a HCI command related to powering on the
11183 * HCI dev don't send any mgmt signals.
11185 if (pending_find(MGMT_OP_SET_POWERED, hdev))
11189 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11190 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11191 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
11194 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11198 for (i = 0; i < uuid_count; i++) {
11199 if (!memcmp(uuid, uuids[i], 16))
11206 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
11210 while (parsed < eir_len) {
11211 u8 field_len = eir[0];
11215 if (field_len == 0)
11218 if (eir_len - parsed < field_len + 1)
11222 case EIR_UUID16_ALL:
11223 case EIR_UUID16_SOME:
11224 for (i = 0; i + 3 <= field_len; i += 2) {
11225 memcpy(uuid, bluetooth_base_uuid, 16);
11226 uuid[13] = eir[i + 3];
11227 uuid[12] = eir[i + 2];
11228 if (has_uuid(uuid, uuid_count, uuids))
11232 case EIR_UUID32_ALL:
11233 case EIR_UUID32_SOME:
11234 for (i = 0; i + 5 <= field_len; i += 4) {
11235 memcpy(uuid, bluetooth_base_uuid, 16);
11236 uuid[15] = eir[i + 5];
11237 uuid[14] = eir[i + 4];
11238 uuid[13] = eir[i + 3];
11239 uuid[12] = eir[i + 2];
11240 if (has_uuid(uuid, uuid_count, uuids))
11244 case EIR_UUID128_ALL:
11245 case EIR_UUID128_SOME:
11246 for (i = 0; i + 17 <= field_len; i += 16) {
11247 memcpy(uuid, eir + i + 2, 16);
11248 if (has_uuid(uuid, uuid_count, uuids))
11254 parsed += field_len + 1;
11255 eir += field_len + 1;
11261 static void restart_le_scan(struct hci_dev *hdev)
11263 /* If controller is not scanning we are done. */
11264 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
11267 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
11268 hdev->discovery.scan_start +
11269 hdev->discovery.scan_duration))
11272 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
11273 DISCOV_LE_RESTART_DELAY);
11276 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
11277 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11279 /* If a RSSI threshold has been specified, and
11280 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
11281 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
11282 * is set, let it through for further processing, as we might need to
11283 * restart the scan.
11285 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
11286 * the results are also dropped.
11288 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11289 (rssi == HCI_RSSI_INVALID ||
11290 (rssi < hdev->discovery.rssi &&
11291 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
11294 if (hdev->discovery.uuid_count != 0) {
11295 /* If a list of UUIDs is provided in filter, results with no
11296 * matching UUID should be dropped.
11298 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
11299 hdev->discovery.uuids) &&
11300 !eir_has_uuids(scan_rsp, scan_rsp_len,
11301 hdev->discovery.uuid_count,
11302 hdev->discovery.uuids))
11306 /* If duplicate filtering does not report RSSI changes, then restart
11307 * scanning to ensure updated result with updated RSSI values.
11309 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
11310 restart_le_scan(hdev);
11312 /* Validate RSSI value against the RSSI threshold once more. */
11313 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11314 rssi < hdev->discovery.rssi)
11321 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11322 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
11323 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11326 struct mgmt_ev_device_found *ev = (void *)buf;
11329 /* Don't send events for a non-kernel initiated discovery. With
11330 * LE one exception is if we have pend_le_reports > 0 in which
11331 * case we're doing passive scanning and want these events.
11333 if (!hci_discovery_active(hdev)) {
11334 if (link_type == ACL_LINK)
11336 if (link_type == LE_LINK &&
11337 list_empty(&hdev->pend_le_reports) &&
11338 !hci_is_adv_monitoring(hdev)) {
11343 if (hdev->discovery.result_filtering) {
11344 /* We are using service discovery */
11345 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
11350 if (hdev->discovery.limited) {
11351 /* Check for limited discoverable bit */
11353 if (!(dev_class[1] & 0x20))
11356 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
11357 if (!flags || !(flags[0] & LE_AD_LIMITED))
11362 /* Make sure that the buffer is big enough. The 5 extra bytes
11363 * are for the potential CoD field.
11365 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
11368 memset(buf, 0, sizeof(buf));
11370 /* In case of device discovery with BR/EDR devices (pre 1.2), the
11371 * RSSI value was reported as 0 when not available. This behavior
11372 * is kept when using device discovery. This is required for full
11373 * backwards compatibility with the API.
11375 * However when using service discovery, the value 127 will be
11376 * returned when the RSSI is not available.
11378 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
11379 link_type == ACL_LINK)
11382 bacpy(&ev->addr.bdaddr, bdaddr);
11383 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11385 ev->flags = cpu_to_le32(flags);
11388 /* Copy EIR or advertising data into event */
11389 memcpy(ev->eir, eir, eir_len);
11391 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11393 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11396 if (scan_rsp_len > 0)
11397 /* Append scan response data to event */
11398 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
11400 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
11401 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
11403 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
11406 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11407 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
11409 struct mgmt_ev_device_found *ev;
11410 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
11413 ev = (struct mgmt_ev_device_found *) buf;
11415 memset(buf, 0, sizeof(buf));
11417 bacpy(&ev->addr.bdaddr, bdaddr);
11418 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11421 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
11424 ev->eir_len = cpu_to_le16(eir_len);
11426 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
11429 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
11431 struct mgmt_ev_discovering ev;
11433 bt_dev_dbg(hdev, "discovering %u", discovering);
11435 memset(&ev, 0, sizeof(ev));
11436 ev.type = hdev->discovery.type;
11437 ev.discovering = discovering;
11439 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
11442 void mgmt_suspending(struct hci_dev *hdev, u8 state)
11444 struct mgmt_ev_controller_suspend ev;
11446 ev.suspend_state = state;
11447 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
11450 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
11453 struct mgmt_ev_controller_resume ev;
11455 ev.wake_reason = reason;
11457 bacpy(&ev.addr.bdaddr, bdaddr);
11458 ev.addr.type = addr_type;
11460 memset(&ev.addr, 0, sizeof(ev.addr));
11463 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Registration record for the HCI control channel: routes mgmt
 * commands to the mgmt_handlers table and installs mgmt_init_hdev on
 * each new controller.
 * NOTE(review): the .tizen_handler_* members come from a Tizen patch
 * set (mgmt_tizen.h is included at the top of this file); they are
 * presumably guarded by a preprocessor conditional not visible in this
 * extraction — confirm against the full file before editing.
 */
11466 static struct hci_mgmt_chan chan = {
11467 .channel = HCI_CHANNEL_CONTROL,
11468 .handler_count = ARRAY_SIZE(mgmt_handlers),
11469 .handlers = mgmt_handlers,
11471 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
11472 .tizen_handlers = tizen_mgmt_handlers,
11474 .hdev_init = mgmt_init_hdev,
11477 int mgmt_init(void)
11479 return hci_mgmt_chan_register(&chan);
11482 void mgmt_exit(void)
11484 hci_mgmt_chan_unregister(&chan);