2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
46 #define MGMT_VERSION 1
47 #define MGMT_REVISION 21
49 static const u16 mgmt_commands[] = {
50 MGMT_OP_READ_INDEX_LIST,
53 MGMT_OP_SET_DISCOVERABLE,
54 MGMT_OP_SET_CONNECTABLE,
55 MGMT_OP_SET_FAST_CONNECTABLE,
57 MGMT_OP_SET_LINK_SECURITY,
61 MGMT_OP_SET_DEV_CLASS,
62 MGMT_OP_SET_LOCAL_NAME,
65 MGMT_OP_LOAD_LINK_KEYS,
66 MGMT_OP_LOAD_LONG_TERM_KEYS,
68 MGMT_OP_GET_CONNECTIONS,
69 MGMT_OP_PIN_CODE_REPLY,
70 MGMT_OP_PIN_CODE_NEG_REPLY,
71 MGMT_OP_SET_IO_CAPABILITY,
73 MGMT_OP_CANCEL_PAIR_DEVICE,
74 MGMT_OP_UNPAIR_DEVICE,
75 MGMT_OP_USER_CONFIRM_REPLY,
76 MGMT_OP_USER_CONFIRM_NEG_REPLY,
77 MGMT_OP_USER_PASSKEY_REPLY,
78 MGMT_OP_USER_PASSKEY_NEG_REPLY,
79 MGMT_OP_READ_LOCAL_OOB_DATA,
80 MGMT_OP_ADD_REMOTE_OOB_DATA,
81 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
82 MGMT_OP_START_DISCOVERY,
83 MGMT_OP_STOP_DISCOVERY,
86 MGMT_OP_UNBLOCK_DEVICE,
87 MGMT_OP_SET_DEVICE_ID,
88 MGMT_OP_SET_ADVERTISING,
90 MGMT_OP_SET_STATIC_ADDRESS,
91 MGMT_OP_SET_SCAN_PARAMS,
92 MGMT_OP_SET_SECURE_CONN,
93 MGMT_OP_SET_DEBUG_KEYS,
96 MGMT_OP_GET_CONN_INFO,
97 MGMT_OP_GET_CLOCK_INFO,
99 MGMT_OP_REMOVE_DEVICE,
100 MGMT_OP_LOAD_CONN_PARAM,
101 MGMT_OP_READ_UNCONF_INDEX_LIST,
102 MGMT_OP_READ_CONFIG_INFO,
103 MGMT_OP_SET_EXTERNAL_CONFIG,
104 MGMT_OP_SET_PUBLIC_ADDRESS,
105 MGMT_OP_START_SERVICE_DISCOVERY,
106 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
107 MGMT_OP_READ_EXT_INDEX_LIST,
108 MGMT_OP_READ_ADV_FEATURES,
109 MGMT_OP_ADD_ADVERTISING,
110 MGMT_OP_REMOVE_ADVERTISING,
111 MGMT_OP_GET_ADV_SIZE_INFO,
112 MGMT_OP_START_LIMITED_DISCOVERY,
113 MGMT_OP_READ_EXT_INFO,
114 MGMT_OP_SET_APPEARANCE,
115 MGMT_OP_GET_PHY_CONFIGURATION,
116 MGMT_OP_SET_PHY_CONFIGURATION,
117 MGMT_OP_SET_BLOCKED_KEYS,
118 MGMT_OP_SET_WIDEBAND_SPEECH,
119 MGMT_OP_READ_CONTROLLER_CAP,
120 MGMT_OP_READ_EXP_FEATURES_INFO,
121 MGMT_OP_SET_EXP_FEATURE,
122 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
123 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
124 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
125 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
126 MGMT_OP_GET_DEVICE_FLAGS,
127 MGMT_OP_SET_DEVICE_FLAGS,
128 MGMT_OP_READ_ADV_MONITOR_FEATURES,
129 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
130 MGMT_OP_REMOVE_ADV_MONITOR,
131 MGMT_OP_ADD_EXT_ADV_PARAMS,
132 MGMT_OP_ADD_EXT_ADV_DATA,
133 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136 static const u16 mgmt_events[] = {
137 MGMT_EV_CONTROLLER_ERROR,
139 MGMT_EV_INDEX_REMOVED,
140 MGMT_EV_NEW_SETTINGS,
141 MGMT_EV_CLASS_OF_DEV_CHANGED,
142 MGMT_EV_LOCAL_NAME_CHANGED,
143 MGMT_EV_NEW_LINK_KEY,
144 MGMT_EV_NEW_LONG_TERM_KEY,
145 MGMT_EV_DEVICE_CONNECTED,
146 MGMT_EV_DEVICE_DISCONNECTED,
147 MGMT_EV_CONNECT_FAILED,
148 MGMT_EV_PIN_CODE_REQUEST,
149 MGMT_EV_USER_CONFIRM_REQUEST,
150 MGMT_EV_USER_PASSKEY_REQUEST,
152 MGMT_EV_DEVICE_FOUND,
154 MGMT_EV_DEVICE_BLOCKED,
155 MGMT_EV_DEVICE_UNBLOCKED,
156 MGMT_EV_DEVICE_UNPAIRED,
157 MGMT_EV_PASSKEY_NOTIFY,
160 MGMT_EV_DEVICE_ADDED,
161 MGMT_EV_DEVICE_REMOVED,
162 MGMT_EV_NEW_CONN_PARAM,
163 MGMT_EV_UNCONF_INDEX_ADDED,
164 MGMT_EV_UNCONF_INDEX_REMOVED,
165 MGMT_EV_NEW_CONFIG_OPTIONS,
166 MGMT_EV_EXT_INDEX_ADDED,
167 MGMT_EV_EXT_INDEX_REMOVED,
168 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
169 MGMT_EV_ADVERTISING_ADDED,
170 MGMT_EV_ADVERTISING_REMOVED,
171 MGMT_EV_EXT_INFO_CHANGED,
172 MGMT_EV_PHY_CONFIGURATION_CHANGED,
173 MGMT_EV_EXP_FEATURE_CHANGED,
174 MGMT_EV_DEVICE_FLAGS_CHANGED,
175 MGMT_EV_ADV_MONITOR_ADDED,
176 MGMT_EV_ADV_MONITOR_REMOVED,
177 MGMT_EV_CONTROLLER_SUSPEND,
178 MGMT_EV_CONTROLLER_RESUME,
181 static const u16 mgmt_untrusted_commands[] = {
182 MGMT_OP_READ_INDEX_LIST,
184 MGMT_OP_READ_UNCONF_INDEX_LIST,
185 MGMT_OP_READ_CONFIG_INFO,
186 MGMT_OP_READ_EXT_INDEX_LIST,
187 MGMT_OP_READ_EXT_INFO,
188 MGMT_OP_READ_CONTROLLER_CAP,
189 MGMT_OP_READ_EXP_FEATURES_INFO,
190 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
191 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
194 static const u16 mgmt_untrusted_events[] = {
196 MGMT_EV_INDEX_REMOVED,
197 MGMT_EV_NEW_SETTINGS,
198 MGMT_EV_CLASS_OF_DEV_CHANGED,
199 MGMT_EV_LOCAL_NAME_CHANGED,
200 MGMT_EV_UNCONF_INDEX_ADDED,
201 MGMT_EV_UNCONF_INDEX_REMOVED,
202 MGMT_EV_NEW_CONFIG_OPTIONS,
203 MGMT_EV_EXT_INDEX_ADDED,
204 MGMT_EV_EXT_INDEX_REMOVED,
205 MGMT_EV_EXT_INFO_CHANGED,
206 MGMT_EV_EXP_FEATURE_CHANGED,
209 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 "\x00\x00\x00\x00\x00\x00\x00\x00"
214 /* HCI to MGMT error code conversion table */
215 static const u8 mgmt_status_table[] = {
217 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
218 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
219 MGMT_STATUS_FAILED, /* Hardware Failure */
220 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
221 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
222 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
223 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
224 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
226 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
227 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
228 MGMT_STATUS_BUSY, /* Command Disallowed */
229 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
230 MGMT_STATUS_REJECTED, /* Rejected Security */
231 MGMT_STATUS_REJECTED, /* Rejected Personal */
232 MGMT_STATUS_TIMEOUT, /* Host Timeout */
233 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
234 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
235 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
236 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
237 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
238 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
239 MGMT_STATUS_BUSY, /* Repeated Attempts */
240 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
241 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
242 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
243 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
244 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
245 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
246 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
247 MGMT_STATUS_FAILED, /* Unspecified Error */
248 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
249 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
250 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
251 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
252 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
253 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
254 MGMT_STATUS_FAILED, /* Unit Link Key Used */
255 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
256 MGMT_STATUS_TIMEOUT, /* Instant Passed */
257 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
258 MGMT_STATUS_FAILED, /* Transaction Collision */
259 MGMT_STATUS_FAILED, /* Reserved for future use */
260 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
261 MGMT_STATUS_REJECTED, /* QoS Rejected */
262 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
263 MGMT_STATUS_REJECTED, /* Insufficient Security */
264 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
265 MGMT_STATUS_FAILED, /* Reserved for future use */
266 MGMT_STATUS_BUSY, /* Role Switch Pending */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_FAILED, /* Slot Violation */
269 MGMT_STATUS_FAILED, /* Role Switch Failed */
270 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
271 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
272 MGMT_STATUS_BUSY, /* Host Busy Pairing */
273 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
274 MGMT_STATUS_BUSY, /* Controller Busy */
275 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
276 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
277 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
278 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
279 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
282 static u8 mgmt_status(u8 hci_status)
284 if (hci_status < ARRAY_SIZE(mgmt_status_table))
285 return mgmt_status_table[hci_status];
287 return MGMT_STATUS_FAILED;
290 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
293 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
297 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
298 u16 len, int flag, struct sock *skip_sk)
300 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
304 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
305 struct sock *skip_sk)
307 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
308 HCI_SOCK_TRUSTED, skip_sk);
311 static u8 le_addr_type(u8 mgmt_addr_type)
313 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
314 return ADDR_LE_DEV_PUBLIC;
316 return ADDR_LE_DEV_RANDOM;
319 void mgmt_fill_version_info(void *ver)
321 struct mgmt_rp_read_version *rp = ver;
323 rp->version = MGMT_VERSION;
324 rp->revision = cpu_to_le16(MGMT_REVISION);
327 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
330 struct mgmt_rp_read_version rp;
332 bt_dev_dbg(hdev, "sock %p", sk);
334 mgmt_fill_version_info(&rp);
336 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
340 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
343 struct mgmt_rp_read_commands *rp;
344 u16 num_commands, num_events;
348 bt_dev_dbg(hdev, "sock %p", sk);
350 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
351 num_commands = ARRAY_SIZE(mgmt_commands);
352 num_events = ARRAY_SIZE(mgmt_events);
354 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
355 num_events = ARRAY_SIZE(mgmt_untrusted_events);
358 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
360 rp = kmalloc(rp_size, GFP_KERNEL);
364 rp->num_commands = cpu_to_le16(num_commands);
365 rp->num_events = cpu_to_le16(num_events);
367 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
368 __le16 *opcode = rp->opcodes;
370 for (i = 0; i < num_commands; i++, opcode++)
371 put_unaligned_le16(mgmt_commands[i], opcode);
373 for (i = 0; i < num_events; i++, opcode++)
374 put_unaligned_le16(mgmt_events[i], opcode);
376 __le16 *opcode = rp->opcodes;
378 for (i = 0; i < num_commands; i++, opcode++)
379 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
381 for (i = 0; i < num_events; i++, opcode++)
382 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
385 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
392 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
395 struct mgmt_rp_read_index_list *rp;
401 bt_dev_dbg(hdev, "sock %p", sk);
403 read_lock(&hci_dev_list_lock);
406 list_for_each_entry(d, &hci_dev_list, list) {
407 if (d->dev_type == HCI_PRIMARY &&
408 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
412 rp_len = sizeof(*rp) + (2 * count);
413 rp = kmalloc(rp_len, GFP_ATOMIC);
415 read_unlock(&hci_dev_list_lock);
420 list_for_each_entry(d, &hci_dev_list, list) {
421 if (hci_dev_test_flag(d, HCI_SETUP) ||
422 hci_dev_test_flag(d, HCI_CONFIG) ||
423 hci_dev_test_flag(d, HCI_USER_CHANNEL))
426 /* Devices marked as raw-only are neither configured
427 * nor unconfigured controllers.
429 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
432 if (d->dev_type == HCI_PRIMARY &&
433 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
434 rp->index[count++] = cpu_to_le16(d->id);
435 bt_dev_dbg(hdev, "Added hci%u", d->id);
439 rp->num_controllers = cpu_to_le16(count);
440 rp_len = sizeof(*rp) + (2 * count);
442 read_unlock(&hci_dev_list_lock);
444 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
452 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
453 void *data, u16 data_len)
455 struct mgmt_rp_read_unconf_index_list *rp;
461 bt_dev_dbg(hdev, "sock %p", sk);
463 read_lock(&hci_dev_list_lock);
466 list_for_each_entry(d, &hci_dev_list, list) {
467 if (d->dev_type == HCI_PRIMARY &&
468 hci_dev_test_flag(d, HCI_UNCONFIGURED))
472 rp_len = sizeof(*rp) + (2 * count);
473 rp = kmalloc(rp_len, GFP_ATOMIC);
475 read_unlock(&hci_dev_list_lock);
480 list_for_each_entry(d, &hci_dev_list, list) {
481 if (hci_dev_test_flag(d, HCI_SETUP) ||
482 hci_dev_test_flag(d, HCI_CONFIG) ||
483 hci_dev_test_flag(d, HCI_USER_CHANNEL))
486 /* Devices marked as raw-only are neither configured
487 * nor unconfigured controllers.
489 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
492 if (d->dev_type == HCI_PRIMARY &&
493 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
494 rp->index[count++] = cpu_to_le16(d->id);
495 bt_dev_dbg(hdev, "Added hci%u", d->id);
499 rp->num_controllers = cpu_to_le16(count);
500 rp_len = sizeof(*rp) + (2 * count);
502 read_unlock(&hci_dev_list_lock);
504 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
505 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
512 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
513 void *data, u16 data_len)
515 struct mgmt_rp_read_ext_index_list *rp;
520 bt_dev_dbg(hdev, "sock %p", sk);
522 read_lock(&hci_dev_list_lock);
525 list_for_each_entry(d, &hci_dev_list, list) {
526 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
530 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
532 read_unlock(&hci_dev_list_lock);
537 list_for_each_entry(d, &hci_dev_list, list) {
538 if (hci_dev_test_flag(d, HCI_SETUP) ||
539 hci_dev_test_flag(d, HCI_CONFIG) ||
540 hci_dev_test_flag(d, HCI_USER_CHANNEL))
543 /* Devices marked as raw-only are neither configured
544 * nor unconfigured controllers.
546 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
549 if (d->dev_type == HCI_PRIMARY) {
550 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
551 rp->entry[count].type = 0x01;
553 rp->entry[count].type = 0x00;
554 } else if (d->dev_type == HCI_AMP) {
555 rp->entry[count].type = 0x02;
560 rp->entry[count].bus = d->bus;
561 rp->entry[count++].index = cpu_to_le16(d->id);
562 bt_dev_dbg(hdev, "Added hci%u", d->id);
565 rp->num_controllers = cpu_to_le16(count);
567 read_unlock(&hci_dev_list_lock);
569 /* If this command is called at least once, then all the
570 * default index and unconfigured index events are disabled
571 * and from now on only extended index events are used.
573 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
574 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
575 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
577 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
578 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
579 struct_size(rp, entry, count));
586 static bool is_configured(struct hci_dev *hdev)
588 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
589 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
592 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
593 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
594 !bacmp(&hdev->public_addr, BDADDR_ANY))
600 static __le32 get_missing_options(struct hci_dev *hdev)
604 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
605 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
606 options |= MGMT_OPTION_EXTERNAL_CONFIG;
608 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
609 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
610 !bacmp(&hdev->public_addr, BDADDR_ANY))
611 options |= MGMT_OPTION_PUBLIC_ADDRESS;
613 return cpu_to_le32(options);
616 static int new_options(struct hci_dev *hdev, struct sock *skip)
618 __le32 options = get_missing_options(hdev);
620 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
621 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
624 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
626 __le32 options = get_missing_options(hdev);
628 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
632 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
633 void *data, u16 data_len)
635 struct mgmt_rp_read_config_info rp;
638 bt_dev_dbg(hdev, "sock %p", sk);
642 memset(&rp, 0, sizeof(rp));
643 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
645 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
646 options |= MGMT_OPTION_EXTERNAL_CONFIG;
648 if (hdev->set_bdaddr)
649 options |= MGMT_OPTION_PUBLIC_ADDRESS;
651 rp.supported_options = cpu_to_le32(options);
652 rp.missing_options = get_missing_options(hdev);
654 hci_dev_unlock(hdev);
656 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
660 static u32 get_supported_phys(struct hci_dev *hdev)
662 u32 supported_phys = 0;
664 if (lmp_bredr_capable(hdev)) {
665 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
667 if (hdev->features[0][0] & LMP_3SLOT)
668 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
670 if (hdev->features[0][0] & LMP_5SLOT)
671 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
673 if (lmp_edr_2m_capable(hdev)) {
674 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
676 if (lmp_edr_3slot_capable(hdev))
677 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
679 if (lmp_edr_5slot_capable(hdev))
680 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
682 if (lmp_edr_3m_capable(hdev)) {
683 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
685 if (lmp_edr_3slot_capable(hdev))
686 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
688 if (lmp_edr_5slot_capable(hdev))
689 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
694 if (lmp_le_capable(hdev)) {
695 supported_phys |= MGMT_PHY_LE_1M_TX;
696 supported_phys |= MGMT_PHY_LE_1M_RX;
698 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
699 supported_phys |= MGMT_PHY_LE_2M_TX;
700 supported_phys |= MGMT_PHY_LE_2M_RX;
703 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
704 supported_phys |= MGMT_PHY_LE_CODED_TX;
705 supported_phys |= MGMT_PHY_LE_CODED_RX;
709 return supported_phys;
712 static u32 get_selected_phys(struct hci_dev *hdev)
714 u32 selected_phys = 0;
716 if (lmp_bredr_capable(hdev)) {
717 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
719 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
720 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
722 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
723 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
725 if (lmp_edr_2m_capable(hdev)) {
726 if (!(hdev->pkt_type & HCI_2DH1))
727 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
729 if (lmp_edr_3slot_capable(hdev) &&
730 !(hdev->pkt_type & HCI_2DH3))
731 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
733 if (lmp_edr_5slot_capable(hdev) &&
734 !(hdev->pkt_type & HCI_2DH5))
735 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
737 if (lmp_edr_3m_capable(hdev)) {
738 if (!(hdev->pkt_type & HCI_3DH1))
739 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
741 if (lmp_edr_3slot_capable(hdev) &&
742 !(hdev->pkt_type & HCI_3DH3))
743 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
745 if (lmp_edr_5slot_capable(hdev) &&
746 !(hdev->pkt_type & HCI_3DH5))
747 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
752 if (lmp_le_capable(hdev)) {
753 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
754 selected_phys |= MGMT_PHY_LE_1M_TX;
756 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
757 selected_phys |= MGMT_PHY_LE_1M_RX;
759 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
760 selected_phys |= MGMT_PHY_LE_2M_TX;
762 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
763 selected_phys |= MGMT_PHY_LE_2M_RX;
765 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
766 selected_phys |= MGMT_PHY_LE_CODED_TX;
768 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
769 selected_phys |= MGMT_PHY_LE_CODED_RX;
772 return selected_phys;
775 static u32 get_configurable_phys(struct hci_dev *hdev)
777 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
778 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
781 static u32 get_supported_settings(struct hci_dev *hdev)
785 settings |= MGMT_SETTING_POWERED;
786 settings |= MGMT_SETTING_BONDABLE;
787 settings |= MGMT_SETTING_DEBUG_KEYS;
788 settings |= MGMT_SETTING_CONNECTABLE;
789 settings |= MGMT_SETTING_DISCOVERABLE;
791 if (lmp_bredr_capable(hdev)) {
792 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
793 settings |= MGMT_SETTING_FAST_CONNECTABLE;
794 settings |= MGMT_SETTING_BREDR;
795 settings |= MGMT_SETTING_LINK_SECURITY;
797 if (lmp_ssp_capable(hdev)) {
798 settings |= MGMT_SETTING_SSP;
799 if (IS_ENABLED(CONFIG_BT_HS))
800 settings |= MGMT_SETTING_HS;
803 if (lmp_sc_capable(hdev))
804 settings |= MGMT_SETTING_SECURE_CONN;
806 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
808 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
811 if (lmp_le_capable(hdev)) {
812 settings |= MGMT_SETTING_LE;
813 settings |= MGMT_SETTING_SECURE_CONN;
814 settings |= MGMT_SETTING_PRIVACY;
815 settings |= MGMT_SETTING_STATIC_ADDRESS;
817 /* When the experimental feature for LL Privacy support is
818 * enabled, then advertising is no longer supported.
820 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
821 settings |= MGMT_SETTING_ADVERTISING;
824 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
826 settings |= MGMT_SETTING_CONFIGURATION;
828 settings |= MGMT_SETTING_PHY_CONFIGURATION;
833 static u32 get_current_settings(struct hci_dev *hdev)
837 if (hdev_is_powered(hdev))
838 settings |= MGMT_SETTING_POWERED;
840 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
841 settings |= MGMT_SETTING_CONNECTABLE;
843 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
844 settings |= MGMT_SETTING_FAST_CONNECTABLE;
846 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
847 settings |= MGMT_SETTING_DISCOVERABLE;
849 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
850 settings |= MGMT_SETTING_BONDABLE;
852 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
853 settings |= MGMT_SETTING_BREDR;
855 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
856 settings |= MGMT_SETTING_LE;
858 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
859 settings |= MGMT_SETTING_LINK_SECURITY;
861 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
862 settings |= MGMT_SETTING_SSP;
864 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
865 settings |= MGMT_SETTING_HS;
867 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
868 settings |= MGMT_SETTING_ADVERTISING;
870 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
871 settings |= MGMT_SETTING_SECURE_CONN;
873 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
874 settings |= MGMT_SETTING_DEBUG_KEYS;
876 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
877 settings |= MGMT_SETTING_PRIVACY;
879 /* The current setting for static address has two purposes. The
880 * first is to indicate if the static address will be used and
881 * the second is to indicate if it is actually set.
883 * This means if the static address is not configured, this flag
884 * will never be set. If the address is configured, then if the
885 * address is actually used decides if the flag is set or not.
887 * For single mode LE only controllers and dual-mode controllers
888 * with BR/EDR disabled, the existence of the static address will
891 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
892 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
893 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
894 if (bacmp(&hdev->static_addr, BDADDR_ANY))
895 settings |= MGMT_SETTING_STATIC_ADDRESS;
898 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
899 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
904 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
906 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
909 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
910 struct hci_dev *hdev,
913 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
916 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
918 struct mgmt_pending_cmd *cmd;
920 /* If there's a pending mgmt command the flags will not yet have
921 * their final values, so check for this first.
923 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
925 struct mgmt_mode *cp = cmd->param;
927 return LE_AD_GENERAL;
928 else if (cp->val == 0x02)
929 return LE_AD_LIMITED;
931 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
932 return LE_AD_LIMITED;
933 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
934 return LE_AD_GENERAL;
940 bool mgmt_get_connectable(struct hci_dev *hdev)
942 struct mgmt_pending_cmd *cmd;
944 /* If there's a pending mgmt command the flag will not yet have
945 * it's final value, so check for this first.
947 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
949 struct mgmt_mode *cp = cmd->param;
954 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
957 static void service_cache_off(struct work_struct *work)
959 struct hci_dev *hdev = container_of(work, struct hci_dev,
961 struct hci_request req;
963 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
966 hci_req_init(&req, hdev);
970 __hci_req_update_eir(&req);
971 __hci_req_update_class(&req);
973 hci_dev_unlock(hdev);
975 hci_req_run(&req, NULL);
978 static void rpa_expired(struct work_struct *work)
980 struct hci_dev *hdev = container_of(work, struct hci_dev,
982 struct hci_request req;
984 bt_dev_dbg(hdev, "");
986 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
988 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
991 /* The generation of a new RPA and programming it into the
992 * controller happens in the hci_req_enable_advertising()
995 hci_req_init(&req, hdev);
996 if (ext_adv_capable(hdev))
997 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
999 __hci_req_enable_advertising(&req);
1000 hci_req_run(&req, NULL);
1003 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1005 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1008 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1009 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1011 /* Non-mgmt controlled devices get this bit set
1012 * implicitly so that pairing works for them, however
1013 * for mgmt we require user-space to explicitly enable
1016 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1019 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1020 void *data, u16 data_len)
1022 struct mgmt_rp_read_info rp;
1024 bt_dev_dbg(hdev, "sock %p", sk);
1028 memset(&rp, 0, sizeof(rp));
1030 bacpy(&rp.bdaddr, &hdev->bdaddr);
1032 rp.version = hdev->hci_ver;
1033 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1035 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1036 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1038 memcpy(rp.dev_class, hdev->dev_class, 3);
1040 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1041 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1043 hci_dev_unlock(hdev);
1045 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1049 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1054 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1055 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1056 hdev->dev_class, 3);
1058 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1059 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1062 name_len = strlen(hdev->dev_name);
1063 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1064 hdev->dev_name, name_len);
1066 name_len = strlen(hdev->short_name);
1067 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1068 hdev->short_name, name_len);
1073 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1074 void *data, u16 data_len)
1077 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1080 bt_dev_dbg(hdev, "sock %p", sk);
1082 memset(&buf, 0, sizeof(buf));
1086 bacpy(&rp->bdaddr, &hdev->bdaddr);
1088 rp->version = hdev->hci_ver;
1089 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1091 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1092 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1095 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1096 rp->eir_len = cpu_to_le16(eir_len);
1098 hci_dev_unlock(hdev);
1100 /* If this command is called at least once, then the events
1101 * for class of device and local name changes are disabled
1102 * and only the new extended controller information event
1105 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1106 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1107 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1109 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1110 sizeof(*rp) + eir_len);
1113 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1116 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1119 memset(buf, 0, sizeof(buf));
1121 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1122 ev->eir_len = cpu_to_le16(eir_len);
1124 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1125 sizeof(*ev) + eir_len,
1126 HCI_MGMT_EXT_INFO_EVENTS, skip);
1129 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1131 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1133 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1137 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1139 bt_dev_dbg(hdev, "status 0x%02x", status);
1141 if (hci_conn_count(hdev) == 0) {
1142 cancel_delayed_work(&hdev->power_off);
1143 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1147 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1149 struct mgmt_ev_advertising_added ev;
1151 ev.instance = instance;
1153 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1156 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1159 struct mgmt_ev_advertising_removed ev;
1161 ev.instance = instance;
1163 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1166 static void cancel_adv_timeout(struct hci_dev *hdev)
1168 if (hdev->adv_instance_timeout) {
1169 hdev->adv_instance_timeout = 0;
1170 cancel_delayed_work(&hdev->adv_instance_expire);
1174 static int clean_up_hci_state(struct hci_dev *hdev)
1176 struct hci_request req;
1177 struct hci_conn *conn;
1178 bool discov_stopped;
1181 hci_req_init(&req, hdev);
1183 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1184 test_bit(HCI_PSCAN, &hdev->flags)) {
1186 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1189 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1191 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1192 __hci_req_disable_advertising(&req);
1194 discov_stopped = hci_req_stop_discovery(&req);
1196 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1197 /* 0x15 == Terminated due to Power Off */
1198 __hci_abort_conn(&req, conn, 0x15);
1201 err = hci_req_run(&req, clean_up_hci_complete);
1202 if (!err && discov_stopped)
1203 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1208 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1211 struct mgmt_mode *cp = data;
1212 struct mgmt_pending_cmd *cmd;
1215 bt_dev_dbg(hdev, "sock %p", sk);
1217 if (cp->val != 0x00 && cp->val != 0x01)
1218 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1219 MGMT_STATUS_INVALID_PARAMS);
1223 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1224 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1229 if (!!cp->val == hdev_is_powered(hdev)) {
1230 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1234 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1241 queue_work(hdev->req_workqueue, &hdev->power_on);
1244 /* Disconnect connections, stop scans, etc */
1245 err = clean_up_hci_state(hdev);
1247 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1248 HCI_POWER_OFF_TIMEOUT);
1250 /* ENODATA means there were no HCI commands queued */
1251 if (err == -ENODATA) {
1252 cancel_delayed_work(&hdev->power_off);
1253 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1259 hci_dev_unlock(hdev);
1263 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1265 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1267 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1268 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1271 int mgmt_new_settings(struct hci_dev *hdev)
1273 return new_settings(hdev, NULL);
1278 struct hci_dev *hdev;
1282 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1284 struct cmd_lookup *match = data;
1286 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1288 list_del(&cmd->list);
1290 if (match->sk == NULL) {
1291 match->sk = cmd->sk;
1292 sock_hold(match->sk);
1295 mgmt_pending_free(cmd);
1298 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1302 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1303 mgmt_pending_remove(cmd);
1306 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1308 if (cmd->cmd_complete) {
1311 cmd->cmd_complete(cmd, *status);
1312 mgmt_pending_remove(cmd);
1317 cmd_status_rsp(cmd, data);
1320 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1322 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1323 cmd->param, cmd->param_len);
1326 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1328 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1329 cmd->param, sizeof(struct mgmt_addr_info));
1332 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1334 if (!lmp_bredr_capable(hdev))
1335 return MGMT_STATUS_NOT_SUPPORTED;
1336 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1337 return MGMT_STATUS_REJECTED;
1339 return MGMT_STATUS_SUCCESS;
1342 static u8 mgmt_le_support(struct hci_dev *hdev)
1344 if (!lmp_le_capable(hdev))
1345 return MGMT_STATUS_NOT_SUPPORTED;
1346 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1347 return MGMT_STATUS_REJECTED;
1349 return MGMT_STATUS_SUCCESS;
1352 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1354 struct mgmt_pending_cmd *cmd;
1356 bt_dev_dbg(hdev, "status 0x%02x", status);
1360 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1365 u8 mgmt_err = mgmt_status(status);
1366 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1367 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1371 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1372 hdev->discov_timeout > 0) {
1373 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1374 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1377 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1378 new_settings(hdev, cmd->sk);
1381 mgmt_pending_remove(cmd);
1384 hci_dev_unlock(hdev);
1387 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1390 struct mgmt_cp_set_discoverable *cp = data;
1391 struct mgmt_pending_cmd *cmd;
1395 bt_dev_dbg(hdev, "sock %p", sk);
1397 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1398 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1400 MGMT_STATUS_REJECTED);
1402 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1403 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1404 MGMT_STATUS_INVALID_PARAMS);
1406 timeout = __le16_to_cpu(cp->timeout);
1408 /* Disabling discoverable requires that no timeout is set,
1409 * and enabling limited discoverable requires a timeout.
1411 if ((cp->val == 0x00 && timeout > 0) ||
1412 (cp->val == 0x02 && timeout == 0))
1413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1414 MGMT_STATUS_INVALID_PARAMS);
1418 if (!hdev_is_powered(hdev) && timeout > 0) {
1419 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1420 MGMT_STATUS_NOT_POWERED);
1424 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1425 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1431 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1432 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1433 MGMT_STATUS_REJECTED);
1437 if (hdev->advertising_paused) {
1438 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1443 if (!hdev_is_powered(hdev)) {
1444 bool changed = false;
1446 /* Setting limited discoverable when powered off is
1447 * not a valid operation since it requires a timeout
1448 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1450 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1451 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1455 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1460 err = new_settings(hdev, sk);
1465 /* If the current mode is the same, then just update the timeout
1466 * value with the new value. And if only the timeout gets updated,
1467 * then no need for any HCI transactions.
1469 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1470 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1471 HCI_LIMITED_DISCOVERABLE)) {
1472 cancel_delayed_work(&hdev->discov_off);
1473 hdev->discov_timeout = timeout;
1475 if (cp->val && hdev->discov_timeout > 0) {
1476 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1477 queue_delayed_work(hdev->req_workqueue,
1478 &hdev->discov_off, to);
1481 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1485 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1491 /* Cancel any potential discoverable timeout that might be
1492 * still active and store new timeout value. The arming of
1493 * the timeout happens in the complete handler.
1495 cancel_delayed_work(&hdev->discov_off);
1496 hdev->discov_timeout = timeout;
1499 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1501 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1503 /* Limited discoverable mode */
1504 if (cp->val == 0x02)
1505 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1507 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1509 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1513 hci_dev_unlock(hdev);
1517 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1519 struct mgmt_pending_cmd *cmd;
1521 bt_dev_dbg(hdev, "status 0x%02x", status);
1525 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1530 u8 mgmt_err = mgmt_status(status);
1531 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1535 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1536 new_settings(hdev, cmd->sk);
1539 mgmt_pending_remove(cmd);
1542 hci_dev_unlock(hdev);
1545 static int set_connectable_update_settings(struct hci_dev *hdev,
1546 struct sock *sk, u8 val)
1548 bool changed = false;
1551 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1555 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1557 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1558 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1561 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1566 hci_req_update_scan(hdev);
1567 hci_update_background_scan(hdev);
1568 return new_settings(hdev, sk);
1574 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1577 struct mgmt_mode *cp = data;
1578 struct mgmt_pending_cmd *cmd;
1581 bt_dev_dbg(hdev, "sock %p", sk);
1583 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1584 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1586 MGMT_STATUS_REJECTED);
1588 if (cp->val != 0x00 && cp->val != 0x01)
1589 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1590 MGMT_STATUS_INVALID_PARAMS);
1594 if (!hdev_is_powered(hdev)) {
1595 err = set_connectable_update_settings(hdev, sk, cp->val);
1599 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1600 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1601 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1606 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1613 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1615 if (hdev->discov_timeout > 0)
1616 cancel_delayed_work(&hdev->discov_off);
1618 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1619 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1620 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1623 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1627 hci_dev_unlock(hdev);
1631 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1634 struct mgmt_mode *cp = data;
1638 bt_dev_dbg(hdev, "sock %p", sk);
1640 if (cp->val != 0x00 && cp->val != 0x01)
1641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1642 MGMT_STATUS_INVALID_PARAMS);
1647 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1649 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1651 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1656 /* In limited privacy mode the change of bondable mode
1657 * may affect the local advertising address.
1659 if (hdev_is_powered(hdev) &&
1660 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1661 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1662 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1663 queue_work(hdev->req_workqueue,
1664 &hdev->discoverable_update);
1666 err = new_settings(hdev, sk);
1670 hci_dev_unlock(hdev);
1674 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1677 struct mgmt_mode *cp = data;
1678 struct mgmt_pending_cmd *cmd;
1682 bt_dev_dbg(hdev, "sock %p", sk);
1684 status = mgmt_bredr_support(hdev);
1686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1689 if (cp->val != 0x00 && cp->val != 0x01)
1690 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1691 MGMT_STATUS_INVALID_PARAMS);
1695 if (!hdev_is_powered(hdev)) {
1696 bool changed = false;
1698 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1699 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1703 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1708 err = new_settings(hdev, sk);
1713 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1714 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1721 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1722 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1726 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1732 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1734 mgmt_pending_remove(cmd);
1739 hci_dev_unlock(hdev);
1743 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1745 struct mgmt_mode *cp = data;
1746 struct mgmt_pending_cmd *cmd;
1750 bt_dev_dbg(hdev, "sock %p", sk);
1752 status = mgmt_bredr_support(hdev);
1754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1756 if (!lmp_ssp_capable(hdev))
1757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1758 MGMT_STATUS_NOT_SUPPORTED);
1760 if (cp->val != 0x00 && cp->val != 0x01)
1761 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1762 MGMT_STATUS_INVALID_PARAMS);
1766 if (!hdev_is_powered(hdev)) {
1770 changed = !hci_dev_test_and_set_flag(hdev,
1773 changed = hci_dev_test_and_clear_flag(hdev,
1776 changed = hci_dev_test_and_clear_flag(hdev,
1779 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1782 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1787 err = new_settings(hdev, sk);
1792 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1793 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1798 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1799 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1803 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1809 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1810 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1811 sizeof(cp->val), &cp->val);
1813 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1815 mgmt_pending_remove(cmd);
1820 hci_dev_unlock(hdev);
1824 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1826 struct mgmt_mode *cp = data;
1831 bt_dev_dbg(hdev, "sock %p", sk);
1833 if (!IS_ENABLED(CONFIG_BT_HS))
1834 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1835 MGMT_STATUS_NOT_SUPPORTED);
1837 status = mgmt_bredr_support(hdev);
1839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1841 if (!lmp_ssp_capable(hdev))
1842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1843 MGMT_STATUS_NOT_SUPPORTED);
1845 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1847 MGMT_STATUS_REJECTED);
1849 if (cp->val != 0x00 && cp->val != 0x01)
1850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1851 MGMT_STATUS_INVALID_PARAMS);
1855 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1862 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1864 if (hdev_is_powered(hdev)) {
1865 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1866 MGMT_STATUS_REJECTED);
1870 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1873 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1878 err = new_settings(hdev, sk);
1881 hci_dev_unlock(hdev);
1885 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1887 struct cmd_lookup match = { NULL, hdev };
1892 u8 mgmt_err = mgmt_status(status);
1894 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1899 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1901 new_settings(hdev, match.sk);
1906 /* Make sure the controller has a good default for
1907 * advertising data. Restrict the update to when LE
1908 * has actually been enabled. During power on, the
1909 * update in powered_update_hci will take care of it.
1911 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1912 struct hci_request req;
1913 hci_req_init(&req, hdev);
1914 if (ext_adv_capable(hdev)) {
1917 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1919 __hci_req_update_scan_rsp_data(&req, 0x00);
1921 __hci_req_update_adv_data(&req, 0x00);
1922 __hci_req_update_scan_rsp_data(&req, 0x00);
1924 hci_req_run(&req, NULL);
1925 hci_update_background_scan(hdev);
1929 hci_dev_unlock(hdev);
1932 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1934 struct mgmt_mode *cp = data;
1935 struct hci_cp_write_le_host_supported hci_cp;
1936 struct mgmt_pending_cmd *cmd;
1937 struct hci_request req;
1941 bt_dev_dbg(hdev, "sock %p", sk);
1943 if (!lmp_le_capable(hdev))
1944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1945 MGMT_STATUS_NOT_SUPPORTED);
1947 if (cp->val != 0x00 && cp->val != 0x01)
1948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1949 MGMT_STATUS_INVALID_PARAMS);
1951 /* Bluetooth single mode LE only controllers or dual-mode
1952 * controllers configured as LE only devices, do not allow
1953 * switching LE off. These have either LE enabled explicitly
1954 * or BR/EDR has been previously switched off.
1956 * When trying to enable an already enabled LE, then gracefully
1957 * send a positive response. Trying to disable it however will
1958 * result into rejection.
1960 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1961 if (cp->val == 0x01)
1962 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1964 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1965 MGMT_STATUS_REJECTED);
1971 enabled = lmp_host_le_capable(hdev);
1974 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1976 if (!hdev_is_powered(hdev) || val == enabled) {
1977 bool changed = false;
1979 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1980 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1984 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1985 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1989 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1994 err = new_settings(hdev, sk);
1999 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2000 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2006 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2012 hci_req_init(&req, hdev);
2014 memset(&hci_cp, 0, sizeof(hci_cp));
2018 hci_cp.simul = 0x00;
2020 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2021 __hci_req_disable_advertising(&req);
2023 if (ext_adv_capable(hdev))
2024 __hci_req_clear_ext_adv_sets(&req);
2027 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2030 err = hci_req_run(&req, le_enable_complete);
2032 mgmt_pending_remove(cmd);
2035 hci_dev_unlock(hdev);
2039 /* This is a helper function to test for pending mgmt commands that can
2040 * cause CoD or EIR HCI commands. We can only allow one such pending
2041 * mgmt command at a time since otherwise we cannot easily track what
2042 * the current values are, will be, and based on that calculate if a new
2043 * HCI command needs to be sent and if yes with what value.
2045 static bool pending_eir_or_class(struct hci_dev *hdev)
2047 struct mgmt_pending_cmd *cmd;
2049 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2050 switch (cmd->opcode) {
2051 case MGMT_OP_ADD_UUID:
2052 case MGMT_OP_REMOVE_UUID:
2053 case MGMT_OP_SET_DEV_CLASS:
2054 case MGMT_OP_SET_POWERED:
2062 static const u8 bluetooth_base_uuid[] = {
2063 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2064 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2067 static u8 get_uuid_size(const u8 *uuid)
2071 if (memcmp(uuid, bluetooth_base_uuid, 12))
2074 val = get_unaligned_le32(&uuid[12]);
2081 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2083 struct mgmt_pending_cmd *cmd;
2087 cmd = pending_find(mgmt_op, hdev);
2091 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2092 mgmt_status(status), hdev->dev_class, 3);
2094 mgmt_pending_remove(cmd);
2097 hci_dev_unlock(hdev);
2100 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2102 bt_dev_dbg(hdev, "status 0x%02x", status);
2104 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2107 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2109 struct mgmt_cp_add_uuid *cp = data;
2110 struct mgmt_pending_cmd *cmd;
2111 struct hci_request req;
2112 struct bt_uuid *uuid;
2115 bt_dev_dbg(hdev, "sock %p", sk);
2119 if (pending_eir_or_class(hdev)) {
2120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2125 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2131 memcpy(uuid->uuid, cp->uuid, 16);
2132 uuid->svc_hint = cp->svc_hint;
2133 uuid->size = get_uuid_size(cp->uuid);
2135 list_add_tail(&uuid->list, &hdev->uuids);
2137 hci_req_init(&req, hdev);
2139 __hci_req_update_class(&req);
2140 __hci_req_update_eir(&req);
2142 err = hci_req_run(&req, add_uuid_complete);
2144 if (err != -ENODATA)
2147 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2148 hdev->dev_class, 3);
2152 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2161 hci_dev_unlock(hdev);
2165 static bool enable_service_cache(struct hci_dev *hdev)
2167 if (!hdev_is_powered(hdev))
2170 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2171 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2179 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2181 bt_dev_dbg(hdev, "status 0x%02x", status);
2183 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2186 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2189 struct mgmt_cp_remove_uuid *cp = data;
2190 struct mgmt_pending_cmd *cmd;
2191 struct bt_uuid *match, *tmp;
2192 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2193 struct hci_request req;
2196 bt_dev_dbg(hdev, "sock %p", sk);
2200 if (pending_eir_or_class(hdev)) {
2201 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2206 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2207 hci_uuids_clear(hdev);
2209 if (enable_service_cache(hdev)) {
2210 err = mgmt_cmd_complete(sk, hdev->id,
2211 MGMT_OP_REMOVE_UUID,
2212 0, hdev->dev_class, 3);
2221 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2222 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2225 list_del(&match->list);
2231 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2232 MGMT_STATUS_INVALID_PARAMS);
2237 hci_req_init(&req, hdev);
2239 __hci_req_update_class(&req);
2240 __hci_req_update_eir(&req);
2242 err = hci_req_run(&req, remove_uuid_complete);
2244 if (err != -ENODATA)
2247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2248 hdev->dev_class, 3);
2252 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2261 hci_dev_unlock(hdev);
2265 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2267 bt_dev_dbg(hdev, "status 0x%02x", status);
2269 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2272 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2275 struct mgmt_cp_set_dev_class *cp = data;
2276 struct mgmt_pending_cmd *cmd;
2277 struct hci_request req;
2280 bt_dev_dbg(hdev, "sock %p", sk);
2282 if (!lmp_bredr_capable(hdev))
2283 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2284 MGMT_STATUS_NOT_SUPPORTED);
2288 if (pending_eir_or_class(hdev)) {
2289 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2294 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2295 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2296 MGMT_STATUS_INVALID_PARAMS);
2300 hdev->major_class = cp->major;
2301 hdev->minor_class = cp->minor;
2303 if (!hdev_is_powered(hdev)) {
2304 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2305 hdev->dev_class, 3);
2309 hci_req_init(&req, hdev);
2311 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2312 hci_dev_unlock(hdev);
2313 cancel_delayed_work_sync(&hdev->service_cache);
2315 __hci_req_update_eir(&req);
2318 __hci_req_update_class(&req);
2320 err = hci_req_run(&req, set_class_complete);
2322 if (err != -ENODATA)
2325 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2326 hdev->dev_class, 3);
2330 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2339 hci_dev_unlock(hdev);
2343 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2346 struct mgmt_cp_load_link_keys *cp = data;
2347 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2348 sizeof(struct mgmt_link_key_info));
2349 u16 key_count, expected_len;
2353 bt_dev_dbg(hdev, "sock %p", sk);
2355 if (!lmp_bredr_capable(hdev))
2356 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2357 MGMT_STATUS_NOT_SUPPORTED);
2359 key_count = __le16_to_cpu(cp->key_count);
2360 if (key_count > max_key_count) {
2361 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2364 MGMT_STATUS_INVALID_PARAMS);
2367 expected_len = struct_size(cp, keys, key_count);
2368 if (expected_len != len) {
2369 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2372 MGMT_STATUS_INVALID_PARAMS);
2375 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2376 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2377 MGMT_STATUS_INVALID_PARAMS);
2379 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2382 for (i = 0; i < key_count; i++) {
2383 struct mgmt_link_key_info *key = &cp->keys[i];
2385 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2386 return mgmt_cmd_status(sk, hdev->id,
2387 MGMT_OP_LOAD_LINK_KEYS,
2388 MGMT_STATUS_INVALID_PARAMS);
2393 hci_link_keys_clear(hdev);
2396 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2398 changed = hci_dev_test_and_clear_flag(hdev,
2399 HCI_KEEP_DEBUG_KEYS);
2402 new_settings(hdev, NULL);
2404 for (i = 0; i < key_count; i++) {
2405 struct mgmt_link_key_info *key = &cp->keys[i];
2407 if (hci_is_blocked_key(hdev,
2408 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2410 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2415 /* Always ignore debug keys and require a new pairing if
2416 * the user wants to use them.
2418 if (key->type == HCI_LK_DEBUG_COMBINATION)
2421 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2422 key->type, key->pin_len, NULL);
2425 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2427 hci_dev_unlock(hdev);
2432 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2433 u8 addr_type, struct sock *skip_sk)
2435 struct mgmt_ev_device_unpaired ev;
2437 bacpy(&ev.addr.bdaddr, bdaddr);
2438 ev.addr.type = addr_type;
2440 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2444 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2447 struct mgmt_cp_unpair_device *cp = data;
2448 struct mgmt_rp_unpair_device rp;
2449 struct hci_conn_params *params;
2450 struct mgmt_pending_cmd *cmd;
2451 struct hci_conn *conn;
2455 memset(&rp, 0, sizeof(rp));
2456 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2457 rp.addr.type = cp->addr.type;
2459 if (!bdaddr_type_is_valid(cp->addr.type))
2460 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2461 MGMT_STATUS_INVALID_PARAMS,
2464 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2465 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2466 MGMT_STATUS_INVALID_PARAMS,
2471 if (!hdev_is_powered(hdev)) {
2472 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2473 MGMT_STATUS_NOT_POWERED, &rp,
2478 if (cp->addr.type == BDADDR_BREDR) {
2479 /* If disconnection is requested, then look up the
2480 * connection. If the remote device is connected, it
2481 * will be later used to terminate the link.
2483 * Setting it to NULL explicitly will cause no
2484 * termination of the link.
2487 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2492 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2494 err = mgmt_cmd_complete(sk, hdev->id,
2495 MGMT_OP_UNPAIR_DEVICE,
2496 MGMT_STATUS_NOT_PAIRED, &rp,
2504 /* LE address type */
2505 addr_type = le_addr_type(cp->addr.type);
2507 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2508 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2510 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2511 MGMT_STATUS_NOT_PAIRED, &rp,
2516 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2518 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2523 /* Defer clearing up the connection parameters until closing to
2524 * give a chance of keeping them if a repairing happens.
2526 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2528 /* Disable auto-connection parameters if present */
2529 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2531 if (params->explicit_connect)
2532 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2534 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2537 /* If disconnection is not requested, then clear the connection
2538 * variable so that the link is not terminated.
2540 if (!cp->disconnect)
2544 /* If the connection variable is set, then termination of the
2545 * link is requested.
2548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2550 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2554 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2561 cmd->cmd_complete = addr_cmd_complete;
2563 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2565 mgmt_pending_remove(cmd);
2568 hci_dev_unlock(hdev);
2572 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2575 struct mgmt_cp_disconnect *cp = data;
2576 struct mgmt_rp_disconnect rp;
2577 struct mgmt_pending_cmd *cmd;
2578 struct hci_conn *conn;
2581 bt_dev_dbg(hdev, "sock %p", sk);
2583 memset(&rp, 0, sizeof(rp));
2584 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2585 rp.addr.type = cp->addr.type;
2587 if (!bdaddr_type_is_valid(cp->addr.type))
2588 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2589 MGMT_STATUS_INVALID_PARAMS,
2594 if (!test_bit(HCI_UP, &hdev->flags)) {
2595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2596 MGMT_STATUS_NOT_POWERED, &rp,
2601 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2602 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2603 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2607 if (cp->addr.type == BDADDR_BREDR)
2608 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2611 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2612 le_addr_type(cp->addr.type));
2614 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2615 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2616 MGMT_STATUS_NOT_CONNECTED, &rp,
2621 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2627 cmd->cmd_complete = generic_cmd_complete;
2629 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2631 mgmt_pending_remove(cmd);
2634 hci_dev_unlock(hdev);
2638 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2640 switch (link_type) {
2642 switch (addr_type) {
2643 case ADDR_LE_DEV_PUBLIC:
2644 return BDADDR_LE_PUBLIC;
2647 /* Fallback to LE Random address type */
2648 return BDADDR_LE_RANDOM;
2652 /* Fallback to BR/EDR type */
2653 return BDADDR_BREDR;
2657 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2660 struct mgmt_rp_get_connections *rp;
2665 bt_dev_dbg(hdev, "sock %p", sk);
2669 if (!hdev_is_powered(hdev)) {
2670 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2671 MGMT_STATUS_NOT_POWERED);
2676 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2677 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2681 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2688 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2689 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2691 bacpy(&rp->addr[i].bdaddr, &c->dst);
2692 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2693 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2698 rp->conn_count = cpu_to_le16(i);
2700 /* Recalculate length in case of filtered SCO connections, etc */
2701 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2702 struct_size(rp, addr, i));
2707 hci_dev_unlock(hdev);
2711 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2712 struct mgmt_cp_pin_code_neg_reply *cp)
2714 struct mgmt_pending_cmd *cmd;
2717 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2722 cmd->cmd_complete = addr_cmd_complete;
2724 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2725 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2727 mgmt_pending_remove(cmd);
2732 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2735 struct hci_conn *conn;
2736 struct mgmt_cp_pin_code_reply *cp = data;
2737 struct hci_cp_pin_code_reply reply;
2738 struct mgmt_pending_cmd *cmd;
2741 bt_dev_dbg(hdev, "sock %p", sk);
2745 if (!hdev_is_powered(hdev)) {
2746 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2747 MGMT_STATUS_NOT_POWERED);
2751 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2754 MGMT_STATUS_NOT_CONNECTED);
2758 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2759 struct mgmt_cp_pin_code_neg_reply ncp;
2761 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2763 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2765 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2767 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2768 MGMT_STATUS_INVALID_PARAMS);
2773 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2779 cmd->cmd_complete = addr_cmd_complete;
2781 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2782 reply.pin_len = cp->pin_len;
2783 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2785 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2787 mgmt_pending_remove(cmd);
2790 hci_dev_unlock(hdev);
2794 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2797 struct mgmt_cp_set_io_capability *cp = data;
2799 bt_dev_dbg(hdev, "sock %p", sk);
2801 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2803 MGMT_STATUS_INVALID_PARAMS);
2807 hdev->io_capability = cp->io_capability;
2809 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2811 hci_dev_unlock(hdev);
2813 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2817 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2819 struct hci_dev *hdev = conn->hdev;
2820 struct mgmt_pending_cmd *cmd;
2822 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2823 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2826 if (cmd->user_data != conn)
2835 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2837 struct mgmt_rp_pair_device rp;
2838 struct hci_conn *conn = cmd->user_data;
2841 bacpy(&rp.addr.bdaddr, &conn->dst);
2842 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2844 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2845 status, &rp, sizeof(rp));
2847 /* So we don't get further callbacks for this connection */
2848 conn->connect_cfm_cb = NULL;
2849 conn->security_cfm_cb = NULL;
2850 conn->disconn_cfm_cb = NULL;
2852 hci_conn_drop(conn);
2854 /* The device is paired so there is no need to remove
2855 * its connection parameters anymore.
2857 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2864 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2866 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2867 struct mgmt_pending_cmd *cmd;
2869 cmd = find_pairing(conn);
2871 cmd->cmd_complete(cmd, status);
2872 mgmt_pending_remove(cmd);
2876 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2878 struct mgmt_pending_cmd *cmd;
2880 BT_DBG("status %u", status);
2882 cmd = find_pairing(conn);
2884 BT_DBG("Unable to find a pending command");
2888 cmd->cmd_complete(cmd, mgmt_status(status));
2889 mgmt_pending_remove(cmd);
2892 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2894 struct mgmt_pending_cmd *cmd;
2896 BT_DBG("status %u", status);
2901 cmd = find_pairing(conn);
2903 BT_DBG("Unable to find a pending command");
2907 cmd->cmd_complete(cmd, mgmt_status(status));
2908 mgmt_pending_remove(cmd);
2911 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2914 struct mgmt_cp_pair_device *cp = data;
2915 struct mgmt_rp_pair_device rp;
2916 struct mgmt_pending_cmd *cmd;
2917 u8 sec_level, auth_type;
2918 struct hci_conn *conn;
2921 bt_dev_dbg(hdev, "sock %p", sk);
2923 memset(&rp, 0, sizeof(rp));
2924 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2925 rp.addr.type = cp->addr.type;
2927 if (!bdaddr_type_is_valid(cp->addr.type))
2928 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2929 MGMT_STATUS_INVALID_PARAMS,
2932 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2933 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2934 MGMT_STATUS_INVALID_PARAMS,
2939 if (!hdev_is_powered(hdev)) {
2940 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2941 MGMT_STATUS_NOT_POWERED, &rp,
2946 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2947 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2948 MGMT_STATUS_ALREADY_PAIRED, &rp,
2953 sec_level = BT_SECURITY_MEDIUM;
2954 auth_type = HCI_AT_DEDICATED_BONDING;
2956 if (cp->addr.type == BDADDR_BREDR) {
2957 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2958 auth_type, CONN_REASON_PAIR_DEVICE);
2960 u8 addr_type = le_addr_type(cp->addr.type);
2961 struct hci_conn_params *p;
2963 /* When pairing a new device, it is expected to remember
2964 * this device for future connections. Adding the connection
2965 * parameter information ahead of time allows tracking
2966 * of the peripheral preferred values and will speed up any
2967 * further connection establishment.
2969 * If connection parameters already exist, then they
2970 * will be kept and this function does nothing.
2972 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2974 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2975 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2977 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2978 sec_level, HCI_LE_CONN_TIMEOUT,
2979 CONN_REASON_PAIR_DEVICE);
2985 if (PTR_ERR(conn) == -EBUSY)
2986 status = MGMT_STATUS_BUSY;
2987 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2988 status = MGMT_STATUS_NOT_SUPPORTED;
2989 else if (PTR_ERR(conn) == -ECONNREFUSED)
2990 status = MGMT_STATUS_REJECTED;
2992 status = MGMT_STATUS_CONNECT_FAILED;
2994 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2995 status, &rp, sizeof(rp));
2999 if (conn->connect_cfm_cb) {
3000 hci_conn_drop(conn);
3001 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3002 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3006 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3009 hci_conn_drop(conn);
3013 cmd->cmd_complete = pairing_complete;
3015 /* For LE, just connecting isn't a proof that the pairing finished */
3016 if (cp->addr.type == BDADDR_BREDR) {
3017 conn->connect_cfm_cb = pairing_complete_cb;
3018 conn->security_cfm_cb = pairing_complete_cb;
3019 conn->disconn_cfm_cb = pairing_complete_cb;
3021 conn->connect_cfm_cb = le_pairing_complete_cb;
3022 conn->security_cfm_cb = le_pairing_complete_cb;
3023 conn->disconn_cfm_cb = le_pairing_complete_cb;
3026 conn->io_capability = cp->io_cap;
3027 cmd->user_data = hci_conn_get(conn);
3029 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3030 hci_conn_security(conn, sec_level, auth_type, true)) {
3031 cmd->cmd_complete(cmd, 0);
3032 mgmt_pending_remove(cmd);
3038 hci_dev_unlock(hdev);
3042 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3045 struct mgmt_addr_info *addr = data;
3046 struct mgmt_pending_cmd *cmd;
3047 struct hci_conn *conn;
3050 bt_dev_dbg(hdev, "sock %p", sk);
3054 if (!hdev_is_powered(hdev)) {
3055 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3056 MGMT_STATUS_NOT_POWERED);
3060 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3062 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3063 MGMT_STATUS_INVALID_PARAMS);
3067 conn = cmd->user_data;
3069 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3070 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3071 MGMT_STATUS_INVALID_PARAMS);
3075 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3076 mgmt_pending_remove(cmd);
3078 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3079 addr, sizeof(*addr));
3081 /* Since user doesn't want to proceed with the connection, abort any
3082 * ongoing pairing and then terminate the link if it was created
3083 * because of the pair device action.
3085 if (addr->type == BDADDR_BREDR)
3086 hci_remove_link_key(hdev, &addr->bdaddr);
3088 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3089 le_addr_type(addr->type));
3091 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3092 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3095 hci_dev_unlock(hdev);
3099 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3100 struct mgmt_addr_info *addr, u16 mgmt_op,
3101 u16 hci_op, __le32 passkey)
3103 struct mgmt_pending_cmd *cmd;
3104 struct hci_conn *conn;
3109 if (!hdev_is_powered(hdev)) {
3110 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3111 MGMT_STATUS_NOT_POWERED, addr,
3116 if (addr->type == BDADDR_BREDR)
3117 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3119 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3120 le_addr_type(addr->type));
3123 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3124 MGMT_STATUS_NOT_CONNECTED, addr,
3129 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3130 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3132 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3133 MGMT_STATUS_SUCCESS, addr,
3136 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3137 MGMT_STATUS_FAILED, addr,
3143 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3149 cmd->cmd_complete = addr_cmd_complete;
3151 /* Continue with pairing via HCI */
3152 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3153 struct hci_cp_user_passkey_reply cp;
3155 bacpy(&cp.bdaddr, &addr->bdaddr);
3156 cp.passkey = passkey;
3157 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3159 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3163 mgmt_pending_remove(cmd);
3166 hci_dev_unlock(hdev);
3170 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3171 void *data, u16 len)
3173 struct mgmt_cp_pin_code_neg_reply *cp = data;
3175 bt_dev_dbg(hdev, "sock %p", sk);
3177 return user_pairing_resp(sk, hdev, &cp->addr,
3178 MGMT_OP_PIN_CODE_NEG_REPLY,
3179 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3182 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3185 struct mgmt_cp_user_confirm_reply *cp = data;
3187 bt_dev_dbg(hdev, "sock %p", sk);
3189 if (len != sizeof(*cp))
3190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3191 MGMT_STATUS_INVALID_PARAMS);
3193 return user_pairing_resp(sk, hdev, &cp->addr,
3194 MGMT_OP_USER_CONFIRM_REPLY,
3195 HCI_OP_USER_CONFIRM_REPLY, 0);
3198 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3199 void *data, u16 len)
3201 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3203 bt_dev_dbg(hdev, "sock %p", sk);
3205 return user_pairing_resp(sk, hdev, &cp->addr,
3206 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3207 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3210 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3213 struct mgmt_cp_user_passkey_reply *cp = data;
3215 bt_dev_dbg(hdev, "sock %p", sk);
3217 return user_pairing_resp(sk, hdev, &cp->addr,
3218 MGMT_OP_USER_PASSKEY_REPLY,
3219 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3222 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3223 void *data, u16 len)
3225 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3227 bt_dev_dbg(hdev, "sock %p", sk);
3229 return user_pairing_resp(sk, hdev, &cp->addr,
3230 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3231 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3234 static void adv_expire(struct hci_dev *hdev, u32 flags)
3236 struct adv_info *adv_instance;
3237 struct hci_request req;
3240 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3244 /* stop if current instance doesn't need to be changed */
3245 if (!(adv_instance->flags & flags))
3248 cancel_adv_timeout(hdev);
3250 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3254 hci_req_init(&req, hdev);
3255 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3260 hci_req_run(&req, NULL);
3263 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3265 struct mgmt_cp_set_local_name *cp;
3266 struct mgmt_pending_cmd *cmd;
3268 bt_dev_dbg(hdev, "status 0x%02x", status);
3272 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3279 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3280 mgmt_status(status));
3282 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3285 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3286 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3289 mgmt_pending_remove(cmd);
3292 hci_dev_unlock(hdev);
3295 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3298 struct mgmt_cp_set_local_name *cp = data;
3299 struct mgmt_pending_cmd *cmd;
3300 struct hci_request req;
3303 bt_dev_dbg(hdev, "sock %p", sk);
3307 /* If the old values are the same as the new ones just return a
3308 * direct command complete event.
3310 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3311 !memcmp(hdev->short_name, cp->short_name,
3312 sizeof(hdev->short_name))) {
3313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3318 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3320 if (!hdev_is_powered(hdev)) {
3321 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3323 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3328 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3329 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3330 ext_info_changed(hdev, sk);
3335 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3341 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3343 hci_req_init(&req, hdev);
3345 if (lmp_bredr_capable(hdev)) {
3346 __hci_req_update_name(&req);
3347 __hci_req_update_eir(&req);
3350 /* The name is stored in the scan response data and so
3351 * no need to update the advertising data here.
3353 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3354 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3356 err = hci_req_run(&req, set_name_complete);
3358 mgmt_pending_remove(cmd);
3361 hci_dev_unlock(hdev);
3365 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3368 struct mgmt_cp_set_appearance *cp = data;
3372 bt_dev_dbg(hdev, "sock %p", sk);
3374 if (!lmp_le_capable(hdev))
3375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3376 MGMT_STATUS_NOT_SUPPORTED);
3378 appearance = le16_to_cpu(cp->appearance);
3382 if (hdev->appearance != appearance) {
3383 hdev->appearance = appearance;
3385 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3386 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3388 ext_info_changed(hdev, sk);
3391 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3394 hci_dev_unlock(hdev);
3399 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3400 void *data, u16 len)
3402 struct mgmt_rp_get_phy_configuration rp;
3404 bt_dev_dbg(hdev, "sock %p", sk);
3408 memset(&rp, 0, sizeof(rp));
3410 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3411 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3412 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3414 hci_dev_unlock(hdev);
3416 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3420 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3422 struct mgmt_ev_phy_configuration_changed ev;
3424 memset(&ev, 0, sizeof(ev));
3426 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3428 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3432 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3433 u16 opcode, struct sk_buff *skb)
3435 struct mgmt_pending_cmd *cmd;
3437 bt_dev_dbg(hdev, "status 0x%02x", status);
3441 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3446 mgmt_cmd_status(cmd->sk, hdev->id,
3447 MGMT_OP_SET_PHY_CONFIGURATION,
3448 mgmt_status(status));
3450 mgmt_cmd_complete(cmd->sk, hdev->id,
3451 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3454 mgmt_phy_configuration_changed(hdev, cmd->sk);
3457 mgmt_pending_remove(cmd);
3460 hci_dev_unlock(hdev);
3463 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3464 void *data, u16 len)
3466 struct mgmt_cp_set_phy_configuration *cp = data;
3467 struct hci_cp_le_set_default_phy cp_phy;
3468 struct mgmt_pending_cmd *cmd;
3469 struct hci_request req;
3470 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3471 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3472 bool changed = false;
3475 bt_dev_dbg(hdev, "sock %p", sk);
3477 configurable_phys = get_configurable_phys(hdev);
3478 supported_phys = get_supported_phys(hdev);
3479 selected_phys = __le32_to_cpu(cp->selected_phys);
3481 if (selected_phys & ~supported_phys)
3482 return mgmt_cmd_status(sk, hdev->id,
3483 MGMT_OP_SET_PHY_CONFIGURATION,
3484 MGMT_STATUS_INVALID_PARAMS);
3486 unconfigure_phys = supported_phys & ~configurable_phys;
3488 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3489 return mgmt_cmd_status(sk, hdev->id,
3490 MGMT_OP_SET_PHY_CONFIGURATION,
3491 MGMT_STATUS_INVALID_PARAMS);
3493 if (selected_phys == get_selected_phys(hdev))
3494 return mgmt_cmd_complete(sk, hdev->id,
3495 MGMT_OP_SET_PHY_CONFIGURATION,
3500 if (!hdev_is_powered(hdev)) {
3501 err = mgmt_cmd_status(sk, hdev->id,
3502 MGMT_OP_SET_PHY_CONFIGURATION,
3503 MGMT_STATUS_REJECTED);
3507 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3508 err = mgmt_cmd_status(sk, hdev->id,
3509 MGMT_OP_SET_PHY_CONFIGURATION,
3514 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3515 pkt_type |= (HCI_DH3 | HCI_DM3);
3517 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3519 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3520 pkt_type |= (HCI_DH5 | HCI_DM5);
3522 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3524 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3525 pkt_type &= ~HCI_2DH1;
3527 pkt_type |= HCI_2DH1;
3529 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3530 pkt_type &= ~HCI_2DH3;
3532 pkt_type |= HCI_2DH3;
3534 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3535 pkt_type &= ~HCI_2DH5;
3537 pkt_type |= HCI_2DH5;
3539 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3540 pkt_type &= ~HCI_3DH1;
3542 pkt_type |= HCI_3DH1;
3544 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3545 pkt_type &= ~HCI_3DH3;
3547 pkt_type |= HCI_3DH3;
3549 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3550 pkt_type &= ~HCI_3DH5;
3552 pkt_type |= HCI_3DH5;
3554 if (pkt_type != hdev->pkt_type) {
3555 hdev->pkt_type = pkt_type;
3559 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3560 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3562 mgmt_phy_configuration_changed(hdev, sk);
3564 err = mgmt_cmd_complete(sk, hdev->id,
3565 MGMT_OP_SET_PHY_CONFIGURATION,
3571 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3578 hci_req_init(&req, hdev);
3580 memset(&cp_phy, 0, sizeof(cp_phy));
3582 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3583 cp_phy.all_phys |= 0x01;
3585 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3586 cp_phy.all_phys |= 0x02;
3588 if (selected_phys & MGMT_PHY_LE_1M_TX)
3589 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3591 if (selected_phys & MGMT_PHY_LE_2M_TX)
3592 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3594 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3595 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3597 if (selected_phys & MGMT_PHY_LE_1M_RX)
3598 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3600 if (selected_phys & MGMT_PHY_LE_2M_RX)
3601 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3603 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3604 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3606 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3608 err = hci_req_run_skb(&req, set_default_phy_complete);
3610 mgmt_pending_remove(cmd);
3613 hci_dev_unlock(hdev);
3618 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3621 int err = MGMT_STATUS_SUCCESS;
3622 struct mgmt_cp_set_blocked_keys *keys = data;
3623 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3624 sizeof(struct mgmt_blocked_key_info));
3625 u16 key_count, expected_len;
3628 bt_dev_dbg(hdev, "sock %p", sk);
3630 key_count = __le16_to_cpu(keys->key_count);
3631 if (key_count > max_key_count) {
3632 bt_dev_err(hdev, "too big key_count value %u", key_count);
3633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3634 MGMT_STATUS_INVALID_PARAMS);
3637 expected_len = struct_size(keys, keys, key_count);
3638 if (expected_len != len) {
3639 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3642 MGMT_STATUS_INVALID_PARAMS);
3647 hci_blocked_keys_clear(hdev);
3649 for (i = 0; i < keys->key_count; ++i) {
3650 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3653 err = MGMT_STATUS_NO_RESOURCES;
3657 b->type = keys->keys[i].type;
3658 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3659 list_add_rcu(&b->list, &hdev->blocked_keys);
3661 hci_dev_unlock(hdev);
3663 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3667 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3668 void *data, u16 len)
3670 struct mgmt_mode *cp = data;
3672 bool changed = false;
3674 bt_dev_dbg(hdev, "sock %p", sk);
3676 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3677 return mgmt_cmd_status(sk, hdev->id,
3678 MGMT_OP_SET_WIDEBAND_SPEECH,
3679 MGMT_STATUS_NOT_SUPPORTED);
3681 if (cp->val != 0x00 && cp->val != 0x01)
3682 return mgmt_cmd_status(sk, hdev->id,
3683 MGMT_OP_SET_WIDEBAND_SPEECH,
3684 MGMT_STATUS_INVALID_PARAMS);
3688 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3689 err = mgmt_cmd_status(sk, hdev->id,
3690 MGMT_OP_SET_WIDEBAND_SPEECH,
3695 if (hdev_is_powered(hdev) &&
3696 !!cp->val != hci_dev_test_flag(hdev,
3697 HCI_WIDEBAND_SPEECH_ENABLED)) {
3698 err = mgmt_cmd_status(sk, hdev->id,
3699 MGMT_OP_SET_WIDEBAND_SPEECH,
3700 MGMT_STATUS_REJECTED);
3705 changed = !hci_dev_test_and_set_flag(hdev,
3706 HCI_WIDEBAND_SPEECH_ENABLED);
3708 changed = hci_dev_test_and_clear_flag(hdev,
3709 HCI_WIDEBAND_SPEECH_ENABLED);
3711 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3716 err = new_settings(hdev, sk);
3719 hci_dev_unlock(hdev);
3723 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3724 void *data, u16 data_len)
3727 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3730 u8 tx_power_range[2];
3732 bt_dev_dbg(hdev, "sock %p", sk);
3734 memset(&buf, 0, sizeof(buf));
3738 /* When the Read Simple Pairing Options command is supported, then
3739 * the remote public key validation is supported.
3741 * Alternatively, when Microsoft extensions are available, they can
3742 * indicate support for public key validation as well.
3744 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3745 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3747 flags |= 0x02; /* Remote public key validation (LE) */
3749 /* When the Read Encryption Key Size command is supported, then the
3750 * encryption key size is enforced.
3752 if (hdev->commands[20] & 0x10)
3753 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3755 flags |= 0x08; /* Encryption key size enforcement (LE) */
3757 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3760 /* When the Read Simple Pairing Options command is supported, then
3761 * also max encryption key size information is provided.
3763 if (hdev->commands[41] & 0x08)
3764 cap_len = eir_append_le16(rp->cap, cap_len,
3765 MGMT_CAP_MAX_ENC_KEY_SIZE,
3766 hdev->max_enc_key_size);
3768 cap_len = eir_append_le16(rp->cap, cap_len,
3769 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3770 SMP_MAX_ENC_KEY_SIZE);
3772 /* Append the min/max LE tx power parameters if we were able to fetch
3773 * it from the controller
3775 if (hdev->commands[38] & 0x80) {
3776 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3777 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3778 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3782 rp->cap_len = cpu_to_le16(cap_len);
3784 hci_dev_unlock(hdev);
3786 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3787 rp, sizeof(*rp) + cap_len);
3790 #ifdef CONFIG_BT_FEATURE_DEBUG
3791 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3792 static const u8 debug_uuid[16] = {
3793 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3794 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3798 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3799 static const u8 simult_central_periph_uuid[16] = {
3800 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3801 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3804 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3805 static const u8 rpa_resolution_uuid[16] = {
3806 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3807 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3810 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3811 void *data, u16 data_len)
3813 char buf[62]; /* Enough space for 3 features */
3814 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3818 bt_dev_dbg(hdev, "sock %p", sk);
3820 memset(&buf, 0, sizeof(buf));
3822 #ifdef CONFIG_BT_FEATURE_DEBUG
3824 flags = bt_dbg_get() ? BIT(0) : 0;
3826 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3827 rp->features[idx].flags = cpu_to_le32(flags);
3833 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3834 (hdev->le_states[4] & 0x08) && /* Central */
3835 (hdev->le_states[4] & 0x40) && /* Peripheral */
3836 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3841 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3842 rp->features[idx].flags = cpu_to_le32(flags);
3846 if (hdev && use_ll_privacy(hdev)) {
3847 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3848 flags = BIT(0) | BIT(1);
3852 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3853 rp->features[idx].flags = cpu_to_le32(flags);
3857 rp->feature_count = cpu_to_le16(idx);
3859 /* After reading the experimental features information, enable
3860 * the events to update client on any future change.
3862 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3864 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3865 MGMT_OP_READ_EXP_FEATURES_INFO,
3866 0, rp, sizeof(*rp) + (20 * idx));
3869 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3872 struct mgmt_ev_exp_feature_changed ev;
3874 memset(&ev, 0, sizeof(ev));
3875 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3876 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3878 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3880 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3884 #ifdef CONFIG_BT_FEATURE_DEBUG
3885 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3887 struct mgmt_ev_exp_feature_changed ev;
3889 memset(&ev, 0, sizeof(ev));
3890 memcpy(ev.uuid, debug_uuid, 16);
3891 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3893 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3895 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3899 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3900 void *data, u16 data_len)
3902 struct mgmt_cp_set_exp_feature *cp = data;
3903 struct mgmt_rp_set_exp_feature rp;
3905 bt_dev_dbg(hdev, "sock %p", sk);
3907 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3908 memset(rp.uuid, 0, 16);
3909 rp.flags = cpu_to_le32(0);
3911 #ifdef CONFIG_BT_FEATURE_DEBUG
3913 bool changed = bt_dbg_get();
3918 exp_debug_feature_changed(false, sk);
3922 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3923 bool changed = hci_dev_test_flag(hdev,
3924 HCI_ENABLE_LL_PRIVACY);
3926 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3929 exp_ll_privacy_feature_changed(false, hdev, sk);
3932 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3934 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3935 MGMT_OP_SET_EXP_FEATURE, 0,
3939 #ifdef CONFIG_BT_FEATURE_DEBUG
3940 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3944 /* Command requires to use the non-controller index */
3946 return mgmt_cmd_status(sk, hdev->id,
3947 MGMT_OP_SET_EXP_FEATURE,
3948 MGMT_STATUS_INVALID_INDEX);
3950 /* Parameters are limited to a single octet */
3951 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3952 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3953 MGMT_OP_SET_EXP_FEATURE,
3954 MGMT_STATUS_INVALID_PARAMS);
3956 /* Only boolean on/off is supported */
3957 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3958 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3959 MGMT_OP_SET_EXP_FEATURE,
3960 MGMT_STATUS_INVALID_PARAMS);
3962 val = !!cp->param[0];
3963 changed = val ? !bt_dbg_get() : bt_dbg_get();
3966 memcpy(rp.uuid, debug_uuid, 16);
3967 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3969 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3971 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3972 MGMT_OP_SET_EXP_FEATURE, 0,
3976 exp_debug_feature_changed(val, sk);
3982 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3987 /* Command requires to use the controller index */
3989 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3990 MGMT_OP_SET_EXP_FEATURE,
3991 MGMT_STATUS_INVALID_INDEX);
3993 /* Changes can only be made when controller is powered down */
3994 if (hdev_is_powered(hdev))
3995 return mgmt_cmd_status(sk, hdev->id,
3996 MGMT_OP_SET_EXP_FEATURE,
3997 MGMT_STATUS_REJECTED);
3999 /* Parameters are limited to a single octet */
4000 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4001 return mgmt_cmd_status(sk, hdev->id,
4002 MGMT_OP_SET_EXP_FEATURE,
4003 MGMT_STATUS_INVALID_PARAMS);
4005 /* Only boolean on/off is supported */
4006 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4007 return mgmt_cmd_status(sk, hdev->id,
4008 MGMT_OP_SET_EXP_FEATURE,
4009 MGMT_STATUS_INVALID_PARAMS);
4011 val = !!cp->param[0];
4014 changed = !hci_dev_test_flag(hdev,
4015 HCI_ENABLE_LL_PRIVACY);
4016 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4017 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4019 /* Enable LL privacy + supported settings changed */
4020 flags = BIT(0) | BIT(1);
4022 changed = hci_dev_test_flag(hdev,
4023 HCI_ENABLE_LL_PRIVACY);
4024 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4026 /* Disable LL privacy + supported settings changed */
4030 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4031 rp.flags = cpu_to_le32(flags);
4033 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4035 err = mgmt_cmd_complete(sk, hdev->id,
4036 MGMT_OP_SET_EXP_FEATURE, 0,
4040 exp_ll_privacy_feature_changed(val, hdev, sk);
4045 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4046 MGMT_OP_SET_EXP_FEATURE,
4047 MGMT_STATUS_NOT_SUPPORTED);
4050 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4052 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4055 struct mgmt_cp_get_device_flags *cp = data;
4056 struct mgmt_rp_get_device_flags rp;
4057 struct bdaddr_list_with_flags *br_params;
4058 struct hci_conn_params *params;
4059 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4060 u32 current_flags = 0;
4061 u8 status = MGMT_STATUS_INVALID_PARAMS;
4063 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4064 &cp->addr.bdaddr, cp->addr.type);
4068 memset(&rp, 0, sizeof(rp));
4070 if (cp->addr.type == BDADDR_BREDR) {
4071 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4077 current_flags = br_params->current_flags;
4079 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4080 le_addr_type(cp->addr.type));
4085 current_flags = params->current_flags;
4088 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4089 rp.addr.type = cp->addr.type;
4090 rp.supported_flags = cpu_to_le32(supported_flags);
4091 rp.current_flags = cpu_to_le32(current_flags);
4093 status = MGMT_STATUS_SUCCESS;
4096 hci_dev_unlock(hdev);
4098 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4102 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4103 bdaddr_t *bdaddr, u8 bdaddr_type,
4104 u32 supported_flags, u32 current_flags)
4106 struct mgmt_ev_device_flags_changed ev;
4108 bacpy(&ev.addr.bdaddr, bdaddr);
4109 ev.addr.type = bdaddr_type;
4110 ev.supported_flags = cpu_to_le32(supported_flags);
4111 ev.current_flags = cpu_to_le32(current_flags);
4113 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4116 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4119 struct mgmt_cp_set_device_flags *cp = data;
4120 struct bdaddr_list_with_flags *br_params;
4121 struct hci_conn_params *params;
4122 u8 status = MGMT_STATUS_INVALID_PARAMS;
4123 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4124 u32 current_flags = __le32_to_cpu(cp->current_flags);
4126 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4127 &cp->addr.bdaddr, cp->addr.type,
4128 __le32_to_cpu(current_flags));
4130 if ((supported_flags | current_flags) != supported_flags) {
4131 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4132 current_flags, supported_flags);
4138 if (cp->addr.type == BDADDR_BREDR) {
4139 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4144 br_params->current_flags = current_flags;
4145 status = MGMT_STATUS_SUCCESS;
4147 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4148 &cp->addr.bdaddr, cp->addr.type);
4151 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4152 le_addr_type(cp->addr.type));
4154 params->current_flags = current_flags;
4155 status = MGMT_STATUS_SUCCESS;
4157 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4159 le_addr_type(cp->addr.type));
4164 hci_dev_unlock(hdev);
4166 if (status == MGMT_STATUS_SUCCESS)
4167 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4168 supported_flags, current_flags);
4170 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4171 &cp->addr, sizeof(cp->addr));
4174 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4177 struct mgmt_ev_adv_monitor_added ev;
4179 ev.monitor_handle = cpu_to_le16(handle);
4181 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4184 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4186 struct mgmt_ev_adv_monitor_removed ev;
4187 struct mgmt_pending_cmd *cmd;
4188 struct sock *sk_skip = NULL;
4189 struct mgmt_cp_remove_adv_monitor *cp;
4191 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4195 if (cp->monitor_handle)
4199 ev.monitor_handle = cpu_to_le16(handle);
4201 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4204 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4205 void *data, u16 len)
4207 struct adv_monitor *monitor = NULL;
4208 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4211 __u32 supported = 0;
4213 __u16 num_handles = 0;
4214 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4216 BT_DBG("request for %s", hdev->name);
4220 if (msft_monitor_supported(hdev))
4221 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4223 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4224 handles[num_handles++] = monitor->handle;
4226 hci_dev_unlock(hdev);
4228 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4229 rp = kmalloc(rp_size, GFP_KERNEL);
4233 /* All supported features are currently enabled */
4234 enabled = supported;
4236 rp->supported_features = cpu_to_le32(supported);
4237 rp->enabled_features = cpu_to_le32(enabled);
4238 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4239 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4240 rp->num_handles = cpu_to_le16(num_handles);
4242 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4244 err = mgmt_cmd_complete(sk, hdev->id,
4245 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4246 MGMT_STATUS_SUCCESS, rp, rp_size);
4253 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4255 struct mgmt_rp_add_adv_patterns_monitor rp;
4256 struct mgmt_pending_cmd *cmd;
4257 struct adv_monitor *monitor;
4262 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4264 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4269 monitor = cmd->user_data;
4270 rp.monitor_handle = cpu_to_le16(monitor->handle);
4273 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4274 hdev->adv_monitors_cnt++;
4275 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4276 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4277 hci_update_background_scan(hdev);
4280 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4281 mgmt_status(status), &rp, sizeof(rp));
4282 mgmt_pending_remove(cmd);
4285 hci_dev_unlock(hdev);
4286 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4287 rp.monitor_handle, status);
4292 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4293 struct adv_monitor *m, u8 status,
4294 void *data, u16 len, u16 op)
4296 struct mgmt_rp_add_adv_patterns_monitor rp;
4297 struct mgmt_pending_cmd *cmd;
4306 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4307 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4308 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4309 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4310 status = MGMT_STATUS_BUSY;
4314 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4316 status = MGMT_STATUS_NO_RESOURCES;
4321 pending = hci_add_adv_monitor(hdev, m, &err);
4323 if (err == -ENOSPC || err == -ENOMEM)
4324 status = MGMT_STATUS_NO_RESOURCES;
4325 else if (err == -EINVAL)
4326 status = MGMT_STATUS_INVALID_PARAMS;
4328 status = MGMT_STATUS_FAILED;
4330 mgmt_pending_remove(cmd);
4335 mgmt_pending_remove(cmd);
4336 rp.monitor_handle = cpu_to_le16(m->handle);
4337 mgmt_adv_monitor_added(sk, hdev, m->handle);
4338 m->state = ADV_MONITOR_STATE_REGISTERED;
4339 hdev->adv_monitors_cnt++;
4341 hci_dev_unlock(hdev);
4342 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4346 hci_dev_unlock(hdev);
4351 hci_free_adv_monitor(hdev, m);
4352 hci_dev_unlock(hdev);
4353 return mgmt_cmd_status(sk, hdev->id, op, status);
4356 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4357 struct mgmt_adv_rssi_thresholds *rssi)
4360 m->rssi.low_threshold = rssi->low_threshold;
4361 m->rssi.low_threshold_timeout =
4362 __le16_to_cpu(rssi->low_threshold_timeout);
4363 m->rssi.high_threshold = rssi->high_threshold;
4364 m->rssi.high_threshold_timeout =
4365 __le16_to_cpu(rssi->high_threshold_timeout);
4366 m->rssi.sampling_period = rssi->sampling_period;
4368 /* Default values. These numbers are the least constricting
4369 * parameters for MSFT API to work, so it behaves as if there
4370 * are no rssi parameter to consider. May need to be changed
4371 * if other API are to be supported.
4373 m->rssi.low_threshold = -127;
4374 m->rssi.low_threshold_timeout = 60;
4375 m->rssi.high_threshold = -127;
4376 m->rssi.high_threshold_timeout = 0;
4377 m->rssi.sampling_period = 0;
4381 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4382 struct mgmt_adv_pattern *patterns)
4384 u8 offset = 0, length = 0;
4385 struct adv_pattern *p = NULL;
4388 for (i = 0; i < pattern_count; i++) {
4389 offset = patterns[i].offset;
4390 length = patterns[i].length;
4391 if (offset >= HCI_MAX_AD_LENGTH ||
4392 length > HCI_MAX_AD_LENGTH ||
4393 (offset + length) > HCI_MAX_AD_LENGTH)
4394 return MGMT_STATUS_INVALID_PARAMS;
4396 p = kmalloc(sizeof(*p), GFP_KERNEL);
4398 return MGMT_STATUS_NO_RESOURCES;
4400 p->ad_type = patterns[i].ad_type;
4401 p->offset = patterns[i].offset;
4402 p->length = patterns[i].length;
4403 memcpy(p->value, patterns[i].value, p->length);
4405 INIT_LIST_HEAD(&p->list);
4406 list_add(&p->list, &m->patterns);
4409 return MGMT_STATUS_SUCCESS;
4412 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4413 void *data, u16 len)
4415 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4416 struct adv_monitor *m = NULL;
4417 u8 status = MGMT_STATUS_SUCCESS;
4418 size_t expected_size = sizeof(*cp);
4420 BT_DBG("request for %s", hdev->name);
4422 if (len <= sizeof(*cp)) {
4423 status = MGMT_STATUS_INVALID_PARAMS;
4427 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4428 if (len != expected_size) {
4429 status = MGMT_STATUS_INVALID_PARAMS;
4433 m = kzalloc(sizeof(*m), GFP_KERNEL);
4435 status = MGMT_STATUS_NO_RESOURCES;
4439 INIT_LIST_HEAD(&m->patterns);
4441 parse_adv_monitor_rssi(m, NULL);
4442 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4445 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4446 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4449 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4450 void *data, u16 len)
4452 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4453 struct adv_monitor *m = NULL;
4454 u8 status = MGMT_STATUS_SUCCESS;
4455 size_t expected_size = sizeof(*cp);
4457 BT_DBG("request for %s", hdev->name);
4459 if (len <= sizeof(*cp)) {
4460 status = MGMT_STATUS_INVALID_PARAMS;
4464 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4465 if (len != expected_size) {
4466 status = MGMT_STATUS_INVALID_PARAMS;
4470 m = kzalloc(sizeof(*m), GFP_KERNEL);
4472 status = MGMT_STATUS_NO_RESOURCES;
4476 INIT_LIST_HEAD(&m->patterns);
4478 parse_adv_monitor_rssi(m, &cp->rssi);
4479 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4482 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4483 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4486 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4488 struct mgmt_rp_remove_adv_monitor rp;
4489 struct mgmt_cp_remove_adv_monitor *cp;
4490 struct mgmt_pending_cmd *cmd;
4495 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4500 rp.monitor_handle = cp->monitor_handle;
4503 hci_update_background_scan(hdev);
4505 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4506 mgmt_status(status), &rp, sizeof(rp));
4507 mgmt_pending_remove(cmd);
4510 hci_dev_unlock(hdev);
4511 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4512 rp.monitor_handle, status);
4517 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4518 void *data, u16 len)
4520 struct mgmt_cp_remove_adv_monitor *cp = data;
4521 struct mgmt_rp_remove_adv_monitor rp;
4522 struct mgmt_pending_cmd *cmd;
4523 u16 handle = __le16_to_cpu(cp->monitor_handle);
4527 BT_DBG("request for %s", hdev->name);
4528 rp.monitor_handle = cp->monitor_handle;
4532 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4533 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4534 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4535 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4536 status = MGMT_STATUS_BUSY;
4540 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4542 status = MGMT_STATUS_NO_RESOURCES;
4547 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4549 pending = hci_remove_all_adv_monitor(hdev, &err);
4552 mgmt_pending_remove(cmd);
4555 status = MGMT_STATUS_INVALID_INDEX;
4557 status = MGMT_STATUS_FAILED;
4562 /* monitor can be removed without forwarding request to controller */
4564 mgmt_pending_remove(cmd);
4565 hci_dev_unlock(hdev);
4567 return mgmt_cmd_complete(sk, hdev->id,
4568 MGMT_OP_REMOVE_ADV_MONITOR,
4569 MGMT_STATUS_SUCCESS,
4573 hci_dev_unlock(hdev);
4577 hci_dev_unlock(hdev);
4578 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4582 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4583 u16 opcode, struct sk_buff *skb)
4585 struct mgmt_rp_read_local_oob_data mgmt_rp;
4586 size_t rp_size = sizeof(mgmt_rp);
4587 struct mgmt_pending_cmd *cmd;
4589 bt_dev_dbg(hdev, "status %u", status);
4591 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4595 if (status || !skb) {
4596 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4597 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4601 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4603 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4604 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
4606 if (skb->len < sizeof(*rp)) {
4607 mgmt_cmd_status(cmd->sk, hdev->id,
4608 MGMT_OP_READ_LOCAL_OOB_DATA,
4609 MGMT_STATUS_FAILED);
4613 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4614 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
4616 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4618 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4620 if (skb->len < sizeof(*rp)) {
4621 mgmt_cmd_status(cmd->sk, hdev->id,
4622 MGMT_OP_READ_LOCAL_OOB_DATA,
4623 MGMT_STATUS_FAILED);
4627 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4628 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4630 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4631 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4634 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4635 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4638 mgmt_pending_remove(cmd);
4641 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4642 void *data, u16 data_len)
4644 struct mgmt_pending_cmd *cmd;
4645 struct hci_request req;
4648 bt_dev_dbg(hdev, "sock %p", sk);
4652 if (!hdev_is_powered(hdev)) {
4653 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4654 MGMT_STATUS_NOT_POWERED);
4658 if (!lmp_ssp_capable(hdev)) {
4659 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4660 MGMT_STATUS_NOT_SUPPORTED);
4664 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4665 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4670 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4676 hci_req_init(&req, hdev);
4678 if (bredr_sc_enabled(hdev))
4679 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4681 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4683 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4685 mgmt_pending_remove(cmd);
4688 hci_dev_unlock(hdev);
4692 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4693 void *data, u16 len)
4695 struct mgmt_addr_info *addr = data;
4698 bt_dev_dbg(hdev, "sock %p", sk);
4700 if (!bdaddr_type_is_valid(addr->type))
4701 return mgmt_cmd_complete(sk, hdev->id,
4702 MGMT_OP_ADD_REMOTE_OOB_DATA,
4703 MGMT_STATUS_INVALID_PARAMS,
4704 addr, sizeof(*addr));
4708 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4709 struct mgmt_cp_add_remote_oob_data *cp = data;
4712 if (cp->addr.type != BDADDR_BREDR) {
4713 err = mgmt_cmd_complete(sk, hdev->id,
4714 MGMT_OP_ADD_REMOTE_OOB_DATA,
4715 MGMT_STATUS_INVALID_PARAMS,
4716 &cp->addr, sizeof(cp->addr));
4720 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4721 cp->addr.type, cp->hash,
4722 cp->rand, NULL, NULL);
4724 status = MGMT_STATUS_FAILED;
4726 status = MGMT_STATUS_SUCCESS;
4728 err = mgmt_cmd_complete(sk, hdev->id,
4729 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4730 &cp->addr, sizeof(cp->addr));
4731 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4732 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4733 u8 *rand192, *hash192, *rand256, *hash256;
4736 if (bdaddr_type_is_le(cp->addr.type)) {
4737 /* Enforce zero-valued 192-bit parameters as
4738 * long as legacy SMP OOB isn't implemented.
4740 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4741 memcmp(cp->hash192, ZERO_KEY, 16)) {
4742 err = mgmt_cmd_complete(sk, hdev->id,
4743 MGMT_OP_ADD_REMOTE_OOB_DATA,
4744 MGMT_STATUS_INVALID_PARAMS,
4745 addr, sizeof(*addr));
4752 /* In case one of the P-192 values is set to zero,
4753 * then just disable OOB data for P-192.
4755 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4756 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4760 rand192 = cp->rand192;
4761 hash192 = cp->hash192;
4765 /* In case one of the P-256 values is set to zero, then just
4766 * disable OOB data for P-256.
4768 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4769 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4773 rand256 = cp->rand256;
4774 hash256 = cp->hash256;
4777 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4778 cp->addr.type, hash192, rand192,
4781 status = MGMT_STATUS_FAILED;
4783 status = MGMT_STATUS_SUCCESS;
4785 err = mgmt_cmd_complete(sk, hdev->id,
4786 MGMT_OP_ADD_REMOTE_OOB_DATA,
4787 status, &cp->addr, sizeof(cp->addr));
4789 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4791 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4792 MGMT_STATUS_INVALID_PARAMS);
4796 hci_dev_unlock(hdev);
4800 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4801 void *data, u16 len)
4803 struct mgmt_cp_remove_remote_oob_data *cp = data;
4807 bt_dev_dbg(hdev, "sock %p", sk);
4809 if (cp->addr.type != BDADDR_BREDR)
4810 return mgmt_cmd_complete(sk, hdev->id,
4811 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4812 MGMT_STATUS_INVALID_PARAMS,
4813 &cp->addr, sizeof(cp->addr));
4817 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4818 hci_remote_oob_data_clear(hdev);
4819 status = MGMT_STATUS_SUCCESS;
4823 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4825 status = MGMT_STATUS_INVALID_PARAMS;
4827 status = MGMT_STATUS_SUCCESS;
4830 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4831 status, &cp->addr, sizeof(cp->addr));
4833 hci_dev_unlock(hdev);
4837 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4839 struct mgmt_pending_cmd *cmd;
4841 bt_dev_dbg(hdev, "status %u", status);
4845 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4847 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4850 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4853 cmd->cmd_complete(cmd, mgmt_status(status));
4854 mgmt_pending_remove(cmd);
4857 hci_dev_unlock(hdev);
4859 /* Handle suspend notifier */
4860 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4861 hdev->suspend_tasks)) {
4862 bt_dev_dbg(hdev, "Unpaused discovery");
4863 wake_up(&hdev->suspend_wait_q);
4867 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4868 uint8_t *mgmt_status)
4871 case DISCOV_TYPE_LE:
4872 *mgmt_status = mgmt_le_support(hdev);
4876 case DISCOV_TYPE_INTERLEAVED:
4877 *mgmt_status = mgmt_le_support(hdev);
4881 case DISCOV_TYPE_BREDR:
4882 *mgmt_status = mgmt_bredr_support(hdev);
4887 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4894 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4895 u16 op, void *data, u16 len)
4897 struct mgmt_cp_start_discovery *cp = data;
4898 struct mgmt_pending_cmd *cmd;
4902 bt_dev_dbg(hdev, "sock %p", sk);
4906 if (!hdev_is_powered(hdev)) {
4907 err = mgmt_cmd_complete(sk, hdev->id, op,
4908 MGMT_STATUS_NOT_POWERED,
4909 &cp->type, sizeof(cp->type));
4913 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4914 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4915 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4916 &cp->type, sizeof(cp->type));
4920 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4921 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4922 &cp->type, sizeof(cp->type));
4926 /* Can't start discovery when it is paused */
4927 if (hdev->discovery_paused) {
4928 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4929 &cp->type, sizeof(cp->type));
4933 /* Clear the discovery filter first to free any previously
4934 * allocated memory for the UUID list.
4936 hci_discovery_filter_clear(hdev);
4938 hdev->discovery.type = cp->type;
4939 hdev->discovery.report_invalid_rssi = false;
4940 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4941 hdev->discovery.limited = true;
4943 hdev->discovery.limited = false;
4945 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4951 cmd->cmd_complete = generic_cmd_complete;
4953 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4954 queue_work(hdev->req_workqueue, &hdev->discov_update);
4958 hci_dev_unlock(hdev);
4962 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4963 void *data, u16 len)
4965 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
4969 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4970 void *data, u16 len)
4972 return start_discovery_internal(sk, hdev,
4973 MGMT_OP_START_LIMITED_DISCOVERY,
4977 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4980 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4984 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4985 void *data, u16 len)
4987 struct mgmt_cp_start_service_discovery *cp = data;
4988 struct mgmt_pending_cmd *cmd;
4989 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4990 u16 uuid_count, expected_len;
4994 bt_dev_dbg(hdev, "sock %p", sk);
4998 if (!hdev_is_powered(hdev)) {
4999 err = mgmt_cmd_complete(sk, hdev->id,
5000 MGMT_OP_START_SERVICE_DISCOVERY,
5001 MGMT_STATUS_NOT_POWERED,
5002 &cp->type, sizeof(cp->type));
5006 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5007 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5008 err = mgmt_cmd_complete(sk, hdev->id,
5009 MGMT_OP_START_SERVICE_DISCOVERY,
5010 MGMT_STATUS_BUSY, &cp->type,
5015 if (hdev->discovery_paused) {
5016 err = mgmt_cmd_complete(sk, hdev->id,
5017 MGMT_OP_START_SERVICE_DISCOVERY,
5018 MGMT_STATUS_BUSY, &cp->type,
5023 uuid_count = __le16_to_cpu(cp->uuid_count);
5024 if (uuid_count > max_uuid_count) {
5025 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5027 err = mgmt_cmd_complete(sk, hdev->id,
5028 MGMT_OP_START_SERVICE_DISCOVERY,
5029 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5034 expected_len = sizeof(*cp) + uuid_count * 16;
5035 if (expected_len != len) {
5036 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5038 err = mgmt_cmd_complete(sk, hdev->id,
5039 MGMT_OP_START_SERVICE_DISCOVERY,
5040 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5045 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5046 err = mgmt_cmd_complete(sk, hdev->id,
5047 MGMT_OP_START_SERVICE_DISCOVERY,
5048 status, &cp->type, sizeof(cp->type));
5052 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5059 cmd->cmd_complete = service_discovery_cmd_complete;
5061 /* Clear the discovery filter first to free any previously
5062 * allocated memory for the UUID list.
5064 hci_discovery_filter_clear(hdev);
5066 hdev->discovery.result_filtering = true;
5067 hdev->discovery.type = cp->type;
5068 hdev->discovery.rssi = cp->rssi;
5069 hdev->discovery.uuid_count = uuid_count;
5071 if (uuid_count > 0) {
5072 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5074 if (!hdev->discovery.uuids) {
5075 err = mgmt_cmd_complete(sk, hdev->id,
5076 MGMT_OP_START_SERVICE_DISCOVERY,
5078 &cp->type, sizeof(cp->type));
5079 mgmt_pending_remove(cmd);
5084 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5085 queue_work(hdev->req_workqueue, &hdev->discov_update);
5089 hci_dev_unlock(hdev);
5093 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5095 struct mgmt_pending_cmd *cmd;
5097 bt_dev_dbg(hdev, "status %u", status);
5101 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5103 cmd->cmd_complete(cmd, mgmt_status(status));
5104 mgmt_pending_remove(cmd);
5107 hci_dev_unlock(hdev);
5109 /* Handle suspend notifier */
5110 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5111 bt_dev_dbg(hdev, "Paused discovery");
5112 wake_up(&hdev->suspend_wait_q);
5116 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5119 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5120 struct mgmt_pending_cmd *cmd;
5123 bt_dev_dbg(hdev, "sock %p", sk);
5127 if (!hci_discovery_active(hdev)) {
5128 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5129 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5130 sizeof(mgmt_cp->type));
5134 if (hdev->discovery.type != mgmt_cp->type) {
5135 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5136 MGMT_STATUS_INVALID_PARAMS,
5137 &mgmt_cp->type, sizeof(mgmt_cp->type));
5141 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5147 cmd->cmd_complete = generic_cmd_complete;
5149 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5150 queue_work(hdev->req_workqueue, &hdev->discov_update);
5154 hci_dev_unlock(hdev);
5158 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5161 struct mgmt_cp_confirm_name *cp = data;
5162 struct inquiry_entry *e;
5165 bt_dev_dbg(hdev, "sock %p", sk);
5169 if (!hci_discovery_active(hdev)) {
5170 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5171 MGMT_STATUS_FAILED, &cp->addr,
5176 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5178 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5179 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5184 if (cp->name_known) {
5185 e->name_state = NAME_KNOWN;
5188 e->name_state = NAME_NEEDED;
5189 hci_inquiry_cache_update_resolve(hdev, e);
5192 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5193 &cp->addr, sizeof(cp->addr));
5196 hci_dev_unlock(hdev);
5200 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5203 struct mgmt_cp_block_device *cp = data;
5207 bt_dev_dbg(hdev, "sock %p", sk);
5209 if (!bdaddr_type_is_valid(cp->addr.type))
5210 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5211 MGMT_STATUS_INVALID_PARAMS,
5212 &cp->addr, sizeof(cp->addr));
5216 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5219 status = MGMT_STATUS_FAILED;
5223 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5225 status = MGMT_STATUS_SUCCESS;
5228 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5229 &cp->addr, sizeof(cp->addr));
5231 hci_dev_unlock(hdev);
5236 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5239 struct mgmt_cp_unblock_device *cp = data;
5243 bt_dev_dbg(hdev, "sock %p", sk);
5245 if (!bdaddr_type_is_valid(cp->addr.type))
5246 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5247 MGMT_STATUS_INVALID_PARAMS,
5248 &cp->addr, sizeof(cp->addr));
5252 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5255 status = MGMT_STATUS_INVALID_PARAMS;
5259 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5261 status = MGMT_STATUS_SUCCESS;
5264 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5265 &cp->addr, sizeof(cp->addr));
5267 hci_dev_unlock(hdev);
5272 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5275 struct mgmt_cp_set_device_id *cp = data;
5276 struct hci_request req;
5280 bt_dev_dbg(hdev, "sock %p", sk);
5282 source = __le16_to_cpu(cp->source);
5284 if (source > 0x0002)
5285 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5286 MGMT_STATUS_INVALID_PARAMS);
5290 hdev->devid_source = source;
5291 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5292 hdev->devid_product = __le16_to_cpu(cp->product);
5293 hdev->devid_version = __le16_to_cpu(cp->version);
5295 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5298 hci_req_init(&req, hdev);
5299 __hci_req_update_eir(&req);
5300 hci_req_run(&req, NULL);
5302 hci_dev_unlock(hdev);
5307 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5310 bt_dev_dbg(hdev, "status %u", status);
5313 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5316 struct cmd_lookup match = { NULL, hdev };
5317 struct hci_request req;
5319 struct adv_info *adv_instance;
5325 u8 mgmt_err = mgmt_status(status);
5327 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5328 cmd_status_rsp, &mgmt_err);
5332 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5333 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5335 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5337 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5340 new_settings(hdev, match.sk);
5345 /* Handle suspend notifier */
5346 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5347 hdev->suspend_tasks)) {
5348 bt_dev_dbg(hdev, "Paused advertising");
5349 wake_up(&hdev->suspend_wait_q);
5350 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5351 hdev->suspend_tasks)) {
5352 bt_dev_dbg(hdev, "Unpaused advertising");
5353 wake_up(&hdev->suspend_wait_q);
5356 /* If "Set Advertising" was just disabled and instance advertising was
5357 * set up earlier, then re-enable multi-instance advertising.
5359 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5360 list_empty(&hdev->adv_instances))
5363 instance = hdev->cur_adv_instance;
5365 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5366 struct adv_info, list);
5370 instance = adv_instance->instance;
5373 hci_req_init(&req, hdev);
5375 err = __hci_req_schedule_adv_instance(&req, instance, true);
5378 err = hci_req_run(&req, enable_advertising_instance);
5381 bt_dev_err(hdev, "failed to re-configure advertising");
5384 hci_dev_unlock(hdev);
5387 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5390 struct mgmt_mode *cp = data;
5391 struct mgmt_pending_cmd *cmd;
5392 struct hci_request req;
5396 bt_dev_dbg(hdev, "sock %p", sk);
5398 status = mgmt_le_support(hdev);
5400 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5403 /* Enabling the experimental LL Privay support disables support for
5406 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5408 MGMT_STATUS_NOT_SUPPORTED);
5410 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5412 MGMT_STATUS_INVALID_PARAMS);
5414 if (hdev->advertising_paused)
5415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5422 /* The following conditions are ones which mean that we should
5423 * not do any HCI communication but directly send a mgmt
5424 * response to user space (after toggling the flag if
5427 if (!hdev_is_powered(hdev) ||
5428 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5429 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5430 hci_conn_num(hdev, LE_LINK) > 0 ||
5431 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5432 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5436 hdev->cur_adv_instance = 0x00;
5437 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5438 if (cp->val == 0x02)
5439 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5441 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5443 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5444 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5447 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5452 err = new_settings(hdev, sk);
5457 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5458 pending_find(MGMT_OP_SET_LE, hdev)) {
5459 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5464 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5470 hci_req_init(&req, hdev);
5472 if (cp->val == 0x02)
5473 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5475 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5477 cancel_adv_timeout(hdev);
5480 /* Switch to instance "0" for the Set Advertising setting.
5481 * We cannot use update_[adv|scan_rsp]_data() here as the
5482 * HCI_ADVERTISING flag is not yet set.
5484 hdev->cur_adv_instance = 0x00;
5486 if (ext_adv_capable(hdev)) {
5487 __hci_req_start_ext_adv(&req, 0x00);
5489 __hci_req_update_adv_data(&req, 0x00);
5490 __hci_req_update_scan_rsp_data(&req, 0x00);
5491 __hci_req_enable_advertising(&req);
5494 __hci_req_disable_advertising(&req);
5497 err = hci_req_run(&req, set_advertising_complete);
5499 mgmt_pending_remove(cmd);
5502 hci_dev_unlock(hdev);
5506 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5507 void *data, u16 len)
5509 struct mgmt_cp_set_static_address *cp = data;
5512 bt_dev_dbg(hdev, "sock %p", sk);
5514 if (!lmp_le_capable(hdev))
5515 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5516 MGMT_STATUS_NOT_SUPPORTED);
5518 if (hdev_is_powered(hdev))
5519 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5520 MGMT_STATUS_REJECTED);
5522 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5523 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5524 return mgmt_cmd_status(sk, hdev->id,
5525 MGMT_OP_SET_STATIC_ADDRESS,
5526 MGMT_STATUS_INVALID_PARAMS);
5528 /* Two most significant bits shall be set */
5529 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5530 return mgmt_cmd_status(sk, hdev->id,
5531 MGMT_OP_SET_STATIC_ADDRESS,
5532 MGMT_STATUS_INVALID_PARAMS);
5537 bacpy(&hdev->static_addr, &cp->bdaddr);
5539 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5543 err = new_settings(hdev, sk);
5546 hci_dev_unlock(hdev);
5550 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5551 void *data, u16 len)
5553 struct mgmt_cp_set_scan_params *cp = data;
5554 __u16 interval, window;
5557 bt_dev_dbg(hdev, "sock %p", sk);
5559 if (!lmp_le_capable(hdev))
5560 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5561 MGMT_STATUS_NOT_SUPPORTED);
5563 interval = __le16_to_cpu(cp->interval);
5565 if (interval < 0x0004 || interval > 0x4000)
5566 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5567 MGMT_STATUS_INVALID_PARAMS);
5569 window = __le16_to_cpu(cp->window);
5571 if (window < 0x0004 || window > 0x4000)
5572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5573 MGMT_STATUS_INVALID_PARAMS);
5575 if (window > interval)
5576 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5577 MGMT_STATUS_INVALID_PARAMS);
5581 hdev->le_scan_interval = interval;
5582 hdev->le_scan_window = window;
5584 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5587 /* If background scan is running, restart it so new parameters are
5590 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5591 hdev->discovery.state == DISCOVERY_STOPPED) {
5592 struct hci_request req;
5594 hci_req_init(&req, hdev);
5596 hci_req_add_le_scan_disable(&req, false);
5597 hci_req_add_le_passive_scan(&req);
5599 hci_req_run(&req, NULL);
5602 hci_dev_unlock(hdev);
5607 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5610 struct mgmt_pending_cmd *cmd;
5612 bt_dev_dbg(hdev, "status 0x%02x", status);
5616 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5621 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5622 mgmt_status(status));
5624 struct mgmt_mode *cp = cmd->param;
5627 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5629 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5631 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5632 new_settings(hdev, cmd->sk);
5635 mgmt_pending_remove(cmd);
5638 hci_dev_unlock(hdev);
5641 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5642 void *data, u16 len)
5644 struct mgmt_mode *cp = data;
5645 struct mgmt_pending_cmd *cmd;
5646 struct hci_request req;
5649 bt_dev_dbg(hdev, "sock %p", sk);
5651 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5652 hdev->hci_ver < BLUETOOTH_VER_1_2)
5653 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5654 MGMT_STATUS_NOT_SUPPORTED);
5656 if (cp->val != 0x00 && cp->val != 0x01)
5657 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5658 MGMT_STATUS_INVALID_PARAMS);
5662 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5663 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5668 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5669 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5674 if (!hdev_is_powered(hdev)) {
5675 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5676 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5678 new_settings(hdev, sk);
5682 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5689 hci_req_init(&req, hdev);
5691 __hci_req_write_fast_connectable(&req, cp->val);
5693 err = hci_req_run(&req, fast_connectable_complete);
5695 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5696 MGMT_STATUS_FAILED);
5697 mgmt_pending_remove(cmd);
5701 hci_dev_unlock(hdev);
5706 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5708 struct mgmt_pending_cmd *cmd;
5710 bt_dev_dbg(hdev, "status 0x%02x", status);
5714 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5719 u8 mgmt_err = mgmt_status(status);
5721 /* We need to restore the flag if related HCI commands
5724 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5726 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5728 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5729 new_settings(hdev, cmd->sk);
5732 mgmt_pending_remove(cmd);
5735 hci_dev_unlock(hdev);
5738 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5740 struct mgmt_mode *cp = data;
5741 struct mgmt_pending_cmd *cmd;
5742 struct hci_request req;
5745 bt_dev_dbg(hdev, "sock %p", sk);
5747 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5749 MGMT_STATUS_NOT_SUPPORTED);
5751 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5752 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5753 MGMT_STATUS_REJECTED);
5755 if (cp->val != 0x00 && cp->val != 0x01)
5756 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5757 MGMT_STATUS_INVALID_PARAMS);
5761 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5762 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5766 if (!hdev_is_powered(hdev)) {
5768 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5769 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5770 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5771 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5772 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5775 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5777 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5781 err = new_settings(hdev, sk);
5785 /* Reject disabling when powered on */
5787 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5788 MGMT_STATUS_REJECTED);
5791 /* When configuring a dual-mode controller to operate
5792 * with LE only and using a static address, then switching
5793 * BR/EDR back on is not allowed.
5795 * Dual-mode controllers shall operate with the public
5796 * address as its identity address for BR/EDR and LE. So
5797 * reject the attempt to create an invalid configuration.
5799 * The same restrictions applies when secure connections
5800 * has been enabled. For BR/EDR this is a controller feature
5801 * while for LE it is a host stack feature. This means that
5802 * switching BR/EDR back on when secure connections has been
5803 * enabled is not a supported transaction.
5805 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5806 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5807 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5808 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5809 MGMT_STATUS_REJECTED);
5814 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5815 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5820 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5826 /* We need to flip the bit already here so that
5827 * hci_req_update_adv_data generates the correct flags.
5829 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5831 hci_req_init(&req, hdev);
5833 __hci_req_write_fast_connectable(&req, false);
5834 __hci_req_update_scan(&req);
5836 /* Since only the advertising data flags will change, there
5837 * is no need to update the scan response data.
5839 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5841 err = hci_req_run(&req, set_bredr_complete);
5843 mgmt_pending_remove(cmd);
5846 hci_dev_unlock(hdev);
5850 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5852 struct mgmt_pending_cmd *cmd;
5853 struct mgmt_mode *cp;
5855 bt_dev_dbg(hdev, "status %u", status);
5859 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5864 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5865 mgmt_status(status));
5873 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5874 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5877 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5878 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5881 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5882 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5886 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5887 new_settings(hdev, cmd->sk);
5890 mgmt_pending_remove(cmd);
5892 hci_dev_unlock(hdev);
5895 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5896 void *data, u16 len)
5898 struct mgmt_mode *cp = data;
5899 struct mgmt_pending_cmd *cmd;
5900 struct hci_request req;
5904 bt_dev_dbg(hdev, "sock %p", sk);
5906 if (!lmp_sc_capable(hdev) &&
5907 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5908 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5909 MGMT_STATUS_NOT_SUPPORTED);
5911 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5912 lmp_sc_capable(hdev) &&
5913 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5914 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5915 MGMT_STATUS_REJECTED);
5917 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5918 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5919 MGMT_STATUS_INVALID_PARAMS);
5923 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5924 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5928 changed = !hci_dev_test_and_set_flag(hdev,
5930 if (cp->val == 0x02)
5931 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5933 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5935 changed = hci_dev_test_and_clear_flag(hdev,
5937 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5940 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5945 err = new_settings(hdev, sk);
5950 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5951 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5958 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5959 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5960 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5964 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5970 hci_req_init(&req, hdev);
5971 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5972 err = hci_req_run(&req, sc_enable_complete);
5974 mgmt_pending_remove(cmd);
5979 hci_dev_unlock(hdev);
5983 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5984 void *data, u16 len)
5986 struct mgmt_mode *cp = data;
5987 bool changed, use_changed;
5990 bt_dev_dbg(hdev, "sock %p", sk);
5992 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5993 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5994 MGMT_STATUS_INVALID_PARAMS);
5999 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6001 changed = hci_dev_test_and_clear_flag(hdev,
6002 HCI_KEEP_DEBUG_KEYS);
6004 if (cp->val == 0x02)
6005 use_changed = !hci_dev_test_and_set_flag(hdev,
6006 HCI_USE_DEBUG_KEYS);
6008 use_changed = hci_dev_test_and_clear_flag(hdev,
6009 HCI_USE_DEBUG_KEYS);
6011 if (hdev_is_powered(hdev) && use_changed &&
6012 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6013 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6014 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6015 sizeof(mode), &mode);
6018 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6023 err = new_settings(hdev, sk);
6026 hci_dev_unlock(hdev);
6030 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6033 struct mgmt_cp_set_privacy *cp = cp_data;
6037 bt_dev_dbg(hdev, "sock %p", sk);
6039 if (!lmp_le_capable(hdev))
6040 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6041 MGMT_STATUS_NOT_SUPPORTED);
6043 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6044 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6045 MGMT_STATUS_INVALID_PARAMS);
6047 if (hdev_is_powered(hdev))
6048 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6049 MGMT_STATUS_REJECTED);
6053 /* If user space supports this command it is also expected to
6054 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6056 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6059 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6060 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6061 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6062 hci_adv_instances_set_rpa_expired(hdev, true);
6063 if (cp->privacy == 0x02)
6064 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6066 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6068 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6069 memset(hdev->irk, 0, sizeof(hdev->irk));
6070 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6071 hci_adv_instances_set_rpa_expired(hdev, false);
6072 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6075 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6080 err = new_settings(hdev, sk);
6083 hci_dev_unlock(hdev);
6087 static bool irk_is_valid(struct mgmt_irk_info *irk)
6089 switch (irk->addr.type) {
6090 case BDADDR_LE_PUBLIC:
6093 case BDADDR_LE_RANDOM:
6094 /* Two most significant bits shall be set */
6095 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6103 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6106 struct mgmt_cp_load_irks *cp = cp_data;
6107 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6108 sizeof(struct mgmt_irk_info));
6109 u16 irk_count, expected_len;
6112 bt_dev_dbg(hdev, "sock %p", sk);
6114 if (!lmp_le_capable(hdev))
6115 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6116 MGMT_STATUS_NOT_SUPPORTED);
6118 irk_count = __le16_to_cpu(cp->irk_count);
6119 if (irk_count > max_irk_count) {
6120 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6122 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6123 MGMT_STATUS_INVALID_PARAMS);
6126 expected_len = struct_size(cp, irks, irk_count);
6127 if (expected_len != len) {
6128 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6130 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6131 MGMT_STATUS_INVALID_PARAMS);
6134 bt_dev_dbg(hdev, "irk_count %u", irk_count);
6136 for (i = 0; i < irk_count; i++) {
6137 struct mgmt_irk_info *key = &cp->irks[i];
6139 if (!irk_is_valid(key))
6140 return mgmt_cmd_status(sk, hdev->id,
6142 MGMT_STATUS_INVALID_PARAMS);
6147 hci_smp_irks_clear(hdev);
6149 for (i = 0; i < irk_count; i++) {
6150 struct mgmt_irk_info *irk = &cp->irks[i];
6152 if (hci_is_blocked_key(hdev,
6153 HCI_BLOCKED_KEY_TYPE_IRK,
6155 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6160 hci_add_irk(hdev, &irk->addr.bdaddr,
6161 le_addr_type(irk->addr.type), irk->val,
6165 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6167 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6169 hci_dev_unlock(hdev);
6175 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
6176 void *data, u16 len)
6178 struct mgmt_cp_set_advertising_params *cp = data;
6183 BT_DBG("%s", hdev->name);
6185 if (!lmp_le_capable(hdev))
6186 return mgmt_cmd_status(sk, hdev->id,
6187 MGMT_OP_SET_ADVERTISING_PARAMS,
6188 MGMT_STATUS_NOT_SUPPORTED);
6190 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6191 return mgmt_cmd_status(sk, hdev->id,
6192 MGMT_OP_SET_ADVERTISING_PARAMS,
6195 min_interval = __le16_to_cpu(cp->interval_min);
6196 max_interval = __le16_to_cpu(cp->interval_max);
6198 if (min_interval > max_interval ||
6199 min_interval < 0x0020 || max_interval > 0x4000)
6200 return mgmt_cmd_status(sk, hdev->id,
6201 MGMT_OP_SET_ADVERTISING_PARAMS,
6202 MGMT_STATUS_INVALID_PARAMS);
6206 hdev->le_adv_min_interval = min_interval;
6207 hdev->le_adv_max_interval = max_interval;
6208 hdev->adv_filter_policy = cp->filter_policy;
6209 hdev->adv_type = cp->type;
6211 err = mgmt_cmd_complete(sk, hdev->id,
6212 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
6214 hci_dev_unlock(hdev);
6219 static void set_advertising_data_complete(struct hci_dev *hdev,
6220 u8 status, u16 opcode)
6222 struct mgmt_cp_set_advertising_data *cp;
6223 struct mgmt_pending_cmd *cmd;
6225 BT_DBG("status 0x%02x", status);
6229 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
6236 mgmt_cmd_status(cmd->sk, hdev->id,
6237 MGMT_OP_SET_ADVERTISING_DATA,
6238 mgmt_status(status));
6240 mgmt_cmd_complete(cmd->sk, hdev->id,
6241 MGMT_OP_SET_ADVERTISING_DATA, 0,
6244 mgmt_pending_remove(cmd);
6247 hci_dev_unlock(hdev);
6250 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6251 void *data, u16 len)
6253 struct mgmt_pending_cmd *cmd;
6254 struct hci_request req;
6255 struct mgmt_cp_set_advertising_data *cp = data;
6256 struct hci_cp_le_set_adv_data adv;
6259 BT_DBG("%s", hdev->name);
6261 if (!lmp_le_capable(hdev)) {
6262 return mgmt_cmd_status(sk, hdev->id,
6263 MGMT_OP_SET_ADVERTISING_DATA,
6264 MGMT_STATUS_NOT_SUPPORTED);
6269 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6270 err = mgmt_cmd_status(sk, hdev->id,
6271 MGMT_OP_SET_ADVERTISING_DATA,
6276 if (len > HCI_MAX_AD_LENGTH) {
6277 err = mgmt_cmd_status(sk, hdev->id,
6278 MGMT_OP_SET_ADVERTISING_DATA,
6279 MGMT_STATUS_INVALID_PARAMS);
6283 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6290 hci_req_init(&req, hdev);
6292 memset(&adv, 0, sizeof(adv));
6293 memcpy(adv.data, cp->data, len);
6296 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6298 err = hci_req_run(&req, set_advertising_data_complete);
6300 mgmt_pending_remove(cmd);
6303 hci_dev_unlock(hdev);
6308 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6311 struct mgmt_cp_set_scan_rsp_data *cp;
6312 struct mgmt_pending_cmd *cmd;
6314 BT_DBG("status 0x%02x", status);
6318 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6325 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6326 mgmt_status(status));
6328 mgmt_cmd_complete(cmd->sk, hdev->id,
6329 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6332 mgmt_pending_remove(cmd);
6335 hci_dev_unlock(hdev);
6338 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6341 struct mgmt_pending_cmd *cmd;
6342 struct hci_request req;
6343 struct mgmt_cp_set_scan_rsp_data *cp = data;
6344 struct hci_cp_le_set_scan_rsp_data rsp;
6347 BT_DBG("%s", hdev->name);
6349 if (!lmp_le_capable(hdev))
6350 return mgmt_cmd_status(sk, hdev->id,
6351 MGMT_OP_SET_SCAN_RSP_DATA,
6352 MGMT_STATUS_NOT_SUPPORTED);
6356 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6357 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6362 if (len > HCI_MAX_AD_LENGTH) {
6363 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6364 MGMT_STATUS_INVALID_PARAMS);
6368 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6374 hci_req_init(&req, hdev);
6376 memset(&rsp, 0, sizeof(rsp));
6377 memcpy(rsp.data, cp->data, len);
6380 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6382 err = hci_req_run(&req, set_scan_rsp_data_complete);
6384 mgmt_pending_remove(cmd);
6387 hci_dev_unlock(hdev);
6392 /* Adv White List feature */
6393 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6395 struct mgmt_cp_add_dev_white_list *cp;
6396 struct mgmt_pending_cmd *cmd;
6398 BT_DBG("status 0x%02x", status);
6402 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
6409 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6410 mgmt_status(status));
6412 mgmt_cmd_complete(cmd->sk, hdev->id,
6413 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
6415 mgmt_pending_remove(cmd);
6418 hci_dev_unlock(hdev);
6421 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
6422 void *data, u16 len)
6424 struct mgmt_pending_cmd *cmd;
6425 struct mgmt_cp_add_dev_white_list *cp = data;
6426 struct hci_request req;
6429 BT_DBG("%s", hdev->name);
6431 if (!lmp_le_capable(hdev))
6432 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6433 MGMT_STATUS_NOT_SUPPORTED);
6435 if (!hdev_is_powered(hdev))
6436 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6437 MGMT_STATUS_REJECTED);
6441 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
6442 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6447 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
6453 hci_req_init(&req, hdev);
6455 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
6457 err = hci_req_run(&req, add_white_list_complete);
6459 mgmt_pending_remove(cmd);
6464 hci_dev_unlock(hdev);
6469 static void remove_from_white_list_complete(struct hci_dev *hdev,
6470 u8 status, u16 opcode)
6472 struct mgmt_cp_remove_dev_from_white_list *cp;
6473 struct mgmt_pending_cmd *cmd;
6475 BT_DBG("status 0x%02x", status);
6479 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
6486 mgmt_cmd_status(cmd->sk, hdev->id,
6487 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6488 mgmt_status(status));
6490 mgmt_cmd_complete(cmd->sk, hdev->id,
6491 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
6494 mgmt_pending_remove(cmd);
6497 hci_dev_unlock(hdev);
6500 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
6501 void *data, u16 len)
6503 struct mgmt_pending_cmd *cmd;
6504 struct mgmt_cp_remove_dev_from_white_list *cp = data;
6505 struct hci_request req;
6508 BT_DBG("%s", hdev->name);
6510 if (!lmp_le_capable(hdev))
6511 return mgmt_cmd_status(sk, hdev->id,
6512 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6513 MGMT_STATUS_NOT_SUPPORTED);
6515 if (!hdev_is_powered(hdev))
6516 return mgmt_cmd_status(sk, hdev->id,
6517 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6518 MGMT_STATUS_REJECTED);
6522 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
6523 err = mgmt_cmd_status(sk, hdev->id,
6524 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6529 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6536 hci_req_init(&req, hdev);
6538 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
6540 err = hci_req_run(&req, remove_from_white_list_complete);
6542 mgmt_pending_remove(cmd);
6547 hci_dev_unlock(hdev);
6552 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
6555 struct mgmt_pending_cmd *cmd;
6557 BT_DBG("status 0x%02x", status);
6561 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
6566 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6567 mgmt_status(status));
6569 mgmt_cmd_complete(cmd->sk, hdev->id,
6570 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6573 mgmt_pending_remove(cmd);
6576 hci_dev_unlock(hdev);
6579 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
6580 void *data, u16 len)
6582 struct mgmt_pending_cmd *cmd;
6583 struct hci_request req;
6586 BT_DBG("%s", hdev->name);
6588 if (!lmp_le_capable(hdev))
6589 return mgmt_cmd_status(sk, hdev->id,
6590 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6591 MGMT_STATUS_NOT_SUPPORTED);
6593 if (!hdev_is_powered(hdev))
6594 return mgmt_cmd_status(sk, hdev->id,
6595 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6596 MGMT_STATUS_REJECTED);
6600 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
6601 err = mgmt_cmd_status(sk, hdev->id,
6602 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6607 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6614 hci_req_init(&req, hdev);
6616 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
6618 err = hci_req_run(&req, clear_white_list_complete);
6620 mgmt_pending_remove(cmd);
6625 hci_dev_unlock(hdev);
6630 static void set_rssi_threshold_complete(struct hci_dev *hdev,
6631 u8 status, u16 opcode)
6633 struct mgmt_pending_cmd *cmd;
6635 BT_DBG("status 0x%02x", status);
6639 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6644 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6645 mgmt_status(status));
6647 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6650 mgmt_pending_remove(cmd);
6653 hci_dev_unlock(hdev);
6656 static void set_rssi_disable_complete(struct hci_dev *hdev,
6657 u8 status, u16 opcode)
6659 struct mgmt_pending_cmd *cmd;
6661 BT_DBG("status 0x%02x", status);
6665 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6670 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6671 mgmt_status(status));
6673 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6676 mgmt_pending_remove(cmd);
6679 hci_dev_unlock(hdev);
6682 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6683 void *data, u16 len)
6686 struct hci_cp_set_rssi_threshold th = { 0, };
6687 struct mgmt_cp_set_enable_rssi *cp = data;
6688 struct hci_conn *conn;
6689 struct mgmt_pending_cmd *cmd;
6690 struct hci_request req;
6695 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6697 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6698 MGMT_STATUS_FAILED);
6702 if (!lmp_le_capable(hdev)) {
6703 mgmt_pending_remove(cmd);
6704 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6705 MGMT_STATUS_NOT_SUPPORTED);
6709 if (!hdev_is_powered(hdev)) {
6710 BT_DBG("%s", hdev->name);
6711 mgmt_pending_remove(cmd);
6712 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6713 MGMT_STATUS_NOT_POWERED);
6717 if (cp->link_type == 0x01)
6718 dest_type = LE_LINK;
6720 dest_type = ACL_LINK;
6722 /* Get LE/ACL link handle info */
6723 conn = hci_conn_hash_lookup_ba(hdev,
6724 dest_type, &cp->bdaddr);
6727 err = mgmt_cmd_complete(sk, hdev->id,
6728 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
6729 mgmt_pending_remove(cmd);
6733 hci_req_init(&req, hdev);
6735 th.hci_le_ext_opcode = 0x0B;
6737 th.conn_handle = conn->handle;
6738 th.alert_mask = 0x07;
6739 th.low_th = cp->low_th;
6740 th.in_range_th = cp->in_range_th;
6741 th.high_th = cp->high_th;
6743 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6744 err = hci_req_run(&req, set_rssi_threshold_complete);
6747 mgmt_pending_remove(cmd);
6748 BT_ERR("Error in requesting hci_req_run");
6753 hci_dev_unlock(hdev);
6757 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6758 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6760 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6761 struct mgmt_cp_set_enable_rssi *cp = data;
6762 struct mgmt_pending_cmd *cmd;
6767 mgmt_rp.status = rp->status;
6768 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6769 mgmt_rp.bt_address = cp->bdaddr;
6770 mgmt_rp.link_type = cp->link_type;
6772 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6773 MGMT_STATUS_SUCCESS, &mgmt_rp,
6774 sizeof(struct mgmt_cc_rsp_enable_rssi));
6776 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6777 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6779 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6780 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6781 &mgmt_rp.bt_address, true);
6785 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6787 mgmt_pending_remove(cmd);
6789 hci_dev_unlock(hdev);
6792 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6793 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6795 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6796 struct mgmt_cp_disable_rssi *cp = data;
6797 struct mgmt_pending_cmd *cmd;
6802 mgmt_rp.status = rp->status;
6803 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6804 mgmt_rp.bt_address = cp->bdaddr;
6805 mgmt_rp.link_type = cp->link_type;
6807 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6808 MGMT_STATUS_SUCCESS, &mgmt_rp,
6809 sizeof(struct mgmt_cc_rsp_enable_rssi));
6811 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6812 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6814 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6815 &mgmt_rp.bt_address, false);
6819 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6821 mgmt_pending_remove(cmd);
6823 hci_dev_unlock(hdev);
6826 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6827 void *data, u16 len)
6829 struct mgmt_pending_cmd *cmd;
6830 struct hci_request req;
6831 struct hci_cp_set_enable_rssi cp_en = { 0, };
6834 BT_DBG("Set Disable RSSI.");
6836 cp_en.hci_le_ext_opcode = 0x01;
6837 cp_en.le_enable_cs_Features = 0x00;
6838 cp_en.data[0] = 0x00;
6839 cp_en.data[1] = 0x00;
6840 cp_en.data[2] = 0x00;
6844 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6846 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6847 MGMT_STATUS_FAILED);
6851 if (!lmp_le_capable(hdev)) {
6852 mgmt_pending_remove(cmd);
6853 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6854 MGMT_STATUS_NOT_SUPPORTED);
6858 if (!hdev_is_powered(hdev)) {
6859 BT_DBG("%s", hdev->name);
6860 mgmt_pending_remove(cmd);
6861 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6862 MGMT_STATUS_NOT_POWERED);
6866 hci_req_init(&req, hdev);
6868 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6869 sizeof(struct hci_cp_set_enable_rssi),
6870 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6871 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6873 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6874 err = hci_req_run(&req, set_rssi_disable_complete);
6877 mgmt_pending_remove(cmd);
6878 BT_ERR("Error in requesting hci_req_run");
6883 hci_dev_unlock(hdev);
6887 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6889 struct hci_cc_rsp_enable_rssi *rp = response;
6890 struct mgmt_pending_cmd *cmd_enable = NULL;
6891 struct mgmt_pending_cmd *cmd_disable = NULL;
6892 struct mgmt_cp_set_enable_rssi *cp_en;
6893 struct mgmt_cp_disable_rssi *cp_dis;
6896 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6897 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6898 hci_dev_unlock(hdev);
6901 BT_DBG("Enable Request");
6904 BT_DBG("Disable Request");
6907 cp_en = cmd_enable->param;
6912 switch (rp->le_ext_opcode) {
6914 BT_DBG("RSSI enabled.. Setting Threshold...");
6915 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6916 cp_en, sizeof(*cp_en));
6920 BT_DBG("Sending RSSI enable success");
6921 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6922 cp_en, rp, rp->status);
6926 } else if (cmd_disable) {
6927 cp_dis = cmd_disable->param;
6932 switch (rp->le_ext_opcode) {
6934 BT_DBG("Sending RSSI disable success");
6935 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6936 cp_dis, rp, rp->status);
6941 * Only unset RSSI Threshold values for the Link if
6942 * RSSI is monitored for other BREDR or LE Links
6944 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6945 BT_DBG("Unset Threshold. Other links being monitored");
6946 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6947 cp_dis, rp, rp->status);
6949 BT_DBG("Unset Threshold. Disabling...");
6950 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6951 cp_dis, sizeof(*cp_dis));
6958 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
6961 struct mgmt_pending_cmd *cmd;
6963 BT_DBG("status 0x%02x", status);
6967 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6972 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6973 mgmt_status(status));
6975 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6978 mgmt_pending_remove(cmd);
6981 hci_dev_unlock(hdev);
6984 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
6985 void *data, u16 len)
6987 struct mgmt_pending_cmd *cmd;
6988 struct hci_request req;
6989 struct mgmt_cp_set_enable_rssi *cp = data;
6990 struct hci_cp_set_enable_rssi cp_en = { 0, };
6993 BT_DBG("Set Enable RSSI.");
6995 cp_en.hci_le_ext_opcode = 0x01;
6996 cp_en.le_enable_cs_Features = 0x04;
6997 cp_en.data[0] = 0x00;
6998 cp_en.data[1] = 0x00;
6999 cp_en.data[2] = 0x00;
7003 if (!lmp_le_capable(hdev)) {
7004 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7005 MGMT_STATUS_NOT_SUPPORTED);
7009 if (!hdev_is_powered(hdev)) {
7010 BT_DBG("%s", hdev->name);
7011 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7012 MGMT_STATUS_NOT_POWERED);
7016 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
7017 BT_DBG("%s", hdev->name);
7018 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7023 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
7026 BT_DBG("%s", hdev->name);
7031 /* If RSSI is already enabled directly set Threshold values */
7032 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
7033 hci_dev_unlock(hdev);
7034 BT_DBG("RSSI Enabled. Directly set Threshold");
7035 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
7039 hci_req_init(&req, hdev);
7041 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7042 sizeof(struct hci_cp_set_enable_rssi),
7043 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7044 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7046 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7047 err = hci_req_run(&req, set_rssi_enable_complete);
7050 mgmt_pending_remove(cmd);
7051 BT_ERR("Error in requesting hci_req_run");
7056 hci_dev_unlock(hdev);
7061 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7063 struct mgmt_pending_cmd *cmd;
7065 BT_DBG("status 0x%02x", status);
7069 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
7073 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7074 MGMT_STATUS_SUCCESS, &status, 1);
7076 mgmt_pending_remove(cmd);
7079 hci_dev_unlock(hdev);
7082 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
7085 struct mgmt_pending_cmd *cmd;
7086 struct hci_request req;
7087 struct mgmt_cp_get_raw_rssi *cp = data;
7088 struct hci_cp_get_raw_rssi hci_cp;
7090 struct hci_conn *conn;
7094 BT_DBG("Get Raw RSSI.");
7098 if (!lmp_le_capable(hdev)) {
7099 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7100 MGMT_STATUS_NOT_SUPPORTED);
7104 if (cp->link_type == 0x01)
7105 dest_type = LE_LINK;
7107 dest_type = ACL_LINK;
7109 /* Get LE/BREDR link handle info */
7110 conn = hci_conn_hash_lookup_ba(hdev,
7111 dest_type, &cp->bt_address);
7113 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7114 MGMT_STATUS_NOT_CONNECTED);
7117 hci_cp.conn_handle = conn->handle;
7119 if (!hdev_is_powered(hdev)) {
7120 BT_DBG("%s", hdev->name);
7121 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7122 MGMT_STATUS_NOT_POWERED);
7126 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
7127 BT_DBG("%s", hdev->name);
7128 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7133 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
7135 BT_DBG("%s", hdev->name);
7140 hci_req_init(&req, hdev);
7142 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
7143 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
7144 err = hci_req_run(&req, get_raw_rssi_complete);
7147 mgmt_pending_remove(cmd);
7148 BT_ERR("Error in requesting hci_req_run");
7152 hci_dev_unlock(hdev);
7157 void mgmt_raw_rssi_response(struct hci_dev *hdev,
7158 struct hci_cc_rp_get_raw_rssi *rp, int success)
7160 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
7161 struct hci_conn *conn;
7163 mgmt_rp.status = rp->status;
7164 mgmt_rp.rssi_dbm = rp->rssi_dbm;
7166 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
7170 bacpy(&mgmt_rp.bt_address, &conn->dst);
7171 if (conn->type == LE_LINK)
7172 mgmt_rp.link_type = 0x01;
7174 mgmt_rp.link_type = 0x00;
7176 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
7177 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
7180 static void set_disable_threshold_complete(struct hci_dev *hdev,
7181 u8 status, u16 opcode)
7183 struct mgmt_pending_cmd *cmd;
7185 BT_DBG("status 0x%02x", status);
7189 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7193 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7194 MGMT_STATUS_SUCCESS, &status, 1);
7196 mgmt_pending_remove(cmd);
7199 hci_dev_unlock(hdev);
7202 /** Removes monitoring for a link*/
7203 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
7204 void *data, u16 len)
7207 struct hci_cp_set_rssi_threshold th = { 0, };
7208 struct mgmt_cp_disable_rssi *cp = data;
7209 struct hci_conn *conn;
7210 struct mgmt_pending_cmd *cmd;
7211 struct hci_request req;
7214 BT_DBG("Set Disable RSSI.");
7218 if (!lmp_le_capable(hdev)) {
7219 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7220 MGMT_STATUS_NOT_SUPPORTED);
7224 /* Get LE/ACL link handle info*/
7225 if (cp->link_type == 0x01)
7226 dest_type = LE_LINK;
7228 dest_type = ACL_LINK;
7230 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
7232 err = mgmt_cmd_complete(sk, hdev->id,
7233 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
7237 th.hci_le_ext_opcode = 0x0B;
7239 th.conn_handle = conn->handle;
7240 th.alert_mask = 0x00;
7242 th.in_range_th = 0x00;
7245 if (!hdev_is_powered(hdev)) {
7246 BT_DBG("%s", hdev->name);
7247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7252 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
7253 BT_DBG("%s", hdev->name);
7254 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7259 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
7262 BT_DBG("%s", hdev->name);
7267 hci_req_init(&req, hdev);
7269 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7270 err = hci_req_run(&req, set_disable_threshold_complete);
7272 mgmt_pending_remove(cmd);
7273 BT_ERR("Error in requesting hci_req_run");
7278 hci_dev_unlock(hdev);
7283 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
7284 s8 alert_type, s8 rssi_dbm)
7286 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
7287 struct hci_conn *conn;
7289 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
7290 conn_handle, alert_type, rssi_dbm);
7292 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
7295 BT_ERR("RSSI alert Error: Device not found for handle");
7298 bacpy(&mgmt_ev.bdaddr, &conn->dst);
7300 if (conn->type == LE_LINK)
7301 mgmt_ev.link_type = 0x01;
7303 mgmt_ev.link_type = 0x00;
7305 mgmt_ev.alert_type = alert_type;
7306 mgmt_ev.rssi_dbm = rssi_dbm;
7308 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
7309 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
7313 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
7315 struct mgmt_pending_cmd *cmd;
7319 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7321 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7325 type = hdev->le_discovery.type;
7327 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7328 mgmt_status(status), &type, sizeof(type));
7329 mgmt_pending_remove(cmd);
7334 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
7337 unsigned long timeout = 0;
7339 BT_DBG("status %d", status);
7343 mgmt_start_le_discovery_failed(hdev, status);
7344 hci_dev_unlock(hdev);
7349 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
7350 hci_dev_unlock(hdev);
7352 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
7353 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
7358 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
7361 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
7362 void *data, u16 len)
7364 struct mgmt_cp_start_le_discovery *cp = data;
7365 struct mgmt_pending_cmd *cmd;
7366 struct hci_cp_le_set_scan_param param_cp;
7367 struct hci_cp_le_set_scan_enable enable_cp;
7368 struct hci_request req;
7369 u8 status, own_addr_type;
7372 BT_DBG("%s", hdev->name);
7376 if (!hdev_is_powered(hdev)) {
7377 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7378 MGMT_STATUS_NOT_POWERED);
7382 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
7383 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7388 if (cp->type != DISCOV_TYPE_LE) {
7389 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7390 MGMT_STATUS_INVALID_PARAMS);
7394 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
7400 hdev->le_discovery.type = cp->type;
7402 hci_req_init(&req, hdev);
7404 status = mgmt_le_support(hdev);
7406 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7408 mgmt_pending_remove(cmd);
7412 /* If controller is scanning, it means the background scanning
7413 * is running. Thus, we should temporarily stop it in order to
7414 * set the discovery scanning parameters.
7416 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
7417 hci_req_add_le_scan_disable(&req, false);
7419 memset(¶m_cp, 0, sizeof(param_cp));
7421 /* All active scans will be done with either a resolvable
7422 * private address (when privacy feature has been enabled)
7423 * or unresolvable private address.
7425 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
7427 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7428 MGMT_STATUS_FAILED);
7429 mgmt_pending_remove(cmd);
7433 param_cp.type = hdev->le_scan_type;
7434 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
7435 param_cp.window = cpu_to_le16(hdev->le_scan_window);
7436 param_cp.own_address_type = own_addr_type;
7437 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
7440 memset(&enable_cp, 0, sizeof(enable_cp));
7441 enable_cp.enable = LE_SCAN_ENABLE;
7442 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
7444 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
7447 err = hci_req_run(&req, start_le_discovery_complete);
7449 mgmt_pending_remove(cmd);
7451 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
7454 hci_dev_unlock(hdev);
7458 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
7460 struct mgmt_pending_cmd *cmd;
7463 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7467 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7468 mgmt_status(status), &hdev->le_discovery.type,
7469 sizeof(hdev->le_discovery.type));
7470 mgmt_pending_remove(cmd);
7475 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
7478 BT_DBG("status %d", status);
7483 mgmt_stop_le_discovery_failed(hdev, status);
7487 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7490 hci_dev_unlock(hdev);
7493 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
7494 void *data, u16 len)
7496 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
7497 struct mgmt_pending_cmd *cmd;
7498 struct hci_request req;
7501 BT_DBG("%s", hdev->name);
7505 if (!hci_le_discovery_active(hdev)) {
7506 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7507 MGMT_STATUS_REJECTED, &mgmt_cp->type,
7508 sizeof(mgmt_cp->type));
7512 if (hdev->le_discovery.type != mgmt_cp->type) {
7513 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7514 MGMT_STATUS_INVALID_PARAMS,
7515 &mgmt_cp->type, sizeof(mgmt_cp->type));
7519 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
7525 hci_req_init(&req, hdev);
7527 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
7528 BT_DBG("unknown le discovery state %u",
7529 hdev->le_discovery.state);
7531 mgmt_pending_remove(cmd);
7532 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7533 MGMT_STATUS_FAILED, &mgmt_cp->type,
7534 sizeof(mgmt_cp->type));
7538 cancel_delayed_work(&hdev->le_scan_disable);
7539 hci_req_add_le_scan_disable(&req, false);
7541 err = hci_req_run(&req, stop_le_discovery_complete);
7543 mgmt_pending_remove(cmd);
7545 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
7548 hci_dev_unlock(hdev);
7552 /* Separate LE discovery */
7553 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
7555 struct mgmt_ev_discovering ev;
7556 struct mgmt_pending_cmd *cmd;
7558 BT_DBG("%s le discovering %u", hdev->name, discovering);
7561 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7563 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7566 u8 type = hdev->le_discovery.type;
7568 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
7570 mgmt_pending_remove(cmd);
7573 memset(&ev, 0, sizeof(ev));
7574 ev.type = hdev->le_discovery.type;
7575 ev.discovering = discovering;
7577 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7580 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
7581 void *data, u16 len)
7585 BT_DBG("%s", hdev->name);
7589 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
7591 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
7593 hci_dev_unlock(hdev);
7598 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
7603 if (min > max || min < 6 || max > 3200)
7606 if (to_multiplier < 10 || to_multiplier > 3200)
7609 if (max >= to_multiplier * 8)
7612 max_latency = (to_multiplier * 8 / max) - 1;
7614 if (latency > 499 || latency > max_latency)
7620 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
7623 struct mgmt_cp_le_conn_update *cp = data;
7625 struct hci_conn *conn;
7626 u16 min, max, latency, supervision_timeout;
7629 if (!hdev_is_powered(hdev))
7630 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7631 MGMT_STATUS_NOT_POWERED);
7633 min = __le16_to_cpu(cp->conn_interval_min);
7634 max = __le16_to_cpu(cp->conn_interval_max);
7635 latency = __le16_to_cpu(cp->conn_latency);
7636 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
7638 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
7639 min, max, latency, supervision_timeout);
7641 err = check_le_conn_update_param(min, max, latency,
7642 supervision_timeout);
7645 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7646 MGMT_STATUS_INVALID_PARAMS);
7650 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
7652 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7653 MGMT_STATUS_NOT_CONNECTED);
7654 hci_dev_unlock(hdev);
7658 hci_dev_unlock(hdev);
7660 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
7662 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
7666 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
7669 struct mgmt_cp_set_manufacturer_data *cp;
7670 struct mgmt_pending_cmd *cmd;
7672 BT_DBG("status 0x%02x", status);
7676 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
7683 mgmt_cmd_status(cmd->sk, hdev->id,
7684 MGMT_OP_SET_MANUFACTURER_DATA,
7685 mgmt_status(status));
7687 mgmt_cmd_complete(cmd->sk, hdev->id,
7688 MGMT_OP_SET_MANUFACTURER_DATA, 0,
7691 mgmt_pending_remove(cmd);
7694 hci_dev_unlock(hdev);
7697 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
7698 void *data, u16 len)
7700 struct mgmt_pending_cmd *cmd;
7701 struct hci_request req;
7702 struct mgmt_cp_set_manufacturer_data *cp = data;
7703 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
7707 BT_DBG("%s", hdev->name);
7709 if (!lmp_bredr_capable(hdev))
7710 return mgmt_cmd_status(sk, hdev->id,
7711 MGMT_OP_SET_MANUFACTURER_DATA,
7712 MGMT_STATUS_NOT_SUPPORTED);
7714 if (cp->data[0] == 0 ||
7715 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
7716 return mgmt_cmd_status(sk, hdev->id,
7717 MGMT_OP_SET_MANUFACTURER_DATA,
7718 MGMT_STATUS_INVALID_PARAMS);
7720 if (cp->data[1] != 0xFF)
7721 return mgmt_cmd_status(sk, hdev->id,
7722 MGMT_OP_SET_MANUFACTURER_DATA,
7723 MGMT_STATUS_NOT_SUPPORTED);
7727 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
7728 err = mgmt_cmd_status(sk, hdev->id,
7729 MGMT_OP_SET_MANUFACTURER_DATA,
7734 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
7741 hci_req_init(&req, hdev);
7743 /* if new data is same as previous data then return command
7746 if (hdev->manufacturer_len == cp->data[0] - 1 &&
7747 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
7748 mgmt_pending_remove(cmd);
7749 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
7750 0, cp, sizeof(*cp));
7755 old_len = hdev->manufacturer_len;
7757 memcpy(old_data, hdev->manufacturer_data, old_len);
7759 hdev->manufacturer_len = cp->data[0] - 1;
7760 if (hdev->manufacturer_len > 0)
7761 memcpy(hdev->manufacturer_data, cp->data + 2,
7762 hdev->manufacturer_len);
7764 __hci_req_update_eir(&req);
7766 err = hci_req_run(&req, set_manufacturer_data_complete);
7768 mgmt_pending_remove(cmd);
7773 hci_dev_unlock(hdev);
7778 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
7779 hdev->manufacturer_len = old_len;
7780 if (hdev->manufacturer_len > 0)
7781 memcpy(hdev->manufacturer_data, old_data,
7782 hdev->manufacturer_len);
7783 hci_dev_unlock(hdev);
7787 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
7788 void *data, u16 len)
7790 struct mgmt_cp_le_set_scan_params *cp = data;
7791 __u16 interval, window;
7794 BT_DBG("%s", hdev->name);
7796 if (!lmp_le_capable(hdev))
7797 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7798 MGMT_STATUS_NOT_SUPPORTED);
7800 interval = __le16_to_cpu(cp->interval);
7802 if (interval < 0x0004 || interval > 0x4000)
7803 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7804 MGMT_STATUS_INVALID_PARAMS);
7806 window = __le16_to_cpu(cp->window);
7808 if (window < 0x0004 || window > 0x4000)
7809 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7810 MGMT_STATUS_INVALID_PARAMS);
7812 if (window > interval)
7813 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7814 MGMT_STATUS_INVALID_PARAMS);
7818 hdev->le_scan_type = cp->type;
7819 hdev->le_scan_interval = interval;
7820 hdev->le_scan_window = window;
7822 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
7825 /* If background scan is running, restart it so new parameters are
7828 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
7829 hdev->discovery.state == DISCOVERY_STOPPED) {
7830 struct hci_request req;
7832 hci_req_init(&req, hdev);
7834 hci_req_add_le_scan_disable(&req, false);
7835 hci_req_add_le_passive_scan(&req);
7837 hci_req_run(&req, NULL);
7840 hci_dev_unlock(hdev);
7845 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
7846 void *data, u16 len)
7848 struct mgmt_cp_set_voice_setting *cp = data;
7849 struct hci_conn *conn;
7850 struct hci_conn *sco_conn;
7854 BT_DBG("%s", hdev->name);
7856 if (!lmp_bredr_capable(hdev)) {
7857 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
7858 MGMT_STATUS_NOT_SUPPORTED);
7863 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
7865 err = mgmt_cmd_complete(sk, hdev->id,
7866 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
7870 conn->voice_setting = cp->voice_setting;
7871 conn->sco_role = cp->sco_role;
7873 sco_conn = hci_conn_hash_lookup_sco(hdev);
7874 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
7875 BT_ERR("There is other SCO connection.");
7879 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
7880 if (conn->voice_setting == 0x0063)
7881 sco_connect_set_wbc(hdev);
7883 sco_connect_set_nbc(hdev);
7885 if (conn->voice_setting == 0x0063)
7886 sco_connect_set_gw_wbc(hdev);
7888 sco_connect_set_gw_nbc(hdev);
7892 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
7896 hci_dev_unlock(hdev);
7900 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
7902 struct mgmt_ev_hardware_error ev;
7904 ev.error_code = err_code;
7905 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
7908 void mgmt_tx_timeout_error(struct hci_dev *hdev)
7910 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
7913 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
7914 u8 state_change_reason, u16 connection_handle)
7916 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
7918 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
7919 adv_instance, state_change_reason, connection_handle);
7921 mgmt_ev.adv_instance = adv_instance;
7922 mgmt_ev.state_change_reason = state_change_reason;
7923 mgmt_ev.connection_handle = connection_handle;
7925 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
7926 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
7929 #endif /* TIZEN_BT */
7931 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7933 if (key->initiator != 0x00 && key->initiator != 0x01)
7936 switch (key->addr.type) {
7937 case BDADDR_LE_PUBLIC:
7940 case BDADDR_LE_RANDOM:
7941 /* Two most significant bits shall be set */
7942 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7950 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7951 void *cp_data, u16 len)
7953 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7954 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7955 sizeof(struct mgmt_ltk_info));
7956 u16 key_count, expected_len;
7959 bt_dev_dbg(hdev, "sock %p", sk);
7961 if (!lmp_le_capable(hdev))
7962 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7963 MGMT_STATUS_NOT_SUPPORTED);
7965 key_count = __le16_to_cpu(cp->key_count);
7966 if (key_count > max_key_count) {
7967 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7969 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7970 MGMT_STATUS_INVALID_PARAMS);
7973 expected_len = struct_size(cp, keys, key_count);
7974 if (expected_len != len) {
7975 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7977 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7978 MGMT_STATUS_INVALID_PARAMS);
7981 bt_dev_dbg(hdev, "key_count %u", key_count);
7983 for (i = 0; i < key_count; i++) {
7984 struct mgmt_ltk_info *key = &cp->keys[i];
7986 if (!ltk_is_valid(key))
7987 return mgmt_cmd_status(sk, hdev->id,
7988 MGMT_OP_LOAD_LONG_TERM_KEYS,
7989 MGMT_STATUS_INVALID_PARAMS);
7994 hci_smp_ltks_clear(hdev);
7996 for (i = 0; i < key_count; i++) {
7997 struct mgmt_ltk_info *key = &cp->keys[i];
7998 u8 type, authenticated;
8000 if (hci_is_blocked_key(hdev,
8001 HCI_BLOCKED_KEY_TYPE_LTK,
8003 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
8008 switch (key->type) {
8009 case MGMT_LTK_UNAUTHENTICATED:
8010 authenticated = 0x00;
8011 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8013 case MGMT_LTK_AUTHENTICATED:
8014 authenticated = 0x01;
8015 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8017 case MGMT_LTK_P256_UNAUTH:
8018 authenticated = 0x00;
8019 type = SMP_LTK_P256;
8021 case MGMT_LTK_P256_AUTH:
8022 authenticated = 0x01;
8023 type = SMP_LTK_P256;
8025 case MGMT_LTK_P256_DEBUG:
8026 authenticated = 0x00;
8027 type = SMP_LTK_P256_DEBUG;
8033 hci_add_ltk(hdev, &key->addr.bdaddr,
8034 le_addr_type(key->addr.type), type, authenticated,
8035 key->val, key->enc_size, key->ediv, key->rand);
8038 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
8041 hci_dev_unlock(hdev);
8046 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8048 struct hci_conn *conn = cmd->user_data;
8049 struct mgmt_rp_get_conn_info rp;
8052 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8054 if (status == MGMT_STATUS_SUCCESS) {
8055 rp.rssi = conn->rssi;
8056 rp.tx_power = conn->tx_power;
8057 rp.max_tx_power = conn->max_tx_power;
8059 rp.rssi = HCI_RSSI_INVALID;
8060 rp.tx_power = HCI_TX_POWER_INVALID;
8061 rp.max_tx_power = HCI_TX_POWER_INVALID;
8064 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
8065 status, &rp, sizeof(rp));
8067 hci_conn_drop(conn);
8073 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
8076 struct hci_cp_read_rssi *cp;
8077 struct mgmt_pending_cmd *cmd;
8078 struct hci_conn *conn;
8082 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
8086 /* Commands sent in request are either Read RSSI or Read Transmit Power
8087 * Level so we check which one was last sent to retrieve connection
8088 * handle. Both commands have handle as first parameter so it's safe to
8089 * cast data on the same command struct.
8091 * First command sent is always Read RSSI and we fail only if it fails.
8092 * In other case we simply override error to indicate success as we
8093 * already remembered if TX power value is actually valid.
8095 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
8097 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
8098 status = MGMT_STATUS_SUCCESS;
8100 status = mgmt_status(hci_status);
8104 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
8108 handle = __le16_to_cpu(cp->handle);
8109 conn = hci_conn_hash_lookup_handle(hdev, handle);
8111 bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
8116 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
8120 cmd->cmd_complete(cmd, status);
8121 mgmt_pending_remove(cmd);
8124 hci_dev_unlock(hdev);
8127 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
8130 struct mgmt_cp_get_conn_info *cp = data;
8131 struct mgmt_rp_get_conn_info rp;
8132 struct hci_conn *conn;
8133 unsigned long conn_info_age;
8136 bt_dev_dbg(hdev, "sock %p", sk);
8138 memset(&rp, 0, sizeof(rp));
8139 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8140 rp.addr.type = cp->addr.type;
8142 if (!bdaddr_type_is_valid(cp->addr.type))
8143 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8144 MGMT_STATUS_INVALID_PARAMS,
8149 if (!hdev_is_powered(hdev)) {
8150 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8151 MGMT_STATUS_NOT_POWERED, &rp,
8156 if (cp->addr.type == BDADDR_BREDR)
8157 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8160 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8162 if (!conn || conn->state != BT_CONNECTED) {
8163 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8164 MGMT_STATUS_NOT_CONNECTED, &rp,
8169 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
8170 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8171 MGMT_STATUS_BUSY, &rp, sizeof(rp));
8175 /* To avoid client trying to guess when to poll again for information we
8176 * calculate conn info age as random value between min/max set in hdev.
8178 conn_info_age = hdev->conn_info_min_age +
8179 prandom_u32_max(hdev->conn_info_max_age -
8180 hdev->conn_info_min_age);
8182 /* Query controller to refresh cached values if they are too old or were
8185 if (time_after(jiffies, conn->conn_info_timestamp +
8186 msecs_to_jiffies(conn_info_age)) ||
8187 !conn->conn_info_timestamp) {
8188 struct hci_request req;
8189 struct hci_cp_read_tx_power req_txp_cp;
8190 struct hci_cp_read_rssi req_rssi_cp;
8191 struct mgmt_pending_cmd *cmd;
8193 hci_req_init(&req, hdev);
8194 req_rssi_cp.handle = cpu_to_le16(conn->handle);
8195 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
8198 /* For LE links TX power does not change thus we don't need to
8199 * query for it once value is known.
8201 if (!bdaddr_type_is_le(cp->addr.type) ||
8202 conn->tx_power == HCI_TX_POWER_INVALID) {
8203 req_txp_cp.handle = cpu_to_le16(conn->handle);
8204 req_txp_cp.type = 0x00;
8205 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8206 sizeof(req_txp_cp), &req_txp_cp);
8209 /* Max TX power needs to be read only once per connection */
8210 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
8211 req_txp_cp.handle = cpu_to_le16(conn->handle);
8212 req_txp_cp.type = 0x01;
8213 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8214 sizeof(req_txp_cp), &req_txp_cp);
8217 err = hci_req_run(&req, conn_info_refresh_complete);
8221 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
8228 hci_conn_hold(conn);
8229 cmd->user_data = hci_conn_get(conn);
8230 cmd->cmd_complete = conn_info_cmd_complete;
8232 conn->conn_info_timestamp = jiffies;
8234 /* Cache is valid, just reply with values cached in hci_conn */
8235 rp.rssi = conn->rssi;
8236 rp.tx_power = conn->tx_power;
8237 rp.max_tx_power = conn->max_tx_power;
8239 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8240 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8244 hci_dev_unlock(hdev);
8248 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8250 struct hci_conn *conn = cmd->user_data;
8251 struct mgmt_rp_get_clock_info rp;
8252 struct hci_dev *hdev;
8255 memset(&rp, 0, sizeof(rp));
8256 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8261 hdev = hci_dev_get(cmd->index);
8263 rp.local_clock = cpu_to_le32(hdev->clock);
8268 rp.piconet_clock = cpu_to_le32(conn->clock);
8269 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
8273 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
8277 hci_conn_drop(conn);
8284 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8286 struct hci_cp_read_clock *hci_cp;
8287 struct mgmt_pending_cmd *cmd;
8288 struct hci_conn *conn;
8290 bt_dev_dbg(hdev, "status %u", status);
8294 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
8298 if (hci_cp->which) {
8299 u16 handle = __le16_to_cpu(hci_cp->handle);
8300 conn = hci_conn_hash_lookup_handle(hdev, handle);
8305 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
8309 cmd->cmd_complete(cmd, mgmt_status(status));
8310 mgmt_pending_remove(cmd);
8313 hci_dev_unlock(hdev);
8316 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
8319 struct mgmt_cp_get_clock_info *cp = data;
8320 struct mgmt_rp_get_clock_info rp;
8321 struct hci_cp_read_clock hci_cp;
8322 struct mgmt_pending_cmd *cmd;
8323 struct hci_request req;
8324 struct hci_conn *conn;
8327 bt_dev_dbg(hdev, "sock %p", sk);
8329 memset(&rp, 0, sizeof(rp));
8330 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8331 rp.addr.type = cp->addr.type;
8333 if (cp->addr.type != BDADDR_BREDR)
8334 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8335 MGMT_STATUS_INVALID_PARAMS,
8340 if (!hdev_is_powered(hdev)) {
8341 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8342 MGMT_STATUS_NOT_POWERED, &rp,
8347 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8348 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8350 if (!conn || conn->state != BT_CONNECTED) {
8351 err = mgmt_cmd_complete(sk, hdev->id,
8352 MGMT_OP_GET_CLOCK_INFO,
8353 MGMT_STATUS_NOT_CONNECTED,
8361 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
8367 cmd->cmd_complete = clock_info_cmd_complete;
8369 hci_req_init(&req, hdev);
8371 memset(&hci_cp, 0, sizeof(hci_cp));
8372 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8375 hci_conn_hold(conn);
8376 cmd->user_data = hci_conn_get(conn);
8378 hci_cp.handle = cpu_to_le16(conn->handle);
8379 hci_cp.which = 0x01; /* Piconet clock */
8380 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8383 err = hci_req_run(&req, get_clock_info_complete);
8385 mgmt_pending_remove(cmd);
8388 hci_dev_unlock(hdev);
8392 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8394 struct hci_conn *conn;
8396 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8400 if (conn->dst_type != type)
8403 if (conn->state != BT_CONNECTED)
8409 /* This function requires the caller holds hdev->lock */
8410 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8411 u8 addr_type, u8 auto_connect)
8413 struct hci_conn_params *params;
8415 params = hci_conn_params_add(hdev, addr, addr_type);
8419 if (params->auto_connect == auto_connect)
8422 list_del_init(¶ms->action);
8424 switch (auto_connect) {
8425 case HCI_AUTO_CONN_DISABLED:
8426 case HCI_AUTO_CONN_LINK_LOSS:
8427 /* If auto connect is being disabled when we're trying to
8428 * connect to device, keep connecting.
8430 if (params->explicit_connect)
8431 list_add(¶ms->action, &hdev->pend_le_conns);
8433 case HCI_AUTO_CONN_REPORT:
8434 if (params->explicit_connect)
8435 list_add(¶ms->action, &hdev->pend_le_conns);
8437 list_add(¶ms->action, &hdev->pend_le_reports);
8439 case HCI_AUTO_CONN_DIRECT:
8440 case HCI_AUTO_CONN_ALWAYS:
8441 if (!is_connected(hdev, addr, addr_type))
8442 list_add(¶ms->action, &hdev->pend_le_conns);
8446 params->auto_connect = auto_connect;
8448 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
8449 addr, addr_type, auto_connect);
8454 static void device_added(struct sock *sk, struct hci_dev *hdev,
8455 bdaddr_t *bdaddr, u8 type, u8 action)
8457 struct mgmt_ev_device_added ev;
8459 bacpy(&ev.addr.bdaddr, bdaddr);
8460 ev.addr.type = type;
8463 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
8466 static int add_device(struct sock *sk, struct hci_dev *hdev,
8467 void *data, u16 len)
8469 struct mgmt_cp_add_device *cp = data;
8470 u8 auto_conn, addr_type;
8471 struct hci_conn_params *params;
8473 u32 current_flags = 0;
8475 bt_dev_dbg(hdev, "sock %p", sk);
8477 if (!bdaddr_type_is_valid(cp->addr.type) ||
8478 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
8479 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8480 MGMT_STATUS_INVALID_PARAMS,
8481 &cp->addr, sizeof(cp->addr));
8483 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
8484 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8485 MGMT_STATUS_INVALID_PARAMS,
8486 &cp->addr, sizeof(cp->addr));
8490 if (cp->addr.type == BDADDR_BREDR) {
8491 /* Only incoming connections action is supported for now */
8492 if (cp->action != 0x01) {
8493 err = mgmt_cmd_complete(sk, hdev->id,
8495 MGMT_STATUS_INVALID_PARAMS,
8496 &cp->addr, sizeof(cp->addr));
8500 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
8506 hci_req_update_scan(hdev);
8511 addr_type = le_addr_type(cp->addr.type);
8513 if (cp->action == 0x02)
8514 auto_conn = HCI_AUTO_CONN_ALWAYS;
8515 else if (cp->action == 0x01)
8516 auto_conn = HCI_AUTO_CONN_DIRECT;
8518 auto_conn = HCI_AUTO_CONN_REPORT;
8520 /* Kernel internally uses conn_params with resolvable private
8521 * address, but Add Device allows only identity addresses.
8522 * Make sure it is enforced before calling
8523 * hci_conn_params_lookup.
8525 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8526 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8527 MGMT_STATUS_INVALID_PARAMS,
8528 &cp->addr, sizeof(cp->addr));
8532 /* If the connection parameters don't exist for this device,
8533 * they will be created and configured with defaults.
8535 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
8537 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8538 MGMT_STATUS_FAILED, &cp->addr,
8542 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8545 current_flags = params->current_flags;
8548 hci_update_background_scan(hdev);
8551 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
8552 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
8553 SUPPORTED_DEVICE_FLAGS(), current_flags);
8555 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8556 MGMT_STATUS_SUCCESS, &cp->addr,
8560 hci_dev_unlock(hdev);
8564 static void device_removed(struct sock *sk, struct hci_dev *hdev,
8565 bdaddr_t *bdaddr, u8 type)
8567 struct mgmt_ev_device_removed ev;
8569 bacpy(&ev.addr.bdaddr, bdaddr);
8570 ev.addr.type = type;
8572 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
8575 static int remove_device(struct sock *sk, struct hci_dev *hdev,
8576 void *data, u16 len)
8578 struct mgmt_cp_remove_device *cp = data;
8581 bt_dev_dbg(hdev, "sock %p", sk);
8585 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8586 struct hci_conn_params *params;
8589 if (!bdaddr_type_is_valid(cp->addr.type)) {
8590 err = mgmt_cmd_complete(sk, hdev->id,
8591 MGMT_OP_REMOVE_DEVICE,
8592 MGMT_STATUS_INVALID_PARAMS,
8593 &cp->addr, sizeof(cp->addr));
8597 if (cp->addr.type == BDADDR_BREDR) {
8598 err = hci_bdaddr_list_del(&hdev->accept_list,
8602 err = mgmt_cmd_complete(sk, hdev->id,
8603 MGMT_OP_REMOVE_DEVICE,
8604 MGMT_STATUS_INVALID_PARAMS,
8610 hci_req_update_scan(hdev);
8612 device_removed(sk, hdev, &cp->addr.bdaddr,
8617 addr_type = le_addr_type(cp->addr.type);
8619 /* Kernel internally uses conn_params with resolvable private
8620 * address, but Remove Device allows only identity addresses.
8621 * Make sure it is enforced before calling
8622 * hci_conn_params_lookup.
8624 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8625 err = mgmt_cmd_complete(sk, hdev->id,
8626 MGMT_OP_REMOVE_DEVICE,
8627 MGMT_STATUS_INVALID_PARAMS,
8628 &cp->addr, sizeof(cp->addr));
8632 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8635 err = mgmt_cmd_complete(sk, hdev->id,
8636 MGMT_OP_REMOVE_DEVICE,
8637 MGMT_STATUS_INVALID_PARAMS,
8638 &cp->addr, sizeof(cp->addr));
8642 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
8643 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
8644 err = mgmt_cmd_complete(sk, hdev->id,
8645 MGMT_OP_REMOVE_DEVICE,
8646 MGMT_STATUS_INVALID_PARAMS,
8647 &cp->addr, sizeof(cp->addr));
8651 list_del(¶ms->action);
8652 list_del(¶ms->list);
8654 hci_update_background_scan(hdev);
8656 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
8658 struct hci_conn_params *p, *tmp;
8659 struct bdaddr_list *b, *btmp;
8661 if (cp->addr.type) {
8662 err = mgmt_cmd_complete(sk, hdev->id,
8663 MGMT_OP_REMOVE_DEVICE,
8664 MGMT_STATUS_INVALID_PARAMS,
8665 &cp->addr, sizeof(cp->addr));
8669 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
8670 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
8675 hci_req_update_scan(hdev);
8677 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
8678 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
8680 device_removed(sk, hdev, &p->addr, p->addr_type);
8681 if (p->explicit_connect) {
8682 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
8685 list_del(&p->action);
8690 bt_dev_dbg(hdev, "All LE connection parameters were removed");
8692 hci_update_background_scan(hdev);
8696 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
8697 MGMT_STATUS_SUCCESS, &cp->addr,
8700 hci_dev_unlock(hdev);
8704 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
8707 struct mgmt_cp_load_conn_param *cp = data;
8708 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
8709 sizeof(struct mgmt_conn_param));
8710 u16 param_count, expected_len;
8713 if (!lmp_le_capable(hdev))
8714 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8715 MGMT_STATUS_NOT_SUPPORTED);
8717 param_count = __le16_to_cpu(cp->param_count);
8718 if (param_count > max_param_count) {
8719 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
8721 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8722 MGMT_STATUS_INVALID_PARAMS);
8725 expected_len = struct_size(cp, params, param_count);
8726 if (expected_len != len) {
8727 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
8729 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8730 MGMT_STATUS_INVALID_PARAMS);
8733 bt_dev_dbg(hdev, "param_count %u", param_count);
8737 hci_conn_params_clear_disabled(hdev);
8739 for (i = 0; i < param_count; i++) {
8740 struct mgmt_conn_param *param = &cp->params[i];
8741 struct hci_conn_params *hci_param;
8742 u16 min, max, latency, timeout;
8745 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
8748 if (param->addr.type == BDADDR_LE_PUBLIC) {
8749 addr_type = ADDR_LE_DEV_PUBLIC;
8750 } else if (param->addr.type == BDADDR_LE_RANDOM) {
8751 addr_type = ADDR_LE_DEV_RANDOM;
8753 bt_dev_err(hdev, "ignoring invalid connection parameters");
8757 min = le16_to_cpu(param->min_interval);
8758 max = le16_to_cpu(param->max_interval);
8759 latency = le16_to_cpu(param->latency);
8760 timeout = le16_to_cpu(param->timeout);
8762 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
8763 min, max, latency, timeout);
8765 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
8766 bt_dev_err(hdev, "ignoring invalid connection parameters");
8770 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
8773 bt_dev_err(hdev, "failed to add connection parameters");
8777 hci_param->conn_min_interval = min;
8778 hci_param->conn_max_interval = max;
8779 hci_param->conn_latency = latency;
8780 hci_param->supervision_timeout = timeout;
8783 hci_dev_unlock(hdev);
8785 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
8789 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8790 void *data, u16 len)
8792 struct mgmt_cp_set_external_config *cp = data;
8796 bt_dev_dbg(hdev, "sock %p", sk);
8798 if (hdev_is_powered(hdev))
8799 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8800 MGMT_STATUS_REJECTED);
8802 if (cp->config != 0x00 && cp->config != 0x01)
8803 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8804 MGMT_STATUS_INVALID_PARAMS);
8806 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8808 MGMT_STATUS_NOT_SUPPORTED);
8813 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8815 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8817 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8824 err = new_options(hdev, sk);
8826 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8827 mgmt_index_removed(hdev);
8829 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8830 hci_dev_set_flag(hdev, HCI_CONFIG);
8831 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8833 queue_work(hdev->req_workqueue, &hdev->power_on);
8835 set_bit(HCI_RAW, &hdev->flags);
8836 mgmt_index_added(hdev);
8841 hci_dev_unlock(hdev);
8845 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8846 void *data, u16 len)
8848 struct mgmt_cp_set_public_address *cp = data;
8852 bt_dev_dbg(hdev, "sock %p", sk);
8854 if (hdev_is_powered(hdev))
8855 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8856 MGMT_STATUS_REJECTED);
8858 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8859 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8860 MGMT_STATUS_INVALID_PARAMS);
8862 if (!hdev->set_bdaddr)
8863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8864 MGMT_STATUS_NOT_SUPPORTED);
8868 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8869 bacpy(&hdev->public_addr, &cp->bdaddr);
8871 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8878 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8879 err = new_options(hdev, sk);
8881 if (is_configured(hdev)) {
8882 mgmt_index_removed(hdev);
8884 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8886 hci_dev_set_flag(hdev, HCI_CONFIG);
8887 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8889 queue_work(hdev->req_workqueue, &hdev->power_on);
8893 hci_dev_unlock(hdev);
8898 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
8902 struct mgmt_ev_device_name_update *ev = (void *)buf;
8908 bacpy(&ev->addr.bdaddr, bdaddr);
8909 ev->addr.type = BDADDR_BREDR;
8911 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8914 ev->eir_len = cpu_to_le16(eir_len);
8916 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
8917 sizeof(*ev) + eir_len, NULL);
8920 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8921 u8 link_type, u8 addr_type, u8 status)
8923 struct mgmt_ev_conn_update_failed ev;
8925 bacpy(&ev.addr.bdaddr, bdaddr);
8926 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8929 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
8930 &ev, sizeof(ev), NULL);
8933 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
8934 u8 link_type, u8 addr_type, u16 conn_interval,
8935 u16 conn_latency, u16 supervision_timeout)
8937 struct mgmt_ev_conn_updated ev;
8939 bacpy(&ev.addr.bdaddr, bdaddr);
8940 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8941 ev.conn_interval = cpu_to_le16(conn_interval);
8942 ev.conn_latency = cpu_to_le16(conn_latency);
8943 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
8945 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
8946 &ev, sizeof(ev), NULL);
8949 /* le device found event - Pass adv type */
8950 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8951 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
8952 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
8955 struct mgmt_ev_le_device_found *ev = (void *)buf;
8958 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
8961 /* Make sure that the buffer is big enough. The 5 extra bytes
8962 * are for the potential CoD field.
8964 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8967 memset(buf, 0, sizeof(buf));
8969 bacpy(&ev->addr.bdaddr, bdaddr);
8970 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8972 ev->flags = cpu_to_le32(flags);
8973 ev->adv_type = adv_type;
8976 memcpy(ev->eir, eir, eir_len);
8978 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
8979 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8982 if (scan_rsp_len > 0)
8983 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8985 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8986 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8988 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
8992 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
8993 u16 opcode, struct sk_buff *skb)
8995 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8996 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8997 u8 *h192, *r192, *h256, *r256;
8998 struct mgmt_pending_cmd *cmd;
9002 bt_dev_dbg(hdev, "status %u", status);
9004 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
9008 mgmt_cp = cmd->param;
9011 status = mgmt_status(status);
9018 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
9019 struct hci_rp_read_local_oob_data *rp;
9021 if (skb->len != sizeof(*rp)) {
9022 status = MGMT_STATUS_FAILED;
9025 status = MGMT_STATUS_SUCCESS;
9026 rp = (void *)skb->data;
9028 eir_len = 5 + 18 + 18;
9035 struct hci_rp_read_local_oob_ext_data *rp;
9037 if (skb->len != sizeof(*rp)) {
9038 status = MGMT_STATUS_FAILED;
9041 status = MGMT_STATUS_SUCCESS;
9042 rp = (void *)skb->data;
9044 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
9045 eir_len = 5 + 18 + 18;
9049 eir_len = 5 + 18 + 18 + 18 + 18;
9059 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
9066 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
9067 hdev->dev_class, 3);
9070 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9071 EIR_SSP_HASH_C192, h192, 16);
9072 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9073 EIR_SSP_RAND_R192, r192, 16);
9077 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9078 EIR_SSP_HASH_C256, h256, 16);
9079 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9080 EIR_SSP_RAND_R256, r256, 16);
9084 mgmt_rp->type = mgmt_cp->type;
9085 mgmt_rp->eir_len = cpu_to_le16(eir_len);
9087 err = mgmt_cmd_complete(cmd->sk, hdev->id,
9088 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
9089 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
9090 if (err < 0 || status)
9093 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
9095 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9096 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
9097 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
9100 mgmt_pending_remove(cmd);
9103 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
9104 struct mgmt_cp_read_local_oob_ext_data *cp)
9106 struct mgmt_pending_cmd *cmd;
9107 struct hci_request req;
9110 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
9115 hci_req_init(&req, hdev);
9117 if (bredr_sc_enabled(hdev))
9118 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
9120 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
9122 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
9124 mgmt_pending_remove(cmd);
9131 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
9132 void *data, u16 data_len)
9134 struct mgmt_cp_read_local_oob_ext_data *cp = data;
9135 struct mgmt_rp_read_local_oob_ext_data *rp;
9138 u8 status, flags, role, addr[7], hash[16], rand[16];
9141 bt_dev_dbg(hdev, "sock %p", sk);
9143 if (hdev_is_powered(hdev)) {
9145 case BIT(BDADDR_BREDR):
9146 status = mgmt_bredr_support(hdev);
9152 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9153 status = mgmt_le_support(hdev);
9157 eir_len = 9 + 3 + 18 + 18 + 3;
9160 status = MGMT_STATUS_INVALID_PARAMS;
9165 status = MGMT_STATUS_NOT_POWERED;
9169 rp_len = sizeof(*rp) + eir_len;
9170 rp = kmalloc(rp_len, GFP_ATOMIC);
9181 case BIT(BDADDR_BREDR):
9182 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9183 err = read_local_ssp_oob_req(hdev, sk, cp);
9184 hci_dev_unlock(hdev);
9188 status = MGMT_STATUS_FAILED;
9191 eir_len = eir_append_data(rp->eir, eir_len,
9193 hdev->dev_class, 3);
9196 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9197 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
9198 smp_generate_oob(hdev, hash, rand) < 0) {
9199 hci_dev_unlock(hdev);
9200 status = MGMT_STATUS_FAILED;
9204 /* This should return the active RPA, but since the RPA
9205 * is only programmed on demand, it is really hard to fill
9206 * this in at the moment. For now disallow retrieving
9207 * local out-of-band data when privacy is in use.
9209 * Returning the identity address will not help here since
9210 * pairing happens before the identity resolving key is
9211 * known and thus the connection establishment happens
9212 * based on the RPA and not the identity address.
9214 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
9215 hci_dev_unlock(hdev);
9216 status = MGMT_STATUS_REJECTED;
9220 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
9221 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
9222 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
9223 bacmp(&hdev->static_addr, BDADDR_ANY))) {
9224 memcpy(addr, &hdev->static_addr, 6);
9227 memcpy(addr, &hdev->bdaddr, 6);
9231 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
9232 addr, sizeof(addr));
9234 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
9239 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
9240 &role, sizeof(role));
9242 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
9243 eir_len = eir_append_data(rp->eir, eir_len,
9245 hash, sizeof(hash));
9247 eir_len = eir_append_data(rp->eir, eir_len,
9249 rand, sizeof(rand));
9252 flags = mgmt_get_adv_discov_flags(hdev);
9254 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
9255 flags |= LE_AD_NO_BREDR;
9257 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
9258 &flags, sizeof(flags));
9262 hci_dev_unlock(hdev);
9264 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
9266 status = MGMT_STATUS_SUCCESS;
9269 rp->type = cp->type;
9270 rp->eir_len = cpu_to_le16(eir_len);
9272 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
9273 status, rp, sizeof(*rp) + eir_len);
9274 if (err < 0 || status)
9277 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9278 rp, sizeof(*rp) + eir_len,
9279 HCI_MGMT_OOB_DATA_EVENTS, sk);
9287 static u32 get_supported_adv_flags(struct hci_dev *hdev)
9291 flags |= MGMT_ADV_FLAG_CONNECTABLE;
9292 flags |= MGMT_ADV_FLAG_DISCOV;
9293 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
9294 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
9295 flags |= MGMT_ADV_FLAG_APPEARANCE;
9296 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
9297 flags |= MGMT_ADV_PARAM_DURATION;
9298 flags |= MGMT_ADV_PARAM_TIMEOUT;
9299 flags |= MGMT_ADV_PARAM_INTERVALS;
9300 flags |= MGMT_ADV_PARAM_TX_POWER;
9301 flags |= MGMT_ADV_PARAM_SCAN_RSP;
9303 /* In extended adv TX_POWER returned from Set Adv Param
9304 * will be always valid.
9306 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
9307 ext_adv_capable(hdev))
9308 flags |= MGMT_ADV_FLAG_TX_POWER;
9310 if (ext_adv_capable(hdev)) {
9311 flags |= MGMT_ADV_FLAG_SEC_1M;
9312 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
9313 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
9315 if (hdev->le_features[1] & HCI_LE_PHY_2M)
9316 flags |= MGMT_ADV_FLAG_SEC_2M;
9318 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
9319 flags |= MGMT_ADV_FLAG_SEC_CODED;
9325 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
9326 void *data, u16 data_len)
9328 struct mgmt_rp_read_adv_features *rp;
9331 struct adv_info *adv_instance;
9332 u32 supported_flags;
9335 bt_dev_dbg(hdev, "sock %p", sk);
9337 if (!lmp_le_capable(hdev))
9338 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9339 MGMT_STATUS_REJECTED);
9341 /* Enabling the experimental LL Privay support disables support for
9344 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9345 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9346 MGMT_STATUS_NOT_SUPPORTED);
9350 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
9351 rp = kmalloc(rp_len, GFP_ATOMIC);
9353 hci_dev_unlock(hdev);
9357 supported_flags = get_supported_adv_flags(hdev);
9359 rp->supported_flags = cpu_to_le32(supported_flags);
9360 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
9361 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
9362 rp->max_instances = hdev->le_num_of_adv_sets;
9363 rp->num_instances = hdev->adv_instance_cnt;
9365 instance = rp->instance;
9366 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
9367 *instance = adv_instance->instance;
9371 hci_dev_unlock(hdev);
9373 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9374 MGMT_STATUS_SUCCESS, rp, rp_len);
9381 static u8 calculate_name_len(struct hci_dev *hdev)
9383 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
9385 return append_local_name(hdev, buf, 0);
9388 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
9391 u8 max_len = HCI_MAX_AD_LENGTH;
9394 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
9395 MGMT_ADV_FLAG_LIMITED_DISCOV |
9396 MGMT_ADV_FLAG_MANAGED_FLAGS))
9399 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
9402 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
9403 max_len -= calculate_name_len(hdev);
9405 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
9412 static bool flags_managed(u32 adv_flags)
9414 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
9415 MGMT_ADV_FLAG_LIMITED_DISCOV |
9416 MGMT_ADV_FLAG_MANAGED_FLAGS);
9419 static bool tx_power_managed(u32 adv_flags)
9421 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
9424 static bool name_managed(u32 adv_flags)
9426 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
9429 static bool appearance_managed(u32 adv_flags)
9431 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
9434 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
9435 u8 len, bool is_adv_data)
9440 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
9445 /* Make sure that the data is correctly formatted. */
9446 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
9452 if (data[i + 1] == EIR_FLAGS &&
9453 (!is_adv_data || flags_managed(adv_flags)))
9456 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
9459 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
9462 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
9465 if (data[i + 1] == EIR_APPEARANCE &&
9466 appearance_managed(adv_flags))
9469 /* If the current field length would exceed the total data
9470 * length, then it's invalid.
9472 if (i + cur_len >= len)
9479 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
9481 u32 supported_flags, phy_flags;
9483 /* The current implementation only supports a subset of the specified
9484 * flags. Also need to check mutual exclusiveness of sec flags.
9486 supported_flags = get_supported_adv_flags(hdev);
9487 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
9488 if (adv_flags & ~supported_flags ||
9489 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
9495 static bool adv_busy(struct hci_dev *hdev)
9497 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9498 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9499 pending_find(MGMT_OP_SET_LE, hdev) ||
9500 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
9501 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
9504 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
9507 struct mgmt_pending_cmd *cmd;
9508 struct mgmt_cp_add_advertising *cp;
9509 struct mgmt_rp_add_advertising rp;
9510 struct adv_info *adv_instance, *n;
9513 bt_dev_dbg(hdev, "status %u", status);
9517 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
9519 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
9521 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
9522 if (!adv_instance->pending)
9526 adv_instance->pending = false;
9530 instance = adv_instance->instance;
9532 if (hdev->cur_adv_instance == instance)
9533 cancel_adv_timeout(hdev);
9535 hci_remove_adv_instance(hdev, instance);
9536 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
9543 rp.instance = cp->instance;
9546 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9547 mgmt_status(status));
9549 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9550 mgmt_status(status), &rp, sizeof(rp));
9552 mgmt_pending_remove(cmd);
9555 hci_dev_unlock(hdev);
9558 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
9559 void *data, u16 data_len)
9561 struct mgmt_cp_add_advertising *cp = data;
9562 struct mgmt_rp_add_advertising rp;
9565 u16 timeout, duration;
9566 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
9567 u8 schedule_instance = 0;
9568 struct adv_info *next_instance;
9570 struct mgmt_pending_cmd *cmd;
9571 struct hci_request req;
9573 bt_dev_dbg(hdev, "sock %p", sk);
9575 status = mgmt_le_support(hdev);
9577 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9580 /* Enabling the experimental LL Privay support disables support for
9583 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9585 MGMT_STATUS_NOT_SUPPORTED);
9587 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9588 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9589 MGMT_STATUS_INVALID_PARAMS);
9591 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
9592 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9593 MGMT_STATUS_INVALID_PARAMS);
9595 flags = __le32_to_cpu(cp->flags);
9596 timeout = __le16_to_cpu(cp->timeout);
9597 duration = __le16_to_cpu(cp->duration);
9599 if (!requested_adv_flags_are_valid(hdev, flags))
9600 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9601 MGMT_STATUS_INVALID_PARAMS);
9605 if (timeout && !hdev_is_powered(hdev)) {
9606 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9607 MGMT_STATUS_REJECTED);
9611 if (adv_busy(hdev)) {
9612 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9617 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
9618 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
9619 cp->scan_rsp_len, false)) {
9620 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9621 MGMT_STATUS_INVALID_PARAMS);
9625 err = hci_add_adv_instance(hdev, cp->instance, flags,
9626 cp->adv_data_len, cp->data,
9628 cp->data + cp->adv_data_len,
9630 HCI_ADV_TX_POWER_NO_PREFERENCE,
9631 hdev->le_adv_min_interval,
9632 hdev->le_adv_max_interval);
9634 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9635 MGMT_STATUS_FAILED);
9639 /* Only trigger an advertising added event if a new instance was
9642 if (hdev->adv_instance_cnt > prev_instance_cnt)
9643 mgmt_advertising_added(sk, hdev, cp->instance);
9645 if (hdev->cur_adv_instance == cp->instance) {
9646 /* If the currently advertised instance is being changed then
9647 * cancel the current advertising and schedule the next
9648 * instance. If there is only one instance then the overridden
9649 * advertising data will be visible right away.
9651 cancel_adv_timeout(hdev);
9653 next_instance = hci_get_next_instance(hdev, cp->instance);
9655 schedule_instance = next_instance->instance;
9656 } else if (!hdev->adv_instance_timeout) {
9657 /* Immediately advertise the new instance if no other
9658 * instance is currently being advertised.
9660 schedule_instance = cp->instance;
9663 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
9664 * there is no instance to be advertised then we have no HCI
9665 * communication to make. Simply return.
9667 if (!hdev_is_powered(hdev) ||
9668 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9669 !schedule_instance) {
9670 rp.instance = cp->instance;
9671 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9672 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9676 /* We're good to go, update advertising data, parameters, and start
9679 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
9686 hci_req_init(&req, hdev);
9688 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
9691 err = hci_req_run(&req, add_advertising_complete);
9694 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9695 MGMT_STATUS_FAILED);
9696 mgmt_pending_remove(cmd);
9700 hci_dev_unlock(hdev);
9705 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
9708 struct mgmt_pending_cmd *cmd;
9709 struct mgmt_cp_add_ext_adv_params *cp;
9710 struct mgmt_rp_add_ext_adv_params rp;
9711 struct adv_info *adv_instance;
9714 BT_DBG("%s", hdev->name);
9718 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
9723 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9727 rp.instance = cp->instance;
9728 rp.tx_power = adv_instance->tx_power;
9730 /* While we're at it, inform userspace of the available space for this
9731 * advertisement, given the flags that will be used.
9733 flags = __le32_to_cpu(cp->flags);
9734 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9735 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9738 /* If this advertisement was previously advertising and we
9739 * failed to update it, we signal that it has been removed and
9740 * delete its structure
9742 if (!adv_instance->pending)
9743 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
9745 hci_remove_adv_instance(hdev, cp->instance);
9747 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9748 mgmt_status(status));
9751 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9752 mgmt_status(status), &rp, sizeof(rp));
9757 mgmt_pending_remove(cmd);
9759 hci_dev_unlock(hdev);
9762 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
9763 void *data, u16 data_len)
9765 struct mgmt_cp_add_ext_adv_params *cp = data;
9766 struct mgmt_rp_add_ext_adv_params rp;
9767 struct mgmt_pending_cmd *cmd = NULL;
9768 struct adv_info *adv_instance;
9769 struct hci_request req;
9770 u32 flags, min_interval, max_interval;
9771 u16 timeout, duration;
9776 BT_DBG("%s", hdev->name);
9778 status = mgmt_le_support(hdev);
9780 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9783 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9785 MGMT_STATUS_INVALID_PARAMS);
9787 /* The purpose of breaking add_advertising into two separate MGMT calls
9788 * for params and data is to allow more parameters to be added to this
9789 * structure in the future. For this reason, we verify that we have the
9790 * bare minimum structure we know of when the interface was defined. Any
9791 * extra parameters we don't know about will be ignored in this request.
9793 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
9794 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9795 MGMT_STATUS_INVALID_PARAMS);
9797 flags = __le32_to_cpu(cp->flags);
9799 if (!requested_adv_flags_are_valid(hdev, flags))
9800 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9801 MGMT_STATUS_INVALID_PARAMS);
9805 /* In new interface, we require that we are powered to register */
9806 if (!hdev_is_powered(hdev)) {
9807 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9808 MGMT_STATUS_REJECTED);
9812 if (adv_busy(hdev)) {
9813 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9818 /* Parse defined parameters from request, use defaults otherwise */
9819 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
9820 __le16_to_cpu(cp->timeout) : 0;
9822 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
9823 __le16_to_cpu(cp->duration) :
9824 hdev->def_multi_adv_rotation_duration;
9826 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9827 __le32_to_cpu(cp->min_interval) :
9828 hdev->le_adv_min_interval;
9830 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9831 __le32_to_cpu(cp->max_interval) :
9832 hdev->le_adv_max_interval;
9834 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
9836 HCI_ADV_TX_POWER_NO_PREFERENCE;
9838 /* Create advertising instance with no advertising or response data */
9839 err = hci_add_adv_instance(hdev, cp->instance, flags,
9840 0, NULL, 0, NULL, timeout, duration,
9841 tx_power, min_interval, max_interval);
9844 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9845 MGMT_STATUS_FAILED);
9849 /* Submit request for advertising params if ext adv available */
9850 if (ext_adv_capable(hdev)) {
9851 hci_req_init(&req, hdev);
9852 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9854 /* Updating parameters of an active instance will return a
9855 * Command Disallowed error, so we must first disable the
9856 * instance if it is active.
9858 if (!adv_instance->pending)
9859 __hci_req_disable_ext_adv_instance(&req, cp->instance);
9861 __hci_req_setup_ext_adv_instance(&req, cp->instance);
9863 err = hci_req_run(&req, add_ext_adv_params_complete);
9866 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
9867 hdev, data, data_len);
9870 hci_remove_adv_instance(hdev, cp->instance);
9875 rp.instance = cp->instance;
9876 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9877 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9878 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9879 err = mgmt_cmd_complete(sk, hdev->id,
9880 MGMT_OP_ADD_EXT_ADV_PARAMS,
9881 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9885 hci_dev_unlock(hdev);
9890 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9893 struct mgmt_cp_add_ext_adv_data *cp = data;
9894 struct mgmt_rp_add_ext_adv_data rp;
9895 u8 schedule_instance = 0;
9896 struct adv_info *next_instance;
9897 struct adv_info *adv_instance;
9899 struct mgmt_pending_cmd *cmd;
9900 struct hci_request req;
9902 BT_DBG("%s", hdev->name);
9906 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9908 if (!adv_instance) {
9909 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9910 MGMT_STATUS_INVALID_PARAMS);
9914 /* In new interface, we require that we are powered to register */
9915 if (!hdev_is_powered(hdev)) {
9916 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9917 MGMT_STATUS_REJECTED);
9918 goto clear_new_instance;
9921 if (adv_busy(hdev)) {
9922 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9924 goto clear_new_instance;
9927 /* Validate new data */
9928 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9929 cp->adv_data_len, true) ||
9930 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9931 cp->adv_data_len, cp->scan_rsp_len, false)) {
9932 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9933 MGMT_STATUS_INVALID_PARAMS);
9934 goto clear_new_instance;
9937 /* Set the data in the advertising instance */
9938 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9939 cp->data, cp->scan_rsp_len,
9940 cp->data + cp->adv_data_len);
9942 /* We're good to go, update advertising data, parameters, and start
9946 hci_req_init(&req, hdev);
9948 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
9950 if (ext_adv_capable(hdev)) {
9951 __hci_req_update_adv_data(&req, cp->instance);
9952 __hci_req_update_scan_rsp_data(&req, cp->instance);
9953 __hci_req_enable_ext_advertising(&req, cp->instance);
9956 /* If using software rotation, determine next instance to use */
9958 if (hdev->cur_adv_instance == cp->instance) {
9959 /* If the currently advertised instance is being changed
9960 * then cancel the current advertising and schedule the
9961 * next instance. If there is only one instance then the
9962 * overridden advertising data will be visible right
9965 cancel_adv_timeout(hdev);
9967 next_instance = hci_get_next_instance(hdev,
9970 schedule_instance = next_instance->instance;
9971 } else if (!hdev->adv_instance_timeout) {
9972 /* Immediately advertise the new instance if no other
9973 * instance is currently being advertised.
9975 schedule_instance = cp->instance;
9978 /* If the HCI_ADVERTISING flag is set or there is no instance to
9979 * be advertised then we have no HCI communication to make.
9982 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9983 !schedule_instance) {
9984 if (adv_instance->pending) {
9985 mgmt_advertising_added(sk, hdev, cp->instance);
9986 adv_instance->pending = false;
9988 rp.instance = cp->instance;
9989 err = mgmt_cmd_complete(sk, hdev->id,
9990 MGMT_OP_ADD_EXT_ADV_DATA,
9991 MGMT_STATUS_SUCCESS, &rp,
9996 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
10000 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
10004 goto clear_new_instance;
10008 err = hci_req_run(&req, add_advertising_complete);
10011 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10012 MGMT_STATUS_FAILED);
10013 mgmt_pending_remove(cmd);
10014 goto clear_new_instance;
10017 /* We were successful in updating data, so trigger advertising_added
10018 * event if this is an instance that wasn't previously advertising. If
10019 * a failure occurs in the requests we initiated, we will remove the
10020 * instance again in add_advertising_complete
10022 if (adv_instance->pending)
10023 mgmt_advertising_added(sk, hdev, cp->instance);
10027 clear_new_instance:
10028 hci_remove_adv_instance(hdev, cp->instance);
10031 hci_dev_unlock(hdev);
10036 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
10039 struct mgmt_pending_cmd *cmd;
10040 struct mgmt_cp_remove_advertising *cp;
10041 struct mgmt_rp_remove_advertising rp;
10043 bt_dev_dbg(hdev, "status %u", status);
10045 hci_dev_lock(hdev);
10047 /* A failure status here only means that we failed to disable
10048 * advertising. Otherwise, the advertising instance has been removed,
10049 * so report success.
10051 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
10056 rp.instance = cp->instance;
10058 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
10060 mgmt_pending_remove(cmd);
10063 hci_dev_unlock(hdev);
10066 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
10067 void *data, u16 data_len)
10069 struct mgmt_cp_remove_advertising *cp = data;
10070 struct mgmt_rp_remove_advertising rp;
10071 struct mgmt_pending_cmd *cmd;
10072 struct hci_request req;
10075 bt_dev_dbg(hdev, "sock %p", sk);
10077 /* Enabling the experimental LL Privay support disables support for
10080 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
10081 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
10082 MGMT_STATUS_NOT_SUPPORTED);
10084 hci_dev_lock(hdev);
10086 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
10087 err = mgmt_cmd_status(sk, hdev->id,
10088 MGMT_OP_REMOVE_ADVERTISING,
10089 MGMT_STATUS_INVALID_PARAMS);
10093 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
10094 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
10095 pending_find(MGMT_OP_SET_LE, hdev)) {
10096 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10101 if (list_empty(&hdev->adv_instances)) {
10102 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10103 MGMT_STATUS_INVALID_PARAMS);
10107 hci_req_init(&req, hdev);
10109 /* If we use extended advertising, instance is disabled and removed */
10110 if (ext_adv_capable(hdev)) {
10111 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10112 __hci_req_remove_ext_adv_instance(&req, cp->instance);
10115 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
10117 if (list_empty(&hdev->adv_instances))
10118 __hci_req_disable_advertising(&req);
10120 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
10121 * flag is set or the device isn't powered then we have no HCI
10122 * communication to make. Simply return.
10124 if (skb_queue_empty(&req.cmd_q) ||
10125 !hdev_is_powered(hdev) ||
10126 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
10127 hci_req_purge(&req);
10128 rp.instance = cp->instance;
10129 err = mgmt_cmd_complete(sk, hdev->id,
10130 MGMT_OP_REMOVE_ADVERTISING,
10131 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10135 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
10142 err = hci_req_run(&req, remove_advertising_complete);
10144 mgmt_pending_remove(cmd);
10147 hci_dev_unlock(hdev);
10152 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
10153 void *data, u16 data_len)
10155 struct mgmt_cp_get_adv_size_info *cp = data;
10156 struct mgmt_rp_get_adv_size_info rp;
10157 u32 flags, supported_flags;
10160 bt_dev_dbg(hdev, "sock %p", sk);
10162 if (!lmp_le_capable(hdev))
10163 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10164 MGMT_STATUS_REJECTED);
10166 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10167 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10168 MGMT_STATUS_INVALID_PARAMS);
10170 flags = __le32_to_cpu(cp->flags);
10172 /* The current implementation only supports a subset of the specified
10175 supported_flags = get_supported_adv_flags(hdev);
10176 if (flags & ~supported_flags)
10177 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10178 MGMT_STATUS_INVALID_PARAMS);
10180 rp.instance = cp->instance;
10181 rp.flags = cp->flags;
10182 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10183 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10185 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10186 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10191 static const struct hci_mgmt_handler mgmt_handlers[] = {
10192 { NULL }, /* 0x0000 (no command) */
10193 { read_version, MGMT_READ_VERSION_SIZE,
10195 HCI_MGMT_UNTRUSTED },
10196 { read_commands, MGMT_READ_COMMANDS_SIZE,
10198 HCI_MGMT_UNTRUSTED },
10199 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
10201 HCI_MGMT_UNTRUSTED },
10202 { read_controller_info, MGMT_READ_INFO_SIZE,
10203 HCI_MGMT_UNTRUSTED },
10204 { set_powered, MGMT_SETTING_SIZE },
10205 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
10206 { set_connectable, MGMT_SETTING_SIZE },
10207 { set_fast_connectable, MGMT_SETTING_SIZE },
10208 { set_bondable, MGMT_SETTING_SIZE },
10209 { set_link_security, MGMT_SETTING_SIZE },
10210 { set_ssp, MGMT_SETTING_SIZE },
10211 { set_hs, MGMT_SETTING_SIZE },
10212 { set_le, MGMT_SETTING_SIZE },
10213 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
10214 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
10215 { add_uuid, MGMT_ADD_UUID_SIZE },
10216 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
10217 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
10218 HCI_MGMT_VAR_LEN },
10219 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
10220 HCI_MGMT_VAR_LEN },
10221 { disconnect, MGMT_DISCONNECT_SIZE },
10222 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
10223 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
10224 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
10225 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
10226 { pair_device, MGMT_PAIR_DEVICE_SIZE },
10227 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
10228 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
10229 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
10230 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
10231 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
10232 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
10233 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
10234 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
10235 HCI_MGMT_VAR_LEN },
10236 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
10237 { start_discovery, MGMT_START_DISCOVERY_SIZE },
10238 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
10239 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
10240 { block_device, MGMT_BLOCK_DEVICE_SIZE },
10241 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
10242 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
10243 { set_advertising, MGMT_SETTING_SIZE },
10244 { set_bredr, MGMT_SETTING_SIZE },
10245 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
10246 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
10247 { set_secure_conn, MGMT_SETTING_SIZE },
10248 { set_debug_keys, MGMT_SETTING_SIZE },
10249 { set_privacy, MGMT_SET_PRIVACY_SIZE },
10250 { load_irks, MGMT_LOAD_IRKS_SIZE,
10251 HCI_MGMT_VAR_LEN },
10252 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
10253 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
10254 { add_device, MGMT_ADD_DEVICE_SIZE },
10255 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
10256 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
10257 HCI_MGMT_VAR_LEN },
10258 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
10260 HCI_MGMT_UNTRUSTED },
10261 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
10262 HCI_MGMT_UNCONFIGURED |
10263 HCI_MGMT_UNTRUSTED },
10264 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
10265 HCI_MGMT_UNCONFIGURED },
10266 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
10267 HCI_MGMT_UNCONFIGURED },
10268 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
10269 HCI_MGMT_VAR_LEN },
10270 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
10271 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
10273 HCI_MGMT_UNTRUSTED },
10274 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
10275 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
10276 HCI_MGMT_VAR_LEN },
10277 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
10278 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
10279 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
10280 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
10281 HCI_MGMT_UNTRUSTED },
10282 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
10283 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
10284 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
10285 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
10286 HCI_MGMT_VAR_LEN },
10287 { set_wideband_speech, MGMT_SETTING_SIZE },
10288 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
10289 HCI_MGMT_UNTRUSTED },
10290 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
10291 HCI_MGMT_UNTRUSTED |
10292 HCI_MGMT_HDEV_OPTIONAL },
10293 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
10295 HCI_MGMT_HDEV_OPTIONAL },
10296 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
10297 HCI_MGMT_UNTRUSTED },
10298 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
10299 HCI_MGMT_VAR_LEN },
10300 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
10301 HCI_MGMT_UNTRUSTED },
10302 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
10303 HCI_MGMT_VAR_LEN },
10304 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
10305 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
10306 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
10307 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
10308 HCI_MGMT_VAR_LEN },
10309 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
10310 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
10311 HCI_MGMT_VAR_LEN },
10312 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
10313 HCI_MGMT_VAR_LEN },
10314 { add_adv_patterns_monitor_rssi,
10315 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
10316 HCI_MGMT_VAR_LEN },
10320 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
10321 { NULL }, /* 0x0000 (no command) */
10322 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
10323 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
10324 HCI_MGMT_VAR_LEN },
10325 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
10326 HCI_MGMT_VAR_LEN },
10327 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
10328 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
10329 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
10330 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
10331 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
10332 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
10333 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
10334 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
10335 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
10336 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
10337 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
10338 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
10339 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
10343 void mgmt_index_added(struct hci_dev *hdev)
10345 struct mgmt_ev_ext_index ev;
10347 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10350 switch (hdev->dev_type) {
10352 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10353 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
10354 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10357 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
10358 HCI_MGMT_INDEX_EVENTS);
10369 ev.bus = hdev->bus;
10371 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
10372 HCI_MGMT_EXT_INDEX_EVENTS);
10375 void mgmt_index_removed(struct hci_dev *hdev)
10377 struct mgmt_ev_ext_index ev;
10378 u8 status = MGMT_STATUS_INVALID_INDEX;
10380 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10383 switch (hdev->dev_type) {
10385 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10387 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10388 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
10389 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10392 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
10393 HCI_MGMT_INDEX_EVENTS);
10404 ev.bus = hdev->bus;
10406 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
10407 HCI_MGMT_EXT_INDEX_EVENTS);
10410 /* This function requires the caller holds hdev->lock */
10411 static void restart_le_actions(struct hci_dev *hdev)
10413 struct hci_conn_params *p;
10415 list_for_each_entry(p, &hdev->le_conn_params, list) {
10416 /* Needed for AUTO_OFF case where might not "really"
10417 * have been powered off.
10419 list_del_init(&p->action);
10421 switch (p->auto_connect) {
10422 case HCI_AUTO_CONN_DIRECT:
10423 case HCI_AUTO_CONN_ALWAYS:
10424 list_add(&p->action, &hdev->pend_le_conns);
10426 case HCI_AUTO_CONN_REPORT:
10427 list_add(&p->action, &hdev->pend_le_reports);
10435 void mgmt_power_on(struct hci_dev *hdev, int err)
10437 struct cmd_lookup match = { NULL, hdev };
10439 bt_dev_dbg(hdev, "err %d", err);
10441 hci_dev_lock(hdev);
10444 restart_le_actions(hdev);
10445 hci_update_background_scan(hdev);
10448 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10450 new_settings(hdev, match.sk);
10453 sock_put(match.sk);
10455 hci_dev_unlock(hdev);
10458 void __mgmt_power_off(struct hci_dev *hdev)
10460 struct cmd_lookup match = { NULL, hdev };
10461 u8 status, zero_cod[] = { 0, 0, 0 };
10463 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10465 /* If the power off is because of hdev unregistration let
10466 * use the appropriate INVALID_INDEX status. Otherwise use
10467 * NOT_POWERED. We cover both scenarios here since later in
10468 * mgmt_index_removed() any hci_conn callbacks will have already
10469 * been triggered, potentially causing misleading DISCONNECTED
10470 * status responses.
10472 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
10473 status = MGMT_STATUS_INVALID_INDEX;
10475 status = MGMT_STATUS_NOT_POWERED;
10477 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10479 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
10480 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
10481 zero_cod, sizeof(zero_cod),
10482 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10483 ext_info_changed(hdev, NULL);
10486 new_settings(hdev, match.sk);
10489 sock_put(match.sk);
10492 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
10494 struct mgmt_pending_cmd *cmd;
10497 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10501 if (err == -ERFKILL)
10502 status = MGMT_STATUS_RFKILLED;
10504 status = MGMT_STATUS_FAILED;
10506 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
10508 mgmt_pending_remove(cmd);
10511 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
10514 struct mgmt_ev_new_link_key ev;
10516 memset(&ev, 0, sizeof(ev));
10518 ev.store_hint = persistent;
10519 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10520 ev.key.addr.type = BDADDR_BREDR;
10521 ev.key.type = key->type;
10522 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
10523 ev.key.pin_len = key->pin_len;
10525 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
10528 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
10530 switch (ltk->type) {
10532 case SMP_LTK_RESPONDER:
10533 if (ltk->authenticated)
10534 return MGMT_LTK_AUTHENTICATED;
10535 return MGMT_LTK_UNAUTHENTICATED;
10537 if (ltk->authenticated)
10538 return MGMT_LTK_P256_AUTH;
10539 return MGMT_LTK_P256_UNAUTH;
10540 case SMP_LTK_P256_DEBUG:
10541 return MGMT_LTK_P256_DEBUG;
10544 return MGMT_LTK_UNAUTHENTICATED;
10547 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
10549 struct mgmt_ev_new_long_term_key ev;
10551 memset(&ev, 0, sizeof(ev));
10553 /* Devices using resolvable or non-resolvable random addresses
10554 * without providing an identity resolving key don't require
10555 * to store long term keys. Their addresses will change the
10556 * next time around.
10558 * Only when a remote device provides an identity address
10559 * make sure the long term key is stored. If the remote
10560 * identity is known, the long term keys are internally
10561 * mapped to the identity address. So allow static random
10562 * and public addresses here.
10564 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10565 (key->bdaddr.b[5] & 0xc0) != 0xc0)
10566 ev.store_hint = 0x00;
10568 ev.store_hint = persistent;
10570 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10571 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
10572 ev.key.type = mgmt_ltk_type(key);
10573 ev.key.enc_size = key->enc_size;
10574 ev.key.ediv = key->ediv;
10575 ev.key.rand = key->rand;
10577 if (key->type == SMP_LTK)
10578 ev.key.initiator = 1;
10580 /* Make sure we copy only the significant bytes based on the
10581 * encryption key size, and set the rest of the value to zeroes.
10583 memcpy(ev.key.val, key->val, key->enc_size);
10584 memset(ev.key.val + key->enc_size, 0,
10585 sizeof(ev.key.val) - key->enc_size);
10587 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
10590 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
10592 struct mgmt_ev_new_irk ev;
10594 memset(&ev, 0, sizeof(ev));
10596 ev.store_hint = persistent;
10598 bacpy(&ev.rpa, &irk->rpa);
10599 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
10600 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
10601 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
10603 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
10606 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
10609 struct mgmt_ev_new_csrk ev;
10611 memset(&ev, 0, sizeof(ev));
10613 /* Devices using resolvable or non-resolvable random addresses
10614 * without providing an identity resolving key don't require
10615 * to store signature resolving keys. Their addresses will change
10616 * the next time around.
10618 * Only when a remote device provides an identity address
10619 * make sure the signature resolving key is stored. So allow
10620 * static random and public addresses here.
10622 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10623 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
10624 ev.store_hint = 0x00;
10626 ev.store_hint = persistent;
10628 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
10629 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
10630 ev.key.type = csrk->type;
10631 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
10633 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
10636 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
10637 u8 bdaddr_type, u8 store_hint, u16 min_interval,
10638 u16 max_interval, u16 latency, u16 timeout)
10640 struct mgmt_ev_new_conn_param ev;
10642 if (!hci_is_identity_address(bdaddr, bdaddr_type))
10645 memset(&ev, 0, sizeof(ev));
10646 bacpy(&ev.addr.bdaddr, bdaddr);
10647 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
10648 ev.store_hint = store_hint;
10649 ev.min_interval = cpu_to_le16(min_interval);
10650 ev.max_interval = cpu_to_le16(max_interval);
10651 ev.latency = cpu_to_le16(latency);
10652 ev.timeout = cpu_to_le16(timeout);
10654 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
10657 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
10658 u8 *name, u8 name_len)
10661 struct mgmt_ev_device_connected *ev = (void *) buf;
10665 bacpy(&ev->addr.bdaddr, &conn->dst);
10666 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10669 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
10671 ev->flags = __cpu_to_le32(flags);
10673 /* We must ensure that the EIR Data fields are ordered and
10674 * unique. Keep it simple for now and avoid the problem by not
10675 * adding any BR/EDR data to the LE adv.
10677 if (conn->le_adv_data_len > 0) {
10678 memcpy(&ev->eir[eir_len],
10679 conn->le_adv_data, conn->le_adv_data_len);
10680 eir_len = conn->le_adv_data_len;
10683 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
10686 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
10687 eir_len = eir_append_data(ev->eir, eir_len,
10689 conn->dev_class, 3);
10692 ev->eir_len = cpu_to_le16(eir_len);
10694 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
10695 sizeof(*ev) + eir_len, NULL);
10698 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
10700 struct sock **sk = data;
10702 cmd->cmd_complete(cmd, 0);
10707 mgmt_pending_remove(cmd);
10710 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
10712 struct hci_dev *hdev = data;
10713 struct mgmt_cp_unpair_device *cp = cmd->param;
10715 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
10717 cmd->cmd_complete(cmd, 0);
10718 mgmt_pending_remove(cmd);
10721 bool mgmt_powering_down(struct hci_dev *hdev)
10723 struct mgmt_pending_cmd *cmd;
10724 struct mgmt_mode *cp;
10726 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10737 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
10738 u8 link_type, u8 addr_type, u8 reason,
10739 bool mgmt_connected)
10741 struct mgmt_ev_device_disconnected ev;
10742 struct sock *sk = NULL;
10744 /* The connection is still in hci_conn_hash so test for 1
10745 * instead of 0 to know if this is the last one.
10747 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10748 cancel_delayed_work(&hdev->power_off);
10749 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10752 if (!mgmt_connected)
10755 if (link_type != ACL_LINK && link_type != LE_LINK)
10758 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
10760 bacpy(&ev.addr.bdaddr, bdaddr);
10761 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10762 ev.reason = reason;
10764 /* Report disconnects due to suspend */
10765 if (hdev->suspended)
10766 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
10768 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
10773 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10777 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10778 u8 link_type, u8 addr_type, u8 status)
10780 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
10781 struct mgmt_cp_disconnect *cp;
10782 struct mgmt_pending_cmd *cmd;
10784 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10787 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
10793 if (bacmp(bdaddr, &cp->addr.bdaddr))
10796 if (cp->addr.type != bdaddr_type)
10799 cmd->cmd_complete(cmd, mgmt_status(status));
10800 mgmt_pending_remove(cmd);
10803 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10804 u8 addr_type, u8 status)
10806 struct mgmt_ev_connect_failed ev;
10808 /* The connection is still in hci_conn_hash so test for 1
10809 * instead of 0 to know if this is the last one.
10811 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10812 cancel_delayed_work(&hdev->power_off);
10813 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10816 bacpy(&ev.addr.bdaddr, bdaddr);
10817 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10818 ev.status = mgmt_status(status);
10820 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
10823 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
10825 struct mgmt_ev_pin_code_request ev;
10827 bacpy(&ev.addr.bdaddr, bdaddr);
10828 ev.addr.type = BDADDR_BREDR;
10829 ev.secure = secure;
10831 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
10834 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10837 struct mgmt_pending_cmd *cmd;
10839 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
10843 cmd->cmd_complete(cmd, mgmt_status(status));
10844 mgmt_pending_remove(cmd);
10847 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10850 struct mgmt_pending_cmd *cmd;
10852 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
10856 cmd->cmd_complete(cmd, mgmt_status(status));
10857 mgmt_pending_remove(cmd);
10860 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10861 u8 link_type, u8 addr_type, u32 value,
10864 struct mgmt_ev_user_confirm_request ev;
10866 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10868 bacpy(&ev.addr.bdaddr, bdaddr);
10869 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10870 ev.confirm_hint = confirm_hint;
10871 ev.value = cpu_to_le32(value);
10873 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
10877 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10878 u8 link_type, u8 addr_type)
10880 struct mgmt_ev_user_passkey_request ev;
10882 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10884 bacpy(&ev.addr.bdaddr, bdaddr);
10885 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10887 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
10891 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10892 u8 link_type, u8 addr_type, u8 status,
10895 struct mgmt_pending_cmd *cmd;
10897 cmd = pending_find(opcode, hdev);
10901 cmd->cmd_complete(cmd, mgmt_status(status));
10902 mgmt_pending_remove(cmd);
10907 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10908 u8 link_type, u8 addr_type, u8 status)
10910 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10911 status, MGMT_OP_USER_CONFIRM_REPLY);
10914 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10915 u8 link_type, u8 addr_type, u8 status)
10917 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10919 MGMT_OP_USER_CONFIRM_NEG_REPLY);
10922 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10923 u8 link_type, u8 addr_type, u8 status)
10925 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10926 status, MGMT_OP_USER_PASSKEY_REPLY);
10929 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10930 u8 link_type, u8 addr_type, u8 status)
10932 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10934 MGMT_OP_USER_PASSKEY_NEG_REPLY);
10937 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
10938 u8 link_type, u8 addr_type, u32 passkey,
10941 struct mgmt_ev_passkey_notify ev;
10943 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10945 bacpy(&ev.addr.bdaddr, bdaddr);
10946 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10947 ev.passkey = __cpu_to_le32(passkey);
10948 ev.entered = entered;
10950 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
10953 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
10955 struct mgmt_ev_auth_failed ev;
10956 struct mgmt_pending_cmd *cmd;
10957 u8 status = mgmt_status(hci_status);
10959 bacpy(&ev.addr.bdaddr, &conn->dst);
10960 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10961 ev.status = status;
10963 cmd = find_pairing(conn);
10965 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
10966 cmd ? cmd->sk : NULL);
10969 cmd->cmd_complete(cmd, status);
10970 mgmt_pending_remove(cmd);
10974 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
10976 struct cmd_lookup match = { NULL, hdev };
10980 u8 mgmt_err = mgmt_status(status);
10981 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
10982 cmd_status_rsp, &mgmt_err);
10986 if (test_bit(HCI_AUTH, &hdev->flags))
10987 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10989 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10991 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10995 new_settings(hdev, match.sk);
10998 sock_put(match.sk);
11001 static void clear_eir(struct hci_request *req)
11003 struct hci_dev *hdev = req->hdev;
11004 struct hci_cp_write_eir cp;
11006 if (!lmp_ext_inq_capable(hdev))
11009 memset(hdev->eir, 0, sizeof(hdev->eir));
11011 memset(&cp, 0, sizeof(cp));
11013 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
11016 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
11018 struct cmd_lookup match = { NULL, hdev };
11019 struct hci_request req;
11020 bool changed = false;
11023 u8 mgmt_err = mgmt_status(status);
11025 if (enable && hci_dev_test_and_clear_flag(hdev,
11026 HCI_SSP_ENABLED)) {
11027 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11028 new_settings(hdev, NULL);
11031 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
11037 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
11039 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
11041 changed = hci_dev_test_and_clear_flag(hdev,
11044 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11047 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
11050 new_settings(hdev, match.sk);
11053 sock_put(match.sk);
11055 hci_req_init(&req, hdev);
11057 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
11058 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
11059 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
11060 sizeof(enable), &enable);
11061 __hci_req_update_eir(&req);
11066 hci_req_run(&req, NULL);
11069 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11071 struct cmd_lookup *match = data;
11073 if (match->sk == NULL) {
11074 match->sk = cmd->sk;
11075 sock_hold(match->sk);
11079 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11082 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11084 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11085 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11086 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11089 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11090 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11091 ext_info_changed(hdev, NULL);
11095 sock_put(match.sk);
11098 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11100 struct mgmt_cp_set_local_name ev;
11101 struct mgmt_pending_cmd *cmd;
11106 memset(&ev, 0, sizeof(ev));
11107 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11108 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11110 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11112 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11114 /* If this is a HCI command related to powering on the
11115 * HCI dev don't send any mgmt signals.
11117 if (pending_find(MGMT_OP_SET_POWERED, hdev))
11121 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11122 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11123 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
11126 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11130 for (i = 0; i < uuid_count; i++) {
11131 if (!memcmp(uuid, uuids[i], 16))
11138 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
11142 while (parsed < eir_len) {
11143 u8 field_len = eir[0];
11147 if (field_len == 0)
11150 if (eir_len - parsed < field_len + 1)
11154 case EIR_UUID16_ALL:
11155 case EIR_UUID16_SOME:
11156 for (i = 0; i + 3 <= field_len; i += 2) {
11157 memcpy(uuid, bluetooth_base_uuid, 16);
11158 uuid[13] = eir[i + 3];
11159 uuid[12] = eir[i + 2];
11160 if (has_uuid(uuid, uuid_count, uuids))
11164 case EIR_UUID32_ALL:
11165 case EIR_UUID32_SOME:
11166 for (i = 0; i + 5 <= field_len; i += 4) {
11167 memcpy(uuid, bluetooth_base_uuid, 16);
11168 uuid[15] = eir[i + 5];
11169 uuid[14] = eir[i + 4];
11170 uuid[13] = eir[i + 3];
11171 uuid[12] = eir[i + 2];
11172 if (has_uuid(uuid, uuid_count, uuids))
11176 case EIR_UUID128_ALL:
11177 case EIR_UUID128_SOME:
11178 for (i = 0; i + 17 <= field_len; i += 16) {
11179 memcpy(uuid, eir + i + 2, 16);
11180 if (has_uuid(uuid, uuid_count, uuids))
11186 parsed += field_len + 1;
11187 eir += field_len + 1;
11193 static void restart_le_scan(struct hci_dev *hdev)
11195 /* If controller is not scanning we are done. */
11196 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
11199 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
11200 hdev->discovery.scan_start +
11201 hdev->discovery.scan_duration))
11204 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
11205 DISCOV_LE_RESTART_DELAY);
11208 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
11209 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11211 /* If a RSSI threshold has been specified, and
11212 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
11213 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
11214 * is set, let it through for further processing, as we might need to
11215 * restart the scan.
11217 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
11218 * the results are also dropped.
11220 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11221 (rssi == HCI_RSSI_INVALID ||
11222 (rssi < hdev->discovery.rssi &&
11223 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
11226 if (hdev->discovery.uuid_count != 0) {
11227 /* If a list of UUIDs is provided in filter, results with no
11228 * matching UUID should be dropped.
11230 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
11231 hdev->discovery.uuids) &&
11232 !eir_has_uuids(scan_rsp, scan_rsp_len,
11233 hdev->discovery.uuid_count,
11234 hdev->discovery.uuids))
11238 /* If duplicate filtering does not report RSSI changes, then restart
11239 * scanning to ensure updated result with updated RSSI values.
11241 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
11242 restart_le_scan(hdev);
11244 /* Validate RSSI value against the RSSI threshold once more. */
11245 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11246 rssi < hdev->discovery.rssi)
11253 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11254 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
11255 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11258 struct mgmt_ev_device_found *ev = (void *)buf;
11261 /* Don't send events for a non-kernel initiated discovery. With
11262 * LE one exception is if we have pend_le_reports > 0 in which
11263 * case we're doing passive scanning and want these events.
11265 if (!hci_discovery_active(hdev)) {
11266 if (link_type == ACL_LINK)
11268 if (link_type == LE_LINK &&
11269 list_empty(&hdev->pend_le_reports) &&
11270 !hci_is_adv_monitoring(hdev)) {
11275 if (hdev->discovery.result_filtering) {
11276 /* We are using service discovery */
11277 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
11282 if (hdev->discovery.limited) {
11283 /* Check for limited discoverable bit */
11285 if (!(dev_class[1] & 0x20))
11288 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
11289 if (!flags || !(flags[0] & LE_AD_LIMITED))
11294 /* Make sure that the buffer is big enough. The 5 extra bytes
11295 * are for the potential CoD field.
11297 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
11300 memset(buf, 0, sizeof(buf));
11302 /* In case of device discovery with BR/EDR devices (pre 1.2), the
11303 * RSSI value was reported as 0 when not available. This behavior
11304 * is kept when using device discovery. This is required for full
11305 * backwards compatibility with the API.
11307 * However when using service discovery, the value 127 will be
11308 * returned when the RSSI is not available.
11310 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
11311 link_type == ACL_LINK)
11314 bacpy(&ev->addr.bdaddr, bdaddr);
11315 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11317 ev->flags = cpu_to_le32(flags);
11320 /* Copy EIR or advertising data into event */
11321 memcpy(ev->eir, eir, eir_len);
11323 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11325 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11328 if (scan_rsp_len > 0)
11329 /* Append scan response data to event */
11330 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
11332 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
11333 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
11335 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
11338 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11339 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
11341 struct mgmt_ev_device_found *ev;
11342 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
11345 ev = (struct mgmt_ev_device_found *) buf;
11347 memset(buf, 0, sizeof(buf));
11349 bacpy(&ev->addr.bdaddr, bdaddr);
11350 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11353 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
11356 ev->eir_len = cpu_to_le16(eir_len);
11358 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
11361 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
11363 struct mgmt_ev_discovering ev;
11365 bt_dev_dbg(hdev, "discovering %u", discovering);
11367 memset(&ev, 0, sizeof(ev));
11368 ev.type = hdev->discovery.type;
11369 ev.discovering = discovering;
11371 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
11374 void mgmt_suspending(struct hci_dev *hdev, u8 state)
11376 struct mgmt_ev_controller_suspend ev;
11378 ev.suspend_state = state;
11379 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
11382 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
11385 struct mgmt_ev_controller_resume ev;
11387 ev.wake_reason = reason;
11389 bacpy(&ev.addr.bdaddr, bdaddr);
11390 ev.addr.type = addr_type;
11392 memset(&ev.addr, 0, sizeof(ev.addr));
11395 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
11398 static struct hci_mgmt_chan chan = {
11399 .channel = HCI_CHANNEL_CONTROL,
11400 .handler_count = ARRAY_SIZE(mgmt_handlers),
11401 .handlers = mgmt_handlers,
11403 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
11404 .tizen_handlers = tizen_mgmt_handlers,
11406 .hdev_init = mgmt_init_hdev,
11409 int mgmt_init(void)
11411 return hci_mgmt_chan_register(&chan);
11414 void mgmt_exit(void)
11416 hci_mgmt_chan_unregister(&chan);