2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
46 #define MGMT_VERSION 1
47 #define MGMT_REVISION 21
49 static const u16 mgmt_commands[] = {
50 MGMT_OP_READ_INDEX_LIST,
53 MGMT_OP_SET_DISCOVERABLE,
54 MGMT_OP_SET_CONNECTABLE,
55 MGMT_OP_SET_FAST_CONNECTABLE,
57 MGMT_OP_SET_LINK_SECURITY,
61 MGMT_OP_SET_DEV_CLASS,
62 MGMT_OP_SET_LOCAL_NAME,
65 MGMT_OP_LOAD_LINK_KEYS,
66 MGMT_OP_LOAD_LONG_TERM_KEYS,
68 MGMT_OP_GET_CONNECTIONS,
69 MGMT_OP_PIN_CODE_REPLY,
70 MGMT_OP_PIN_CODE_NEG_REPLY,
71 MGMT_OP_SET_IO_CAPABILITY,
73 MGMT_OP_CANCEL_PAIR_DEVICE,
74 MGMT_OP_UNPAIR_DEVICE,
75 MGMT_OP_USER_CONFIRM_REPLY,
76 MGMT_OP_USER_CONFIRM_NEG_REPLY,
77 MGMT_OP_USER_PASSKEY_REPLY,
78 MGMT_OP_USER_PASSKEY_NEG_REPLY,
79 MGMT_OP_READ_LOCAL_OOB_DATA,
80 MGMT_OP_ADD_REMOTE_OOB_DATA,
81 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
82 MGMT_OP_START_DISCOVERY,
83 MGMT_OP_STOP_DISCOVERY,
86 MGMT_OP_UNBLOCK_DEVICE,
87 MGMT_OP_SET_DEVICE_ID,
88 MGMT_OP_SET_ADVERTISING,
90 MGMT_OP_SET_STATIC_ADDRESS,
91 MGMT_OP_SET_SCAN_PARAMS,
92 MGMT_OP_SET_SECURE_CONN,
93 MGMT_OP_SET_DEBUG_KEYS,
96 MGMT_OP_GET_CONN_INFO,
97 MGMT_OP_GET_CLOCK_INFO,
99 MGMT_OP_REMOVE_DEVICE,
100 MGMT_OP_LOAD_CONN_PARAM,
101 MGMT_OP_READ_UNCONF_INDEX_LIST,
102 MGMT_OP_READ_CONFIG_INFO,
103 MGMT_OP_SET_EXTERNAL_CONFIG,
104 MGMT_OP_SET_PUBLIC_ADDRESS,
105 MGMT_OP_START_SERVICE_DISCOVERY,
106 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
107 MGMT_OP_READ_EXT_INDEX_LIST,
108 MGMT_OP_READ_ADV_FEATURES,
109 MGMT_OP_ADD_ADVERTISING,
110 MGMT_OP_REMOVE_ADVERTISING,
111 MGMT_OP_GET_ADV_SIZE_INFO,
112 MGMT_OP_START_LIMITED_DISCOVERY,
113 MGMT_OP_READ_EXT_INFO,
114 MGMT_OP_SET_APPEARANCE,
115 MGMT_OP_GET_PHY_CONFIGURATION,
116 MGMT_OP_SET_PHY_CONFIGURATION,
117 MGMT_OP_SET_BLOCKED_KEYS,
118 MGMT_OP_SET_WIDEBAND_SPEECH,
119 MGMT_OP_READ_CONTROLLER_CAP,
120 MGMT_OP_READ_EXP_FEATURES_INFO,
121 MGMT_OP_SET_EXP_FEATURE,
122 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
123 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
124 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
125 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
126 MGMT_OP_GET_DEVICE_FLAGS,
127 MGMT_OP_SET_DEVICE_FLAGS,
128 MGMT_OP_READ_ADV_MONITOR_FEATURES,
129 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
130 MGMT_OP_REMOVE_ADV_MONITOR,
131 MGMT_OP_ADD_EXT_ADV_PARAMS,
132 MGMT_OP_ADD_EXT_ADV_DATA,
133 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136 static const u16 mgmt_events[] = {
137 MGMT_EV_CONTROLLER_ERROR,
139 MGMT_EV_INDEX_REMOVED,
140 MGMT_EV_NEW_SETTINGS,
141 MGMT_EV_CLASS_OF_DEV_CHANGED,
142 MGMT_EV_LOCAL_NAME_CHANGED,
143 MGMT_EV_NEW_LINK_KEY,
144 MGMT_EV_NEW_LONG_TERM_KEY,
145 MGMT_EV_DEVICE_CONNECTED,
146 MGMT_EV_DEVICE_DISCONNECTED,
147 MGMT_EV_CONNECT_FAILED,
148 MGMT_EV_PIN_CODE_REQUEST,
149 MGMT_EV_USER_CONFIRM_REQUEST,
150 MGMT_EV_USER_PASSKEY_REQUEST,
152 MGMT_EV_DEVICE_FOUND,
154 MGMT_EV_DEVICE_BLOCKED,
155 MGMT_EV_DEVICE_UNBLOCKED,
156 MGMT_EV_DEVICE_UNPAIRED,
157 MGMT_EV_PASSKEY_NOTIFY,
160 MGMT_EV_DEVICE_ADDED,
161 MGMT_EV_DEVICE_REMOVED,
162 MGMT_EV_NEW_CONN_PARAM,
163 MGMT_EV_UNCONF_INDEX_ADDED,
164 MGMT_EV_UNCONF_INDEX_REMOVED,
165 MGMT_EV_NEW_CONFIG_OPTIONS,
166 MGMT_EV_EXT_INDEX_ADDED,
167 MGMT_EV_EXT_INDEX_REMOVED,
168 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
169 MGMT_EV_ADVERTISING_ADDED,
170 MGMT_EV_ADVERTISING_REMOVED,
171 MGMT_EV_EXT_INFO_CHANGED,
172 MGMT_EV_PHY_CONFIGURATION_CHANGED,
173 MGMT_EV_EXP_FEATURE_CHANGED,
174 MGMT_EV_DEVICE_FLAGS_CHANGED,
175 MGMT_EV_ADV_MONITOR_ADDED,
176 MGMT_EV_ADV_MONITOR_REMOVED,
177 MGMT_EV_CONTROLLER_SUSPEND,
178 MGMT_EV_CONTROLLER_RESUME,
181 static const u16 mgmt_untrusted_commands[] = {
182 MGMT_OP_READ_INDEX_LIST,
184 MGMT_OP_READ_UNCONF_INDEX_LIST,
185 MGMT_OP_READ_CONFIG_INFO,
186 MGMT_OP_READ_EXT_INDEX_LIST,
187 MGMT_OP_READ_EXT_INFO,
188 MGMT_OP_READ_CONTROLLER_CAP,
189 MGMT_OP_READ_EXP_FEATURES_INFO,
190 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
191 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
194 static const u16 mgmt_untrusted_events[] = {
196 MGMT_EV_INDEX_REMOVED,
197 MGMT_EV_NEW_SETTINGS,
198 MGMT_EV_CLASS_OF_DEV_CHANGED,
199 MGMT_EV_LOCAL_NAME_CHANGED,
200 MGMT_EV_UNCONF_INDEX_ADDED,
201 MGMT_EV_UNCONF_INDEX_REMOVED,
202 MGMT_EV_NEW_CONFIG_OPTIONS,
203 MGMT_EV_EXT_INDEX_ADDED,
204 MGMT_EV_EXT_INDEX_REMOVED,
205 MGMT_EV_EXT_INFO_CHANGED,
206 MGMT_EV_EXP_FEATURE_CHANGED,
209 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 "\x00\x00\x00\x00\x00\x00\x00\x00"
214 /* HCI to MGMT error code conversion table */
215 static const u8 mgmt_status_table[] = {
217 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
218 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
219 MGMT_STATUS_FAILED, /* Hardware Failure */
220 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
221 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
222 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
223 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
224 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
226 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
227 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
228 MGMT_STATUS_BUSY, /* Command Disallowed */
229 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
230 MGMT_STATUS_REJECTED, /* Rejected Security */
231 MGMT_STATUS_REJECTED, /* Rejected Personal */
232 MGMT_STATUS_TIMEOUT, /* Host Timeout */
233 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
234 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
235 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
236 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
237 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
238 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
239 MGMT_STATUS_BUSY, /* Repeated Attempts */
240 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
241 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
242 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
243 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
244 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
245 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
246 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
247 MGMT_STATUS_FAILED, /* Unspecified Error */
248 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
249 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
250 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
251 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
252 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
253 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
254 MGMT_STATUS_FAILED, /* Unit Link Key Used */
255 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
256 MGMT_STATUS_TIMEOUT, /* Instant Passed */
257 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
258 MGMT_STATUS_FAILED, /* Transaction Collision */
259 MGMT_STATUS_FAILED, /* Reserved for future use */
260 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
261 MGMT_STATUS_REJECTED, /* QoS Rejected */
262 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
263 MGMT_STATUS_REJECTED, /* Insufficient Security */
264 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
265 MGMT_STATUS_FAILED, /* Reserved for future use */
266 MGMT_STATUS_BUSY, /* Role Switch Pending */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_FAILED, /* Slot Violation */
269 MGMT_STATUS_FAILED, /* Role Switch Failed */
270 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
271 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
272 MGMT_STATUS_BUSY, /* Host Busy Pairing */
273 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
274 MGMT_STATUS_BUSY, /* Controller Busy */
275 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
276 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
277 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
278 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
279 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
282 static u8 mgmt_status(u8 hci_status)
284 if (hci_status < ARRAY_SIZE(mgmt_status_table))
285 return mgmt_status_table[hci_status];
287 return MGMT_STATUS_FAILED;
290 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
293 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
297 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
298 u16 len, int flag, struct sock *skip_sk)
300 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
304 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
305 struct sock *skip_sk)
307 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
308 HCI_SOCK_TRUSTED, skip_sk);
311 static u8 le_addr_type(u8 mgmt_addr_type)
313 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
314 return ADDR_LE_DEV_PUBLIC;
316 return ADDR_LE_DEV_RANDOM;
319 void mgmt_fill_version_info(void *ver)
321 struct mgmt_rp_read_version *rp = ver;
323 rp->version = MGMT_VERSION;
324 rp->revision = cpu_to_le16(MGMT_REVISION);
327 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
330 struct mgmt_rp_read_version rp;
332 bt_dev_dbg(hdev, "sock %p", sk);
334 mgmt_fill_version_info(&rp);
336 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
340 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
343 struct mgmt_rp_read_commands *rp;
344 u16 num_commands, num_events;
348 bt_dev_dbg(hdev, "sock %p", sk);
350 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
351 num_commands = ARRAY_SIZE(mgmt_commands);
352 num_events = ARRAY_SIZE(mgmt_events);
354 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
355 num_events = ARRAY_SIZE(mgmt_untrusted_events);
358 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
360 rp = kmalloc(rp_size, GFP_KERNEL);
364 rp->num_commands = cpu_to_le16(num_commands);
365 rp->num_events = cpu_to_le16(num_events);
367 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
368 __le16 *opcode = rp->opcodes;
370 for (i = 0; i < num_commands; i++, opcode++)
371 put_unaligned_le16(mgmt_commands[i], opcode);
373 for (i = 0; i < num_events; i++, opcode++)
374 put_unaligned_le16(mgmt_events[i], opcode);
376 __le16 *opcode = rp->opcodes;
378 for (i = 0; i < num_commands; i++, opcode++)
379 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
381 for (i = 0; i < num_events; i++, opcode++)
382 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
385 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
392 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
395 struct mgmt_rp_read_index_list *rp;
401 bt_dev_dbg(hdev, "sock %p", sk);
403 read_lock(&hci_dev_list_lock);
406 list_for_each_entry(d, &hci_dev_list, list) {
407 if (d->dev_type == HCI_PRIMARY &&
408 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
412 rp_len = sizeof(*rp) + (2 * count);
413 rp = kmalloc(rp_len, GFP_ATOMIC);
415 read_unlock(&hci_dev_list_lock);
420 list_for_each_entry(d, &hci_dev_list, list) {
421 if (hci_dev_test_flag(d, HCI_SETUP) ||
422 hci_dev_test_flag(d, HCI_CONFIG) ||
423 hci_dev_test_flag(d, HCI_USER_CHANNEL))
426 /* Devices marked as raw-only are neither configured
427 * nor unconfigured controllers.
429 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
432 if (d->dev_type == HCI_PRIMARY &&
433 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
434 rp->index[count++] = cpu_to_le16(d->id);
435 bt_dev_dbg(hdev, "Added hci%u", d->id);
439 rp->num_controllers = cpu_to_le16(count);
440 rp_len = sizeof(*rp) + (2 * count);
442 read_unlock(&hci_dev_list_lock);
444 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
452 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
453 void *data, u16 data_len)
455 struct mgmt_rp_read_unconf_index_list *rp;
461 bt_dev_dbg(hdev, "sock %p", sk);
463 read_lock(&hci_dev_list_lock);
466 list_for_each_entry(d, &hci_dev_list, list) {
467 if (d->dev_type == HCI_PRIMARY &&
468 hci_dev_test_flag(d, HCI_UNCONFIGURED))
472 rp_len = sizeof(*rp) + (2 * count);
473 rp = kmalloc(rp_len, GFP_ATOMIC);
475 read_unlock(&hci_dev_list_lock);
480 list_for_each_entry(d, &hci_dev_list, list) {
481 if (hci_dev_test_flag(d, HCI_SETUP) ||
482 hci_dev_test_flag(d, HCI_CONFIG) ||
483 hci_dev_test_flag(d, HCI_USER_CHANNEL))
486 /* Devices marked as raw-only are neither configured
487 * nor unconfigured controllers.
489 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
492 if (d->dev_type == HCI_PRIMARY &&
493 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
494 rp->index[count++] = cpu_to_le16(d->id);
495 bt_dev_dbg(hdev, "Added hci%u", d->id);
499 rp->num_controllers = cpu_to_le16(count);
500 rp_len = sizeof(*rp) + (2 * count);
502 read_unlock(&hci_dev_list_lock);
504 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
505 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
512 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
513 void *data, u16 data_len)
515 struct mgmt_rp_read_ext_index_list *rp;
520 bt_dev_dbg(hdev, "sock %p", sk);
522 read_lock(&hci_dev_list_lock);
525 list_for_each_entry(d, &hci_dev_list, list) {
526 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
530 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
532 read_unlock(&hci_dev_list_lock);
537 list_for_each_entry(d, &hci_dev_list, list) {
538 if (hci_dev_test_flag(d, HCI_SETUP) ||
539 hci_dev_test_flag(d, HCI_CONFIG) ||
540 hci_dev_test_flag(d, HCI_USER_CHANNEL))
543 /* Devices marked as raw-only are neither configured
544 * nor unconfigured controllers.
546 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
549 if (d->dev_type == HCI_PRIMARY) {
550 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
551 rp->entry[count].type = 0x01;
553 rp->entry[count].type = 0x00;
554 } else if (d->dev_type == HCI_AMP) {
555 rp->entry[count].type = 0x02;
560 rp->entry[count].bus = d->bus;
561 rp->entry[count++].index = cpu_to_le16(d->id);
562 bt_dev_dbg(hdev, "Added hci%u", d->id);
565 rp->num_controllers = cpu_to_le16(count);
567 read_unlock(&hci_dev_list_lock);
569 /* If this command is called at least once, then all the
570 * default index and unconfigured index events are disabled
571 * and from now on only extended index events are used.
573 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
574 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
575 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
577 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
578 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
579 struct_size(rp, entry, count));
586 static bool is_configured(struct hci_dev *hdev)
588 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
589 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
592 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
593 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
594 !bacmp(&hdev->public_addr, BDADDR_ANY))
600 static __le32 get_missing_options(struct hci_dev *hdev)
604 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
605 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
606 options |= MGMT_OPTION_EXTERNAL_CONFIG;
608 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
609 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
610 !bacmp(&hdev->public_addr, BDADDR_ANY))
611 options |= MGMT_OPTION_PUBLIC_ADDRESS;
613 return cpu_to_le32(options);
616 static int new_options(struct hci_dev *hdev, struct sock *skip)
618 __le32 options = get_missing_options(hdev);
620 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
621 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
624 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
626 __le32 options = get_missing_options(hdev);
628 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
632 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
633 void *data, u16 data_len)
635 struct mgmt_rp_read_config_info rp;
638 bt_dev_dbg(hdev, "sock %p", sk);
642 memset(&rp, 0, sizeof(rp));
643 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
645 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
646 options |= MGMT_OPTION_EXTERNAL_CONFIG;
648 if (hdev->set_bdaddr)
649 options |= MGMT_OPTION_PUBLIC_ADDRESS;
651 rp.supported_options = cpu_to_le32(options);
652 rp.missing_options = get_missing_options(hdev);
654 hci_dev_unlock(hdev);
656 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
660 static u32 get_supported_phys(struct hci_dev *hdev)
662 u32 supported_phys = 0;
664 if (lmp_bredr_capable(hdev)) {
665 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
667 if (hdev->features[0][0] & LMP_3SLOT)
668 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
670 if (hdev->features[0][0] & LMP_5SLOT)
671 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
673 if (lmp_edr_2m_capable(hdev)) {
674 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
676 if (lmp_edr_3slot_capable(hdev))
677 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
679 if (lmp_edr_5slot_capable(hdev))
680 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
682 if (lmp_edr_3m_capable(hdev)) {
683 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
685 if (lmp_edr_3slot_capable(hdev))
686 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
688 if (lmp_edr_5slot_capable(hdev))
689 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
694 if (lmp_le_capable(hdev)) {
695 supported_phys |= MGMT_PHY_LE_1M_TX;
696 supported_phys |= MGMT_PHY_LE_1M_RX;
698 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
699 supported_phys |= MGMT_PHY_LE_2M_TX;
700 supported_phys |= MGMT_PHY_LE_2M_RX;
703 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
704 supported_phys |= MGMT_PHY_LE_CODED_TX;
705 supported_phys |= MGMT_PHY_LE_CODED_RX;
709 return supported_phys;
712 static u32 get_selected_phys(struct hci_dev *hdev)
714 u32 selected_phys = 0;
716 if (lmp_bredr_capable(hdev)) {
717 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
719 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
720 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
722 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
723 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
725 if (lmp_edr_2m_capable(hdev)) {
726 if (!(hdev->pkt_type & HCI_2DH1))
727 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
729 if (lmp_edr_3slot_capable(hdev) &&
730 !(hdev->pkt_type & HCI_2DH3))
731 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
733 if (lmp_edr_5slot_capable(hdev) &&
734 !(hdev->pkt_type & HCI_2DH5))
735 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
737 if (lmp_edr_3m_capable(hdev)) {
738 if (!(hdev->pkt_type & HCI_3DH1))
739 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
741 if (lmp_edr_3slot_capable(hdev) &&
742 !(hdev->pkt_type & HCI_3DH3))
743 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
745 if (lmp_edr_5slot_capable(hdev) &&
746 !(hdev->pkt_type & HCI_3DH5))
747 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
752 if (lmp_le_capable(hdev)) {
753 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
754 selected_phys |= MGMT_PHY_LE_1M_TX;
756 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
757 selected_phys |= MGMT_PHY_LE_1M_RX;
759 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
760 selected_phys |= MGMT_PHY_LE_2M_TX;
762 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
763 selected_phys |= MGMT_PHY_LE_2M_RX;
765 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
766 selected_phys |= MGMT_PHY_LE_CODED_TX;
768 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
769 selected_phys |= MGMT_PHY_LE_CODED_RX;
772 return selected_phys;
775 static u32 get_configurable_phys(struct hci_dev *hdev)
777 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
778 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
781 static u32 get_supported_settings(struct hci_dev *hdev)
785 settings |= MGMT_SETTING_POWERED;
786 settings |= MGMT_SETTING_BONDABLE;
787 settings |= MGMT_SETTING_DEBUG_KEYS;
788 settings |= MGMT_SETTING_CONNECTABLE;
789 settings |= MGMT_SETTING_DISCOVERABLE;
791 if (lmp_bredr_capable(hdev)) {
792 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
793 settings |= MGMT_SETTING_FAST_CONNECTABLE;
794 settings |= MGMT_SETTING_BREDR;
795 settings |= MGMT_SETTING_LINK_SECURITY;
797 if (lmp_ssp_capable(hdev)) {
798 settings |= MGMT_SETTING_SSP;
799 if (IS_ENABLED(CONFIG_BT_HS))
800 settings |= MGMT_SETTING_HS;
803 if (lmp_sc_capable(hdev))
804 settings |= MGMT_SETTING_SECURE_CONN;
806 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
808 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
811 if (lmp_le_capable(hdev)) {
812 settings |= MGMT_SETTING_LE;
813 settings |= MGMT_SETTING_SECURE_CONN;
814 settings |= MGMT_SETTING_PRIVACY;
815 settings |= MGMT_SETTING_STATIC_ADDRESS;
817 /* When the experimental feature for LL Privacy support is
818 * enabled, then advertising is no longer supported.
820 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
821 settings |= MGMT_SETTING_ADVERTISING;
824 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
826 settings |= MGMT_SETTING_CONFIGURATION;
828 settings |= MGMT_SETTING_PHY_CONFIGURATION;
833 static u32 get_current_settings(struct hci_dev *hdev)
837 if (hdev_is_powered(hdev))
838 settings |= MGMT_SETTING_POWERED;
840 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
841 settings |= MGMT_SETTING_CONNECTABLE;
843 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
844 settings |= MGMT_SETTING_FAST_CONNECTABLE;
846 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
847 settings |= MGMT_SETTING_DISCOVERABLE;
849 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
850 settings |= MGMT_SETTING_BONDABLE;
852 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
853 settings |= MGMT_SETTING_BREDR;
855 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
856 settings |= MGMT_SETTING_LE;
858 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
859 settings |= MGMT_SETTING_LINK_SECURITY;
861 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
862 settings |= MGMT_SETTING_SSP;
864 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
865 settings |= MGMT_SETTING_HS;
867 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
868 settings |= MGMT_SETTING_ADVERTISING;
870 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
871 settings |= MGMT_SETTING_SECURE_CONN;
873 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
874 settings |= MGMT_SETTING_DEBUG_KEYS;
876 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
877 settings |= MGMT_SETTING_PRIVACY;
879 /* The current setting for static address has two purposes. The
880 * first is to indicate if the static address will be used and
881 * the second is to indicate if it is actually set.
883 * This means if the static address is not configured, this flag
884 * will never be set. If the address is configured, then if the
885 * address is actually used decides if the flag is set or not.
887 * For single mode LE only controllers and dual-mode controllers
888 * with BR/EDR disabled, the existence of the static address will
891 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
892 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
893 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
894 if (bacmp(&hdev->static_addr, BDADDR_ANY))
895 settings |= MGMT_SETTING_STATIC_ADDRESS;
898 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
899 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
904 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
906 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
909 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
910 struct hci_dev *hdev,
913 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
916 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
918 struct mgmt_pending_cmd *cmd;
920 /* If there's a pending mgmt command the flags will not yet have
921 * their final values, so check for this first.
923 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
925 struct mgmt_mode *cp = cmd->param;
927 return LE_AD_GENERAL;
928 else if (cp->val == 0x02)
929 return LE_AD_LIMITED;
931 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
932 return LE_AD_LIMITED;
933 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
934 return LE_AD_GENERAL;
940 bool mgmt_get_connectable(struct hci_dev *hdev)
942 struct mgmt_pending_cmd *cmd;
944 /* If there's a pending mgmt command the flag will not yet have
945 * it's final value, so check for this first.
947 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
949 struct mgmt_mode *cp = cmd->param;
954 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
957 static void service_cache_off(struct work_struct *work)
959 struct hci_dev *hdev = container_of(work, struct hci_dev,
961 struct hci_request req;
963 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
966 hci_req_init(&req, hdev);
970 __hci_req_update_eir(&req);
971 __hci_req_update_class(&req);
973 hci_dev_unlock(hdev);
975 hci_req_run(&req, NULL);
978 static void rpa_expired(struct work_struct *work)
980 struct hci_dev *hdev = container_of(work, struct hci_dev,
982 struct hci_request req;
984 bt_dev_dbg(hdev, "");
986 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
988 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
991 /* The generation of a new RPA and programming it into the
992 * controller happens in the hci_req_enable_advertising()
995 hci_req_init(&req, hdev);
996 if (ext_adv_capable(hdev))
997 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
999 __hci_req_enable_advertising(&req);
1000 hci_req_run(&req, NULL);
1003 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1005 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1008 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1009 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1011 /* Non-mgmt controlled devices get this bit set
1012 * implicitly so that pairing works for them, however
1013 * for mgmt we require user-space to explicitly enable
1016 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1019 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1020 void *data, u16 data_len)
1022 struct mgmt_rp_read_info rp;
1024 bt_dev_dbg(hdev, "sock %p", sk);
1028 memset(&rp, 0, sizeof(rp));
1030 bacpy(&rp.bdaddr, &hdev->bdaddr);
1032 rp.version = hdev->hci_ver;
1033 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1035 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1036 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1038 memcpy(rp.dev_class, hdev->dev_class, 3);
1040 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1041 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1043 hci_dev_unlock(hdev);
1045 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1049 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1054 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1055 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1056 hdev->dev_class, 3);
1058 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1059 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1062 name_len = strlen(hdev->dev_name);
1063 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1064 hdev->dev_name, name_len);
1066 name_len = strlen(hdev->short_name);
1067 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1068 hdev->short_name, name_len);
1073 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1074 void *data, u16 data_len)
1077 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1080 bt_dev_dbg(hdev, "sock %p", sk);
1082 memset(&buf, 0, sizeof(buf));
1086 bacpy(&rp->bdaddr, &hdev->bdaddr);
1088 rp->version = hdev->hci_ver;
1089 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1091 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1092 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1095 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1096 rp->eir_len = cpu_to_le16(eir_len);
1098 hci_dev_unlock(hdev);
1100 /* If this command is called at least once, then the events
1101 * for class of device and local name changes are disabled
1102 * and only the new extended controller information event
1105 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1106 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1107 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1109 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1110 sizeof(*rp) + eir_len);
1113 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1116 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1119 memset(buf, 0, sizeof(buf));
1121 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1122 ev->eir_len = cpu_to_le16(eir_len);
1124 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1125 sizeof(*ev) + eir_len,
1126 HCI_MGMT_EXT_INFO_EVENTS, skip);
1129 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1131 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1133 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1137 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1139 bt_dev_dbg(hdev, "status 0x%02x", status);
1141 if (hci_conn_count(hdev) == 0) {
1142 cancel_delayed_work(&hdev->power_off);
1143 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1147 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1149 struct mgmt_ev_advertising_added ev;
1151 ev.instance = instance;
1153 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1156 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1159 struct mgmt_ev_advertising_removed ev;
1161 ev.instance = instance;
1163 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1166 static void cancel_adv_timeout(struct hci_dev *hdev)
1168 if (hdev->adv_instance_timeout) {
1169 hdev->adv_instance_timeout = 0;
1170 cancel_delayed_work(&hdev->adv_instance_expire);
1174 static int clean_up_hci_state(struct hci_dev *hdev)
1176 struct hci_request req;
1177 struct hci_conn *conn;
1178 bool discov_stopped;
1181 hci_req_init(&req, hdev);
1183 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1184 test_bit(HCI_PSCAN, &hdev->flags)) {
1186 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1189 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1191 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1192 __hci_req_disable_advertising(&req);
1194 discov_stopped = hci_req_stop_discovery(&req);
1196 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1197 /* 0x15 == Terminated due to Power Off */
1198 __hci_abort_conn(&req, conn, 0x15);
1201 err = hci_req_run(&req, clean_up_hci_complete);
1202 if (!err && discov_stopped)
1203 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1208 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1211 struct mgmt_mode *cp = data;
1212 struct mgmt_pending_cmd *cmd;
1215 bt_dev_dbg(hdev, "sock %p", sk);
1217 if (cp->val != 0x00 && cp->val != 0x01)
1218 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1219 MGMT_STATUS_INVALID_PARAMS);
1223 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1224 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1229 if (!!cp->val == hdev_is_powered(hdev)) {
1230 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1234 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1241 queue_work(hdev->req_workqueue, &hdev->power_on);
1244 /* Disconnect connections, stop scans, etc */
1245 err = clean_up_hci_state(hdev);
1247 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1248 HCI_POWER_OFF_TIMEOUT);
1250 /* ENODATA means there were no HCI commands queued */
1251 if (err == -ENODATA) {
1252 cancel_delayed_work(&hdev->power_off);
1253 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1259 hci_dev_unlock(hdev);
1263 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1265 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1267 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1268 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1271 int mgmt_new_settings(struct hci_dev *hdev)
1273 return new_settings(hdev, NULL);
1278 struct hci_dev *hdev;
1282 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1284 struct cmd_lookup *match = data;
1286 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1288 list_del(&cmd->list);
1290 if (match->sk == NULL) {
1291 match->sk = cmd->sk;
1292 sock_hold(match->sk);
1295 mgmt_pending_free(cmd);
1298 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1302 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1303 mgmt_pending_remove(cmd);
1306 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1308 if (cmd->cmd_complete) {
1311 cmd->cmd_complete(cmd, *status);
1312 mgmt_pending_remove(cmd);
1317 cmd_status_rsp(cmd, data);
1320 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1322 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1323 cmd->param, cmd->param_len);
1326 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1328 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1329 cmd->param, sizeof(struct mgmt_addr_info));
1332 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1334 if (!lmp_bredr_capable(hdev))
1335 return MGMT_STATUS_NOT_SUPPORTED;
1336 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1337 return MGMT_STATUS_REJECTED;
1339 return MGMT_STATUS_SUCCESS;
1342 static u8 mgmt_le_support(struct hci_dev *hdev)
1344 if (!lmp_le_capable(hdev))
1345 return MGMT_STATUS_NOT_SUPPORTED;
1346 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1347 return MGMT_STATUS_REJECTED;
1349 return MGMT_STATUS_SUCCESS;
1352 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1354 struct mgmt_pending_cmd *cmd;
1356 bt_dev_dbg(hdev, "status 0x%02x", status);
1360 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1365 u8 mgmt_err = mgmt_status(status);
1366 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1367 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1371 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1372 hdev->discov_timeout > 0) {
1373 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1374 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1377 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1378 new_settings(hdev, cmd->sk);
1381 mgmt_pending_remove(cmd);
1384 hci_dev_unlock(hdev);
1387 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1390 struct mgmt_cp_set_discoverable *cp = data;
1391 struct mgmt_pending_cmd *cmd;
1395 bt_dev_dbg(hdev, "sock %p", sk);
1397 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1398 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1400 MGMT_STATUS_REJECTED);
1402 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1403 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1404 MGMT_STATUS_INVALID_PARAMS);
1406 timeout = __le16_to_cpu(cp->timeout);
1408 /* Disabling discoverable requires that no timeout is set,
1409 * and enabling limited discoverable requires a timeout.
1411 if ((cp->val == 0x00 && timeout > 0) ||
1412 (cp->val == 0x02 && timeout == 0))
1413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1414 MGMT_STATUS_INVALID_PARAMS);
1418 if (!hdev_is_powered(hdev) && timeout > 0) {
1419 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1420 MGMT_STATUS_NOT_POWERED);
1424 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1425 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1431 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1432 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1433 MGMT_STATUS_REJECTED);
1437 if (hdev->advertising_paused) {
1438 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1443 if (!hdev_is_powered(hdev)) {
1444 bool changed = false;
1446 /* Setting limited discoverable when powered off is
1447 * not a valid operation since it requires a timeout
1448 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1450 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1451 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1455 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1460 err = new_settings(hdev, sk);
1465 /* If the current mode is the same, then just update the timeout
1466 * value with the new value. And if only the timeout gets updated,
1467 * then no need for any HCI transactions.
1469 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1470 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1471 HCI_LIMITED_DISCOVERABLE)) {
1472 cancel_delayed_work(&hdev->discov_off);
1473 hdev->discov_timeout = timeout;
1475 if (cp->val && hdev->discov_timeout > 0) {
1476 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1477 queue_delayed_work(hdev->req_workqueue,
1478 &hdev->discov_off, to);
1481 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1485 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1491 /* Cancel any potential discoverable timeout that might be
1492 * still active and store new timeout value. The arming of
1493 * the timeout happens in the complete handler.
1495 cancel_delayed_work(&hdev->discov_off);
1496 hdev->discov_timeout = timeout;
1499 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1501 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1503 /* Limited discoverable mode */
1504 if (cp->val == 0x02)
1505 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1507 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1509 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1513 hci_dev_unlock(hdev);
1517 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1519 struct mgmt_pending_cmd *cmd;
1521 bt_dev_dbg(hdev, "status 0x%02x", status);
1525 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1530 u8 mgmt_err = mgmt_status(status);
1531 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1535 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1536 new_settings(hdev, cmd->sk);
1539 mgmt_pending_remove(cmd);
1542 hci_dev_unlock(hdev);
1545 static int set_connectable_update_settings(struct hci_dev *hdev,
1546 struct sock *sk, u8 val)
1548 bool changed = false;
1551 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1555 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1557 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1558 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1561 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1566 hci_req_update_scan(hdev);
1567 hci_update_background_scan(hdev);
1568 return new_settings(hdev, sk);
1574 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1577 struct mgmt_mode *cp = data;
1578 struct mgmt_pending_cmd *cmd;
1581 bt_dev_dbg(hdev, "sock %p", sk);
1583 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1584 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1586 MGMT_STATUS_REJECTED);
1588 if (cp->val != 0x00 && cp->val != 0x01)
1589 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1590 MGMT_STATUS_INVALID_PARAMS);
1594 if (!hdev_is_powered(hdev)) {
1595 err = set_connectable_update_settings(hdev, sk, cp->val);
1599 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1600 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1601 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1606 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1613 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1615 if (hdev->discov_timeout > 0)
1616 cancel_delayed_work(&hdev->discov_off);
1618 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1619 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1620 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1623 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1627 hci_dev_unlock(hdev);
1631 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1634 struct mgmt_mode *cp = data;
1638 bt_dev_dbg(hdev, "sock %p", sk);
1640 if (cp->val != 0x00 && cp->val != 0x01)
1641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1642 MGMT_STATUS_INVALID_PARAMS);
1647 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1649 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1651 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1656 /* In limited privacy mode the change of bondable mode
1657 * may affect the local advertising address.
1659 if (hdev_is_powered(hdev) &&
1660 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1661 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1662 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1663 queue_work(hdev->req_workqueue,
1664 &hdev->discoverable_update);
1666 err = new_settings(hdev, sk);
1670 hci_dev_unlock(hdev);
1674 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1677 struct mgmt_mode *cp = data;
1678 struct mgmt_pending_cmd *cmd;
1682 bt_dev_dbg(hdev, "sock %p", sk);
1684 status = mgmt_bredr_support(hdev);
1686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1689 if (cp->val != 0x00 && cp->val != 0x01)
1690 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1691 MGMT_STATUS_INVALID_PARAMS);
1695 if (!hdev_is_powered(hdev)) {
1696 bool changed = false;
1698 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1699 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1703 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1708 err = new_settings(hdev, sk);
1713 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1714 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1721 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1722 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1726 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1732 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1734 mgmt_pending_remove(cmd);
1739 hci_dev_unlock(hdev);
1743 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1745 struct mgmt_mode *cp = data;
1746 struct mgmt_pending_cmd *cmd;
1750 bt_dev_dbg(hdev, "sock %p", sk);
1752 status = mgmt_bredr_support(hdev);
1754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1756 if (!lmp_ssp_capable(hdev))
1757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1758 MGMT_STATUS_NOT_SUPPORTED);
1760 if (cp->val != 0x00 && cp->val != 0x01)
1761 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1762 MGMT_STATUS_INVALID_PARAMS);
1766 if (!hdev_is_powered(hdev)) {
1770 changed = !hci_dev_test_and_set_flag(hdev,
1773 changed = hci_dev_test_and_clear_flag(hdev,
1776 changed = hci_dev_test_and_clear_flag(hdev,
1779 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1782 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1787 err = new_settings(hdev, sk);
1792 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1793 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1798 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1799 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1803 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1809 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1810 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1811 sizeof(cp->val), &cp->val);
1813 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1815 mgmt_pending_remove(cmd);
1820 hci_dev_unlock(hdev);
1824 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1826 struct mgmt_mode *cp = data;
1831 bt_dev_dbg(hdev, "sock %p", sk);
1833 if (!IS_ENABLED(CONFIG_BT_HS))
1834 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1835 MGMT_STATUS_NOT_SUPPORTED);
1837 status = mgmt_bredr_support(hdev);
1839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1841 if (!lmp_ssp_capable(hdev))
1842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1843 MGMT_STATUS_NOT_SUPPORTED);
1845 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1847 MGMT_STATUS_REJECTED);
1849 if (cp->val != 0x00 && cp->val != 0x01)
1850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1851 MGMT_STATUS_INVALID_PARAMS);
1855 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1862 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1864 if (hdev_is_powered(hdev)) {
1865 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1866 MGMT_STATUS_REJECTED);
1870 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1873 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1878 err = new_settings(hdev, sk);
1881 hci_dev_unlock(hdev);
1885 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1887 struct cmd_lookup match = { NULL, hdev };
1892 u8 mgmt_err = mgmt_status(status);
1894 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1899 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1901 new_settings(hdev, match.sk);
1906 /* Make sure the controller has a good default for
1907 * advertising data. Restrict the update to when LE
1908 * has actually been enabled. During power on, the
1909 * update in powered_update_hci will take care of it.
1911 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1912 struct hci_request req;
1913 hci_req_init(&req, hdev);
1914 if (ext_adv_capable(hdev)) {
1917 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1919 __hci_req_update_scan_rsp_data(&req, 0x00);
1921 __hci_req_update_adv_data(&req, 0x00);
1922 __hci_req_update_scan_rsp_data(&req, 0x00);
1924 hci_req_run(&req, NULL);
1925 hci_update_background_scan(hdev);
1929 hci_dev_unlock(hdev);
1932 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1934 struct mgmt_mode *cp = data;
1935 struct hci_cp_write_le_host_supported hci_cp;
1936 struct mgmt_pending_cmd *cmd;
1937 struct hci_request req;
1941 bt_dev_dbg(hdev, "sock %p", sk);
1943 if (!lmp_le_capable(hdev))
1944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1945 MGMT_STATUS_NOT_SUPPORTED);
1947 if (cp->val != 0x00 && cp->val != 0x01)
1948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1949 MGMT_STATUS_INVALID_PARAMS);
1951 /* Bluetooth single mode LE only controllers or dual-mode
1952 * controllers configured as LE only devices, do not allow
1953 * switching LE off. These have either LE enabled explicitly
1954 * or BR/EDR has been previously switched off.
1956 * When trying to enable an already enabled LE, then gracefully
1957 * send a positive response. Trying to disable it however will
1958 * result into rejection.
1960 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1961 if (cp->val == 0x01)
1962 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1964 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1965 MGMT_STATUS_REJECTED);
1971 enabled = lmp_host_le_capable(hdev);
1974 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1976 if (!hdev_is_powered(hdev) || val == enabled) {
1977 bool changed = false;
1979 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1980 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1984 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1985 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1989 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1994 err = new_settings(hdev, sk);
1999 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2000 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2006 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2012 hci_req_init(&req, hdev);
2014 memset(&hci_cp, 0, sizeof(hci_cp));
2018 hci_cp.simul = 0x00;
2020 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2021 __hci_req_disable_advertising(&req);
2023 if (ext_adv_capable(hdev))
2024 __hci_req_clear_ext_adv_sets(&req);
2027 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2030 err = hci_req_run(&req, le_enable_complete);
2032 mgmt_pending_remove(cmd);
2035 hci_dev_unlock(hdev);
2039 /* This is a helper function to test for pending mgmt commands that can
2040 * cause CoD or EIR HCI commands. We can only allow one such pending
2041 * mgmt command at a time since otherwise we cannot easily track what
2042 * the current values are, will be, and based on that calculate if a new
2043 * HCI command needs to be sent and if yes with what value.
2045 static bool pending_eir_or_class(struct hci_dev *hdev)
2047 struct mgmt_pending_cmd *cmd;
2049 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2050 switch (cmd->opcode) {
2051 case MGMT_OP_ADD_UUID:
2052 case MGMT_OP_REMOVE_UUID:
2053 case MGMT_OP_SET_DEV_CLASS:
2054 case MGMT_OP_SET_POWERED:
2062 static const u8 bluetooth_base_uuid[] = {
2063 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2064 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2067 static u8 get_uuid_size(const u8 *uuid)
2071 if (memcmp(uuid, bluetooth_base_uuid, 12))
2074 val = get_unaligned_le32(&uuid[12]);
2081 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2083 struct mgmt_pending_cmd *cmd;
2087 cmd = pending_find(mgmt_op, hdev);
2091 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2092 mgmt_status(status), hdev->dev_class, 3);
2094 mgmt_pending_remove(cmd);
2097 hci_dev_unlock(hdev);
2100 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2102 bt_dev_dbg(hdev, "status 0x%02x", status);
2104 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2107 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2109 struct mgmt_cp_add_uuid *cp = data;
2110 struct mgmt_pending_cmd *cmd;
2111 struct hci_request req;
2112 struct bt_uuid *uuid;
2115 bt_dev_dbg(hdev, "sock %p", sk);
2119 if (pending_eir_or_class(hdev)) {
2120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2125 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2131 memcpy(uuid->uuid, cp->uuid, 16);
2132 uuid->svc_hint = cp->svc_hint;
2133 uuid->size = get_uuid_size(cp->uuid);
2135 list_add_tail(&uuid->list, &hdev->uuids);
2137 hci_req_init(&req, hdev);
2139 __hci_req_update_class(&req);
2140 __hci_req_update_eir(&req);
2142 err = hci_req_run(&req, add_uuid_complete);
2144 if (err != -ENODATA)
2147 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2148 hdev->dev_class, 3);
2152 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2161 hci_dev_unlock(hdev);
2165 static bool enable_service_cache(struct hci_dev *hdev)
2167 if (!hdev_is_powered(hdev))
2170 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2171 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2179 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2181 bt_dev_dbg(hdev, "status 0x%02x", status);
2183 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2186 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2189 struct mgmt_cp_remove_uuid *cp = data;
2190 struct mgmt_pending_cmd *cmd;
2191 struct bt_uuid *match, *tmp;
2192 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2193 struct hci_request req;
2196 bt_dev_dbg(hdev, "sock %p", sk);
2200 if (pending_eir_or_class(hdev)) {
2201 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2206 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2207 hci_uuids_clear(hdev);
2209 if (enable_service_cache(hdev)) {
2210 err = mgmt_cmd_complete(sk, hdev->id,
2211 MGMT_OP_REMOVE_UUID,
2212 0, hdev->dev_class, 3);
2221 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2222 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2225 list_del(&match->list);
2231 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2232 MGMT_STATUS_INVALID_PARAMS);
2237 hci_req_init(&req, hdev);
2239 __hci_req_update_class(&req);
2240 __hci_req_update_eir(&req);
2242 err = hci_req_run(&req, remove_uuid_complete);
2244 if (err != -ENODATA)
2247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2248 hdev->dev_class, 3);
2252 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2261 hci_dev_unlock(hdev);
2265 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2267 bt_dev_dbg(hdev, "status 0x%02x", status);
2269 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2272 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2275 struct mgmt_cp_set_dev_class *cp = data;
2276 struct mgmt_pending_cmd *cmd;
2277 struct hci_request req;
2280 bt_dev_dbg(hdev, "sock %p", sk);
2282 if (!lmp_bredr_capable(hdev))
2283 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2284 MGMT_STATUS_NOT_SUPPORTED);
2288 if (pending_eir_or_class(hdev)) {
2289 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2294 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2295 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2296 MGMT_STATUS_INVALID_PARAMS);
2300 hdev->major_class = cp->major;
2301 hdev->minor_class = cp->minor;
2303 if (!hdev_is_powered(hdev)) {
2304 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2305 hdev->dev_class, 3);
2309 hci_req_init(&req, hdev);
2311 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2312 hci_dev_unlock(hdev);
2313 cancel_delayed_work_sync(&hdev->service_cache);
2315 __hci_req_update_eir(&req);
2318 __hci_req_update_class(&req);
2320 err = hci_req_run(&req, set_class_complete);
2322 if (err != -ENODATA)
2325 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2326 hdev->dev_class, 3);
2330 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2339 hci_dev_unlock(hdev);
2343 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2346 struct mgmt_cp_load_link_keys *cp = data;
2347 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2348 sizeof(struct mgmt_link_key_info));
2349 u16 key_count, expected_len;
2353 bt_dev_dbg(hdev, "sock %p", sk);
2355 if (!lmp_bredr_capable(hdev))
2356 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2357 MGMT_STATUS_NOT_SUPPORTED);
2359 key_count = __le16_to_cpu(cp->key_count);
2360 if (key_count > max_key_count) {
2361 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2364 MGMT_STATUS_INVALID_PARAMS);
2367 expected_len = struct_size(cp, keys, key_count);
2368 if (expected_len != len) {
2369 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2372 MGMT_STATUS_INVALID_PARAMS);
2375 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2376 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2377 MGMT_STATUS_INVALID_PARAMS);
2379 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2382 for (i = 0; i < key_count; i++) {
2383 struct mgmt_link_key_info *key = &cp->keys[i];
2385 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2386 return mgmt_cmd_status(sk, hdev->id,
2387 MGMT_OP_LOAD_LINK_KEYS,
2388 MGMT_STATUS_INVALID_PARAMS);
2393 hci_link_keys_clear(hdev);
2396 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2398 changed = hci_dev_test_and_clear_flag(hdev,
2399 HCI_KEEP_DEBUG_KEYS);
2402 new_settings(hdev, NULL);
2404 for (i = 0; i < key_count; i++) {
2405 struct mgmt_link_key_info *key = &cp->keys[i];
2407 if (hci_is_blocked_key(hdev,
2408 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2410 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2415 /* Always ignore debug keys and require a new pairing if
2416 * the user wants to use them.
2418 if (key->type == HCI_LK_DEBUG_COMBINATION)
2421 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2422 key->type, key->pin_len, NULL);
2425 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2427 hci_dev_unlock(hdev);
2432 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2433 u8 addr_type, struct sock *skip_sk)
2435 struct mgmt_ev_device_unpaired ev;
2437 bacpy(&ev.addr.bdaddr, bdaddr);
2438 ev.addr.type = addr_type;
2440 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2444 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2447 struct mgmt_cp_unpair_device *cp = data;
2448 struct mgmt_rp_unpair_device rp;
2449 struct hci_conn_params *params;
2450 struct mgmt_pending_cmd *cmd;
2451 struct hci_conn *conn;
2455 memset(&rp, 0, sizeof(rp));
2456 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2457 rp.addr.type = cp->addr.type;
2459 if (!bdaddr_type_is_valid(cp->addr.type))
2460 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2461 MGMT_STATUS_INVALID_PARAMS,
2464 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2465 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2466 MGMT_STATUS_INVALID_PARAMS,
2471 if (!hdev_is_powered(hdev)) {
2472 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2473 MGMT_STATUS_NOT_POWERED, &rp,
2478 if (cp->addr.type == BDADDR_BREDR) {
2479 /* If disconnection is requested, then look up the
2480 * connection. If the remote device is connected, it
2481 * will be later used to terminate the link.
2483 * Setting it to NULL explicitly will cause no
2484 * termination of the link.
2487 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2492 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2494 err = mgmt_cmd_complete(sk, hdev->id,
2495 MGMT_OP_UNPAIR_DEVICE,
2496 MGMT_STATUS_NOT_PAIRED, &rp,
2504 /* LE address type */
2505 addr_type = le_addr_type(cp->addr.type);
2507 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2508 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2510 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2511 MGMT_STATUS_NOT_PAIRED, &rp,
2516 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2518 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2523 /* Defer clearing up the connection parameters until closing to
2524 * give a chance of keeping them if a repairing happens.
2526 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2528 /* Disable auto-connection parameters if present */
2529 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2531 if (params->explicit_connect)
2532 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2534 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2537 /* If disconnection is not requested, then clear the connection
2538 * variable so that the link is not terminated.
2540 if (!cp->disconnect)
2544 /* If the connection variable is set, then termination of the
2545 * link is requested.
2548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2550 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2554 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2561 cmd->cmd_complete = addr_cmd_complete;
2563 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2565 mgmt_pending_remove(cmd);
2568 hci_dev_unlock(hdev);
2572 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2575 struct mgmt_cp_disconnect *cp = data;
2576 struct mgmt_rp_disconnect rp;
2577 struct mgmt_pending_cmd *cmd;
2578 struct hci_conn *conn;
2581 bt_dev_dbg(hdev, "sock %p", sk);
2583 memset(&rp, 0, sizeof(rp));
2584 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2585 rp.addr.type = cp->addr.type;
2587 if (!bdaddr_type_is_valid(cp->addr.type))
2588 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2589 MGMT_STATUS_INVALID_PARAMS,
2594 if (!test_bit(HCI_UP, &hdev->flags)) {
2595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2596 MGMT_STATUS_NOT_POWERED, &rp,
2601 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2602 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2603 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2607 if (cp->addr.type == BDADDR_BREDR)
2608 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2611 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2612 le_addr_type(cp->addr.type));
2614 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2615 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2616 MGMT_STATUS_NOT_CONNECTED, &rp,
2621 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2627 cmd->cmd_complete = generic_cmd_complete;
2629 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2631 mgmt_pending_remove(cmd);
2634 hci_dev_unlock(hdev);
2638 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2640 switch (link_type) {
2642 switch (addr_type) {
2643 case ADDR_LE_DEV_PUBLIC:
2644 return BDADDR_LE_PUBLIC;
2647 /* Fallback to LE Random address type */
2648 return BDADDR_LE_RANDOM;
2652 /* Fallback to BR/EDR type */
2653 return BDADDR_BREDR;
2657 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2660 struct mgmt_rp_get_connections *rp;
2665 bt_dev_dbg(hdev, "sock %p", sk);
2669 if (!hdev_is_powered(hdev)) {
2670 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2671 MGMT_STATUS_NOT_POWERED);
2676 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2677 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2681 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2688 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2689 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2691 bacpy(&rp->addr[i].bdaddr, &c->dst);
2692 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2693 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2698 rp->conn_count = cpu_to_le16(i);
2700 /* Recalculate length in case of filtered SCO connections, etc */
2701 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2702 struct_size(rp, addr, i));
2707 hci_dev_unlock(hdev);
2711 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2712 struct mgmt_cp_pin_code_neg_reply *cp)
2714 struct mgmt_pending_cmd *cmd;
2717 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2722 cmd->cmd_complete = addr_cmd_complete;
2724 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2725 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2727 mgmt_pending_remove(cmd);
2732 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2735 struct hci_conn *conn;
2736 struct mgmt_cp_pin_code_reply *cp = data;
2737 struct hci_cp_pin_code_reply reply;
2738 struct mgmt_pending_cmd *cmd;
2741 bt_dev_dbg(hdev, "sock %p", sk);
2745 if (!hdev_is_powered(hdev)) {
2746 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2747 MGMT_STATUS_NOT_POWERED);
2751 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2754 MGMT_STATUS_NOT_CONNECTED);
2758 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2759 struct mgmt_cp_pin_code_neg_reply ncp;
2761 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2763 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2765 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2767 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2768 MGMT_STATUS_INVALID_PARAMS);
2773 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2779 cmd->cmd_complete = addr_cmd_complete;
2781 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2782 reply.pin_len = cp->pin_len;
2783 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2785 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2787 mgmt_pending_remove(cmd);
2790 hci_dev_unlock(hdev);
2794 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2797 struct mgmt_cp_set_io_capability *cp = data;
2799 bt_dev_dbg(hdev, "sock %p", sk);
2801 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2803 MGMT_STATUS_INVALID_PARAMS);
2807 hdev->io_capability = cp->io_capability;
2809 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2811 hci_dev_unlock(hdev);
2813 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2817 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2819 struct hci_dev *hdev = conn->hdev;
2820 struct mgmt_pending_cmd *cmd;
2822 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2823 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2826 if (cmd->user_data != conn)
2835 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2837 struct mgmt_rp_pair_device rp;
2838 struct hci_conn *conn = cmd->user_data;
2841 bacpy(&rp.addr.bdaddr, &conn->dst);
2842 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2844 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2845 status, &rp, sizeof(rp));
2847 /* So we don't get further callbacks for this connection */
2848 conn->connect_cfm_cb = NULL;
2849 conn->security_cfm_cb = NULL;
2850 conn->disconn_cfm_cb = NULL;
2852 hci_conn_drop(conn);
2854 /* The device is paired so there is no need to remove
2855 * its connection parameters anymore.
2857 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2864 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2866 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2867 struct mgmt_pending_cmd *cmd;
2869 cmd = find_pairing(conn);
2871 cmd->cmd_complete(cmd, status);
2872 mgmt_pending_remove(cmd);
2876 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2878 struct mgmt_pending_cmd *cmd;
2880 BT_DBG("status %u", status);
2882 cmd = find_pairing(conn);
2884 BT_DBG("Unable to find a pending command");
2888 cmd->cmd_complete(cmd, mgmt_status(status));
2889 mgmt_pending_remove(cmd);
2892 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2894 struct mgmt_pending_cmd *cmd;
2896 BT_DBG("status %u", status);
2901 cmd = find_pairing(conn);
2903 BT_DBG("Unable to find a pending command");
2907 cmd->cmd_complete(cmd, mgmt_status(status));
2908 mgmt_pending_remove(cmd);
2911 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2914 struct mgmt_cp_pair_device *cp = data;
2915 struct mgmt_rp_pair_device rp;
2916 struct mgmt_pending_cmd *cmd;
2917 u8 sec_level, auth_type;
2918 struct hci_conn *conn;
2921 bt_dev_dbg(hdev, "sock %p", sk);
2923 memset(&rp, 0, sizeof(rp));
2924 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2925 rp.addr.type = cp->addr.type;
2927 if (!bdaddr_type_is_valid(cp->addr.type))
2928 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2929 MGMT_STATUS_INVALID_PARAMS,
2932 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2933 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2934 MGMT_STATUS_INVALID_PARAMS,
2939 if (!hdev_is_powered(hdev)) {
2940 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2941 MGMT_STATUS_NOT_POWERED, &rp,
2946 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2947 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2948 MGMT_STATUS_ALREADY_PAIRED, &rp,
2953 sec_level = BT_SECURITY_MEDIUM;
2954 auth_type = HCI_AT_DEDICATED_BONDING;
2956 if (cp->addr.type == BDADDR_BREDR) {
2957 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2958 auth_type, CONN_REASON_PAIR_DEVICE);
2960 u8 addr_type = le_addr_type(cp->addr.type);
2961 struct hci_conn_params *p;
2963 /* When pairing a new device, it is expected to remember
2964 * this device for future connections. Adding the connection
2965 * parameter information ahead of time allows tracking
2966 * of the peripheral preferred values and will speed up any
2967 * further connection establishment.
2969 * If connection parameters already exist, then they
2970 * will be kept and this function does nothing.
2972 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2974 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2975 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2977 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2978 sec_level, HCI_LE_CONN_TIMEOUT,
2979 CONN_REASON_PAIR_DEVICE);
2985 if (PTR_ERR(conn) == -EBUSY)
2986 status = MGMT_STATUS_BUSY;
2987 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2988 status = MGMT_STATUS_NOT_SUPPORTED;
2989 else if (PTR_ERR(conn) == -ECONNREFUSED)
2990 status = MGMT_STATUS_REJECTED;
2992 status = MGMT_STATUS_CONNECT_FAILED;
2994 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2995 status, &rp, sizeof(rp));
2999 if (conn->connect_cfm_cb) {
3000 hci_conn_drop(conn);
3001 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3002 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3006 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3009 hci_conn_drop(conn);
3013 cmd->cmd_complete = pairing_complete;
3015 /* For LE, just connecting isn't a proof that the pairing finished */
3016 if (cp->addr.type == BDADDR_BREDR) {
3017 conn->connect_cfm_cb = pairing_complete_cb;
3018 conn->security_cfm_cb = pairing_complete_cb;
3019 conn->disconn_cfm_cb = pairing_complete_cb;
3021 conn->connect_cfm_cb = le_pairing_complete_cb;
3022 conn->security_cfm_cb = le_pairing_complete_cb;
3023 conn->disconn_cfm_cb = le_pairing_complete_cb;
3026 conn->io_capability = cp->io_cap;
3027 cmd->user_data = hci_conn_get(conn);
3029 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3030 hci_conn_security(conn, sec_level, auth_type, true)) {
3031 cmd->cmd_complete(cmd, 0);
3032 mgmt_pending_remove(cmd);
3038 hci_dev_unlock(hdev);
3042 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3045 struct mgmt_addr_info *addr = data;
3046 struct mgmt_pending_cmd *cmd;
3047 struct hci_conn *conn;
3050 bt_dev_dbg(hdev, "sock %p", sk);
3054 if (!hdev_is_powered(hdev)) {
3055 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3056 MGMT_STATUS_NOT_POWERED);
3060 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3062 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3063 MGMT_STATUS_INVALID_PARAMS);
3067 conn = cmd->user_data;
3069 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3070 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3071 MGMT_STATUS_INVALID_PARAMS);
3075 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3076 mgmt_pending_remove(cmd);
3078 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3079 addr, sizeof(*addr));
3081 /* Since user doesn't want to proceed with the connection, abort any
3082 * ongoing pairing and then terminate the link if it was created
3083 * because of the pair device action.
3085 if (addr->type == BDADDR_BREDR)
3086 hci_remove_link_key(hdev, &addr->bdaddr);
3088 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3089 le_addr_type(addr->type));
3091 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3092 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3095 hci_dev_unlock(hdev);
3099 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3100 struct mgmt_addr_info *addr, u16 mgmt_op,
3101 u16 hci_op, __le32 passkey)
3103 struct mgmt_pending_cmd *cmd;
3104 struct hci_conn *conn;
3109 if (!hdev_is_powered(hdev)) {
3110 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3111 MGMT_STATUS_NOT_POWERED, addr,
3116 if (addr->type == BDADDR_BREDR)
3117 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3119 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3120 le_addr_type(addr->type));
3123 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3124 MGMT_STATUS_NOT_CONNECTED, addr,
3129 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3130 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3132 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3133 MGMT_STATUS_SUCCESS, addr,
3136 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3137 MGMT_STATUS_FAILED, addr,
3143 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3149 cmd->cmd_complete = addr_cmd_complete;
3151 /* Continue with pairing via HCI */
3152 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3153 struct hci_cp_user_passkey_reply cp;
3155 bacpy(&cp.bdaddr, &addr->bdaddr);
3156 cp.passkey = passkey;
3157 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3159 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3163 mgmt_pending_remove(cmd);
3166 hci_dev_unlock(hdev);
3170 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3171 void *data, u16 len)
3173 struct mgmt_cp_pin_code_neg_reply *cp = data;
3175 bt_dev_dbg(hdev, "sock %p", sk);
3177 return user_pairing_resp(sk, hdev, &cp->addr,
3178 MGMT_OP_PIN_CODE_NEG_REPLY,
3179 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3182 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3185 struct mgmt_cp_user_confirm_reply *cp = data;
3187 bt_dev_dbg(hdev, "sock %p", sk);
3189 if (len != sizeof(*cp))
3190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3191 MGMT_STATUS_INVALID_PARAMS);
3193 return user_pairing_resp(sk, hdev, &cp->addr,
3194 MGMT_OP_USER_CONFIRM_REPLY,
3195 HCI_OP_USER_CONFIRM_REPLY, 0);
3198 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3199 void *data, u16 len)
3201 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3203 bt_dev_dbg(hdev, "sock %p", sk);
3205 return user_pairing_resp(sk, hdev, &cp->addr,
3206 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3207 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3210 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3213 struct mgmt_cp_user_passkey_reply *cp = data;
3215 bt_dev_dbg(hdev, "sock %p", sk);
3217 return user_pairing_resp(sk, hdev, &cp->addr,
3218 MGMT_OP_USER_PASSKEY_REPLY,
3219 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3222 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3223 void *data, u16 len)
3225 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3227 bt_dev_dbg(hdev, "sock %p", sk);
3229 return user_pairing_resp(sk, hdev, &cp->addr,
3230 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3231 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3234 static void adv_expire(struct hci_dev *hdev, u32 flags)
3236 struct adv_info *adv_instance;
3237 struct hci_request req;
3240 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3244 /* stop if current instance doesn't need to be changed */
3245 if (!(adv_instance->flags & flags))
3248 cancel_adv_timeout(hdev);
3250 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3254 hci_req_init(&req, hdev);
3255 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3260 hci_req_run(&req, NULL);
3263 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3265 struct mgmt_cp_set_local_name *cp;
3266 struct mgmt_pending_cmd *cmd;
3268 bt_dev_dbg(hdev, "status 0x%02x", status);
3272 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3279 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3280 mgmt_status(status));
3282 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3285 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3286 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3289 mgmt_pending_remove(cmd);
3292 hci_dev_unlock(hdev);
3295 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3298 struct mgmt_cp_set_local_name *cp = data;
3299 struct mgmt_pending_cmd *cmd;
3300 struct hci_request req;
3303 bt_dev_dbg(hdev, "sock %p", sk);
3307 /* If the old values are the same as the new ones just return a
3308 * direct command complete event.
3310 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3311 !memcmp(hdev->short_name, cp->short_name,
3312 sizeof(hdev->short_name))) {
3313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3318 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3320 if (!hdev_is_powered(hdev)) {
3321 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3323 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3328 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3329 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3330 ext_info_changed(hdev, sk);
3335 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3341 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3343 hci_req_init(&req, hdev);
3345 if (lmp_bredr_capable(hdev)) {
3346 __hci_req_update_name(&req);
3347 __hci_req_update_eir(&req);
3350 /* The name is stored in the scan response data and so
3351 * no need to update the advertising data here.
3353 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3354 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3356 err = hci_req_run(&req, set_name_complete);
3358 mgmt_pending_remove(cmd);
3361 hci_dev_unlock(hdev);
3365 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3368 struct mgmt_cp_set_appearance *cp = data;
3372 bt_dev_dbg(hdev, "sock %p", sk);
3374 if (!lmp_le_capable(hdev))
3375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3376 MGMT_STATUS_NOT_SUPPORTED);
3378 appearance = le16_to_cpu(cp->appearance);
3382 if (hdev->appearance != appearance) {
3383 hdev->appearance = appearance;
3385 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3386 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3388 ext_info_changed(hdev, sk);
3391 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3394 hci_dev_unlock(hdev);
3399 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3400 void *data, u16 len)
3402 struct mgmt_rp_get_phy_configuration rp;
3404 bt_dev_dbg(hdev, "sock %p", sk);
3408 memset(&rp, 0, sizeof(rp));
3410 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3411 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3412 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3414 hci_dev_unlock(hdev);
3416 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3420 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3422 struct mgmt_ev_phy_configuration_changed ev;
3424 memset(&ev, 0, sizeof(ev));
3426 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3428 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3432 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3433 u16 opcode, struct sk_buff *skb)
3435 struct mgmt_pending_cmd *cmd;
3437 bt_dev_dbg(hdev, "status 0x%02x", status);
3441 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3446 mgmt_cmd_status(cmd->sk, hdev->id,
3447 MGMT_OP_SET_PHY_CONFIGURATION,
3448 mgmt_status(status));
3450 mgmt_cmd_complete(cmd->sk, hdev->id,
3451 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3454 mgmt_phy_configuration_changed(hdev, cmd->sk);
3457 mgmt_pending_remove(cmd);
3460 hci_dev_unlock(hdev);
3463 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3464 void *data, u16 len)
3466 struct mgmt_cp_set_phy_configuration *cp = data;
3467 struct hci_cp_le_set_default_phy cp_phy;
3468 struct mgmt_pending_cmd *cmd;
3469 struct hci_request req;
3470 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3471 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3472 bool changed = false;
3475 bt_dev_dbg(hdev, "sock %p", sk);
3477 configurable_phys = get_configurable_phys(hdev);
3478 supported_phys = get_supported_phys(hdev);
3479 selected_phys = __le32_to_cpu(cp->selected_phys);
3481 if (selected_phys & ~supported_phys)
3482 return mgmt_cmd_status(sk, hdev->id,
3483 MGMT_OP_SET_PHY_CONFIGURATION,
3484 MGMT_STATUS_INVALID_PARAMS);
3486 unconfigure_phys = supported_phys & ~configurable_phys;
3488 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3489 return mgmt_cmd_status(sk, hdev->id,
3490 MGMT_OP_SET_PHY_CONFIGURATION,
3491 MGMT_STATUS_INVALID_PARAMS);
3493 if (selected_phys == get_selected_phys(hdev))
3494 return mgmt_cmd_complete(sk, hdev->id,
3495 MGMT_OP_SET_PHY_CONFIGURATION,
3500 if (!hdev_is_powered(hdev)) {
3501 err = mgmt_cmd_status(sk, hdev->id,
3502 MGMT_OP_SET_PHY_CONFIGURATION,
3503 MGMT_STATUS_REJECTED);
3507 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3508 err = mgmt_cmd_status(sk, hdev->id,
3509 MGMT_OP_SET_PHY_CONFIGURATION,
3514 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3515 pkt_type |= (HCI_DH3 | HCI_DM3);
3517 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3519 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3520 pkt_type |= (HCI_DH5 | HCI_DM5);
3522 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3524 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3525 pkt_type &= ~HCI_2DH1;
3527 pkt_type |= HCI_2DH1;
3529 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3530 pkt_type &= ~HCI_2DH3;
3532 pkt_type |= HCI_2DH3;
3534 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3535 pkt_type &= ~HCI_2DH5;
3537 pkt_type |= HCI_2DH5;
3539 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3540 pkt_type &= ~HCI_3DH1;
3542 pkt_type |= HCI_3DH1;
3544 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3545 pkt_type &= ~HCI_3DH3;
3547 pkt_type |= HCI_3DH3;
3549 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3550 pkt_type &= ~HCI_3DH5;
3552 pkt_type |= HCI_3DH5;
3554 if (pkt_type != hdev->pkt_type) {
3555 hdev->pkt_type = pkt_type;
3559 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3560 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3562 mgmt_phy_configuration_changed(hdev, sk);
3564 err = mgmt_cmd_complete(sk, hdev->id,
3565 MGMT_OP_SET_PHY_CONFIGURATION,
3571 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3578 hci_req_init(&req, hdev);
3580 memset(&cp_phy, 0, sizeof(cp_phy));
3582 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3583 cp_phy.all_phys |= 0x01;
3585 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3586 cp_phy.all_phys |= 0x02;
3588 if (selected_phys & MGMT_PHY_LE_1M_TX)
3589 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3591 if (selected_phys & MGMT_PHY_LE_2M_TX)
3592 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3594 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3595 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3597 if (selected_phys & MGMT_PHY_LE_1M_RX)
3598 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3600 if (selected_phys & MGMT_PHY_LE_2M_RX)
3601 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3603 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3604 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3606 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3608 err = hci_req_run_skb(&req, set_default_phy_complete);
3610 mgmt_pending_remove(cmd);
3613 hci_dev_unlock(hdev);
3618 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3621 int err = MGMT_STATUS_SUCCESS;
3622 struct mgmt_cp_set_blocked_keys *keys = data;
3623 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3624 sizeof(struct mgmt_blocked_key_info));
3625 u16 key_count, expected_len;
3628 bt_dev_dbg(hdev, "sock %p", sk);
3630 key_count = __le16_to_cpu(keys->key_count);
3631 if (key_count > max_key_count) {
3632 bt_dev_err(hdev, "too big key_count value %u", key_count);
3633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3634 MGMT_STATUS_INVALID_PARAMS);
3637 expected_len = struct_size(keys, keys, key_count);
3638 if (expected_len != len) {
3639 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3642 MGMT_STATUS_INVALID_PARAMS);
3647 hci_blocked_keys_clear(hdev);
3649 for (i = 0; i < keys->key_count; ++i) {
3650 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3653 err = MGMT_STATUS_NO_RESOURCES;
3657 b->type = keys->keys[i].type;
3658 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3659 list_add_rcu(&b->list, &hdev->blocked_keys);
3661 hci_dev_unlock(hdev);
3663 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3667 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3668 void *data, u16 len)
3670 struct mgmt_mode *cp = data;
3672 bool changed = false;
3674 bt_dev_dbg(hdev, "sock %p", sk);
3676 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3677 return mgmt_cmd_status(sk, hdev->id,
3678 MGMT_OP_SET_WIDEBAND_SPEECH,
3679 MGMT_STATUS_NOT_SUPPORTED);
3681 if (cp->val != 0x00 && cp->val != 0x01)
3682 return mgmt_cmd_status(sk, hdev->id,
3683 MGMT_OP_SET_WIDEBAND_SPEECH,
3684 MGMT_STATUS_INVALID_PARAMS);
3688 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3689 err = mgmt_cmd_status(sk, hdev->id,
3690 MGMT_OP_SET_WIDEBAND_SPEECH,
3695 if (hdev_is_powered(hdev) &&
3696 !!cp->val != hci_dev_test_flag(hdev,
3697 HCI_WIDEBAND_SPEECH_ENABLED)) {
3698 err = mgmt_cmd_status(sk, hdev->id,
3699 MGMT_OP_SET_WIDEBAND_SPEECH,
3700 MGMT_STATUS_REJECTED);
3705 changed = !hci_dev_test_and_set_flag(hdev,
3706 HCI_WIDEBAND_SPEECH_ENABLED);
3708 changed = hci_dev_test_and_clear_flag(hdev,
3709 HCI_WIDEBAND_SPEECH_ENABLED);
3711 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3716 err = new_settings(hdev, sk);
3719 hci_dev_unlock(hdev);
3723 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3724 void *data, u16 data_len)
3727 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3730 u8 tx_power_range[2];
3732 bt_dev_dbg(hdev, "sock %p", sk);
3734 memset(&buf, 0, sizeof(buf));
3738 /* When the Read Simple Pairing Options command is supported, then
3739 * the remote public key validation is supported.
3741 * Alternatively, when Microsoft extensions are available, they can
3742 * indicate support for public key validation as well.
3744 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3745 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3747 flags |= 0x02; /* Remote public key validation (LE) */
3749 /* When the Read Encryption Key Size command is supported, then the
3750 * encryption key size is enforced.
3752 if (hdev->commands[20] & 0x10)
3753 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3755 flags |= 0x08; /* Encryption key size enforcement (LE) */
3757 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3760 /* When the Read Simple Pairing Options command is supported, then
3761 * also max encryption key size information is provided.
3763 if (hdev->commands[41] & 0x08)
3764 cap_len = eir_append_le16(rp->cap, cap_len,
3765 MGMT_CAP_MAX_ENC_KEY_SIZE,
3766 hdev->max_enc_key_size);
3768 cap_len = eir_append_le16(rp->cap, cap_len,
3769 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3770 SMP_MAX_ENC_KEY_SIZE);
3772 /* Append the min/max LE tx power parameters if we were able to fetch
3773 * it from the controller
3775 if (hdev->commands[38] & 0x80) {
3776 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3777 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3778 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3782 rp->cap_len = cpu_to_le16(cap_len);
3784 hci_dev_unlock(hdev);
3786 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3787 rp, sizeof(*rp) + cap_len);
3790 #ifdef CONFIG_BT_FEATURE_DEBUG
3791 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3792 static const u8 debug_uuid[16] = {
3793 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3794 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3798 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3799 static const u8 simult_central_periph_uuid[16] = {
3800 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3801 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3804 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3805 static const u8 rpa_resolution_uuid[16] = {
3806 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3807 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3810 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3811 void *data, u16 data_len)
3813 char buf[62]; /* Enough space for 3 features */
3814 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3818 bt_dev_dbg(hdev, "sock %p", sk);
3820 memset(&buf, 0, sizeof(buf));
3822 #ifdef CONFIG_BT_FEATURE_DEBUG
3824 flags = bt_dbg_get() ? BIT(0) : 0;
3826 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3827 rp->features[idx].flags = cpu_to_le32(flags);
3833 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3834 (hdev->le_states[4] & 0x08) && /* Central */
3835 (hdev->le_states[4] & 0x40) && /* Peripheral */
3836 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3841 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3842 rp->features[idx].flags = cpu_to_le32(flags);
3846 if (hdev && use_ll_privacy(hdev)) {
3847 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3848 flags = BIT(0) | BIT(1);
3852 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3853 rp->features[idx].flags = cpu_to_le32(flags);
3857 rp->feature_count = cpu_to_le16(idx);
3859 /* After reading the experimental features information, enable
3860 * the events to update client on any future change.
3862 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3864 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3865 MGMT_OP_READ_EXP_FEATURES_INFO,
3866 0, rp, sizeof(*rp) + (20 * idx));
3869 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3872 struct mgmt_ev_exp_feature_changed ev;
3874 memset(&ev, 0, sizeof(ev));
3875 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3876 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3878 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3880 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3884 #ifdef CONFIG_BT_FEATURE_DEBUG
3885 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3887 struct mgmt_ev_exp_feature_changed ev;
3889 memset(&ev, 0, sizeof(ev));
3890 memcpy(ev.uuid, debug_uuid, 16);
3891 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3893 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3895 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3899 #define EXP_FEAT(_uuid, _set_func) \
3902 .set_func = _set_func, \
3905 /* The zero key uuid is special. Multiple exp features are set through it. */
3906 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
3907 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3909 struct mgmt_rp_set_exp_feature rp;
3911 memset(rp.uuid, 0, 16);
3912 rp.flags = cpu_to_le32(0);
3914 #ifdef CONFIG_BT_FEATURE_DEBUG
3916 bool changed = bt_dbg_get();
3921 exp_debug_feature_changed(false, sk);
3925 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3928 changed = hci_dev_test_and_clear_flag(hdev,
3929 HCI_ENABLE_LL_PRIVACY);
3931 exp_ll_privacy_feature_changed(false, hdev, sk);
3934 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3936 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3937 MGMT_OP_SET_EXP_FEATURE, 0,
3941 #ifdef CONFIG_BT_FEATURE_DEBUG
3942 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
3943 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3945 struct mgmt_rp_set_exp_feature rp;
3950 /* Command requires to use the non-controller index */
3952 return mgmt_cmd_status(sk, hdev->id,
3953 MGMT_OP_SET_EXP_FEATURE,
3954 MGMT_STATUS_INVALID_INDEX);
3956 /* Parameters are limited to a single octet */
3957 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3958 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3959 MGMT_OP_SET_EXP_FEATURE,
3960 MGMT_STATUS_INVALID_PARAMS);
3962 /* Only boolean on/off is supported */
3963 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3964 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3965 MGMT_OP_SET_EXP_FEATURE,
3966 MGMT_STATUS_INVALID_PARAMS);
3968 val = !!cp->param[0];
3969 changed = val ? !bt_dbg_get() : bt_dbg_get();
3972 memcpy(rp.uuid, debug_uuid, 16);
3973 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3975 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3977 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3978 MGMT_OP_SET_EXP_FEATURE, 0,
3982 exp_debug_feature_changed(val, sk);
3988 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
3989 struct mgmt_cp_set_exp_feature *cp,
3992 struct mgmt_rp_set_exp_feature rp;
3997 /* Command requires to use the controller index */
3999 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4000 MGMT_OP_SET_EXP_FEATURE,
4001 MGMT_STATUS_INVALID_INDEX);
4003 /* Changes can only be made when controller is powered down */
4004 if (hdev_is_powered(hdev))
4005 return mgmt_cmd_status(sk, hdev->id,
4006 MGMT_OP_SET_EXP_FEATURE,
4007 MGMT_STATUS_REJECTED);
4009 /* Parameters are limited to a single octet */
4010 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4011 return mgmt_cmd_status(sk, hdev->id,
4012 MGMT_OP_SET_EXP_FEATURE,
4013 MGMT_STATUS_INVALID_PARAMS);
4015 /* Only boolean on/off is supported */
4016 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4017 return mgmt_cmd_status(sk, hdev->id,
4018 MGMT_OP_SET_EXP_FEATURE,
4019 MGMT_STATUS_INVALID_PARAMS);
4021 val = !!cp->param[0];
4024 changed = !hci_dev_test_and_set_flag(hdev,
4025 HCI_ENABLE_LL_PRIVACY);
4026 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4028 /* Enable LL privacy + supported settings changed */
4029 flags = BIT(0) | BIT(1);
4031 changed = hci_dev_test_and_clear_flag(hdev,
4032 HCI_ENABLE_LL_PRIVACY);
4034 /* Disable LL privacy + supported settings changed */
4038 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4039 rp.flags = cpu_to_le32(flags);
4041 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4043 err = mgmt_cmd_complete(sk, hdev->id,
4044 MGMT_OP_SET_EXP_FEATURE, 0,
4048 exp_ll_privacy_feature_changed(val, hdev, sk);
4053 static const struct mgmt_exp_feature {
4055 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4056 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4057 } exp_features[] = {
4058 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4059 #ifdef CONFIG_BT_FEATURE_DEBUG
4060 EXP_FEAT(debug_uuid, set_debug_func),
4062 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4064 /* end with a null feature */
4065 EXP_FEAT(NULL, NULL)
4068 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4069 void *data, u16 data_len)
4071 struct mgmt_cp_set_exp_feature *cp = data;
4074 bt_dev_dbg(hdev, "sock %p", sk);
4076 for (i = 0; exp_features[i].uuid; i++) {
4077 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4078 return exp_features[i].set_func(sk, hdev, cp, data_len);
4081 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4082 MGMT_OP_SET_EXP_FEATURE,
4083 MGMT_STATUS_NOT_SUPPORTED);
4086 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4088 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4091 struct mgmt_cp_get_device_flags *cp = data;
4092 struct mgmt_rp_get_device_flags rp;
4093 struct bdaddr_list_with_flags *br_params;
4094 struct hci_conn_params *params;
4095 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4096 u32 current_flags = 0;
4097 u8 status = MGMT_STATUS_INVALID_PARAMS;
4099 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4100 &cp->addr.bdaddr, cp->addr.type);
4104 memset(&rp, 0, sizeof(rp));
4106 if (cp->addr.type == BDADDR_BREDR) {
4107 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4113 current_flags = br_params->current_flags;
4115 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4116 le_addr_type(cp->addr.type));
4121 current_flags = params->current_flags;
4124 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4125 rp.addr.type = cp->addr.type;
4126 rp.supported_flags = cpu_to_le32(supported_flags);
4127 rp.current_flags = cpu_to_le32(current_flags);
4129 status = MGMT_STATUS_SUCCESS;
4132 hci_dev_unlock(hdev);
4134 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4138 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4139 bdaddr_t *bdaddr, u8 bdaddr_type,
4140 u32 supported_flags, u32 current_flags)
4142 struct mgmt_ev_device_flags_changed ev;
4144 bacpy(&ev.addr.bdaddr, bdaddr);
4145 ev.addr.type = bdaddr_type;
4146 ev.supported_flags = cpu_to_le32(supported_flags);
4147 ev.current_flags = cpu_to_le32(current_flags);
4149 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4152 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4155 struct mgmt_cp_set_device_flags *cp = data;
4156 struct bdaddr_list_with_flags *br_params;
4157 struct hci_conn_params *params;
4158 u8 status = MGMT_STATUS_INVALID_PARAMS;
4159 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4160 u32 current_flags = __le32_to_cpu(cp->current_flags);
4162 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4163 &cp->addr.bdaddr, cp->addr.type,
4164 __le32_to_cpu(current_flags));
4166 if ((supported_flags | current_flags) != supported_flags) {
4167 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4168 current_flags, supported_flags);
4174 if (cp->addr.type == BDADDR_BREDR) {
4175 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4180 br_params->current_flags = current_flags;
4181 status = MGMT_STATUS_SUCCESS;
4183 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4184 &cp->addr.bdaddr, cp->addr.type);
4187 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4188 le_addr_type(cp->addr.type));
4190 params->current_flags = current_flags;
4191 status = MGMT_STATUS_SUCCESS;
4193 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4195 le_addr_type(cp->addr.type));
4200 hci_dev_unlock(hdev);
4202 if (status == MGMT_STATUS_SUCCESS)
4203 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4204 supported_flags, current_flags);
4206 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4207 &cp->addr, sizeof(cp->addr));
4210 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4213 struct mgmt_ev_adv_monitor_added ev;
4215 ev.monitor_handle = cpu_to_le16(handle);
4217 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4220 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4222 struct mgmt_ev_adv_monitor_removed ev;
4223 struct mgmt_pending_cmd *cmd;
4224 struct sock *sk_skip = NULL;
4225 struct mgmt_cp_remove_adv_monitor *cp;
4227 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4231 if (cp->monitor_handle)
4235 ev.monitor_handle = cpu_to_le16(handle);
4237 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4240 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4241 void *data, u16 len)
4243 struct adv_monitor *monitor = NULL;
4244 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4247 __u32 supported = 0;
4249 __u16 num_handles = 0;
4250 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4252 BT_DBG("request for %s", hdev->name);
4256 if (msft_monitor_supported(hdev))
4257 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4259 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4260 handles[num_handles++] = monitor->handle;
4262 hci_dev_unlock(hdev);
4264 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4265 rp = kmalloc(rp_size, GFP_KERNEL);
4269 /* All supported features are currently enabled */
4270 enabled = supported;
4272 rp->supported_features = cpu_to_le32(supported);
4273 rp->enabled_features = cpu_to_le32(enabled);
4274 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4275 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4276 rp->num_handles = cpu_to_le16(num_handles);
4278 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4280 err = mgmt_cmd_complete(sk, hdev->id,
4281 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4282 MGMT_STATUS_SUCCESS, rp, rp_size);
4289 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4291 struct mgmt_rp_add_adv_patterns_monitor rp;
4292 struct mgmt_pending_cmd *cmd;
4293 struct adv_monitor *monitor;
4298 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4300 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4305 monitor = cmd->user_data;
4306 rp.monitor_handle = cpu_to_le16(monitor->handle);
4309 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4310 hdev->adv_monitors_cnt++;
4311 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4312 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4313 hci_update_background_scan(hdev);
4316 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4317 mgmt_status(status), &rp, sizeof(rp));
4318 mgmt_pending_remove(cmd);
4321 hci_dev_unlock(hdev);
4322 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4323 rp.monitor_handle, status);
4328 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4329 struct adv_monitor *m, u8 status,
4330 void *data, u16 len, u16 op)
4332 struct mgmt_rp_add_adv_patterns_monitor rp;
4333 struct mgmt_pending_cmd *cmd;
4342 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4343 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4344 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4345 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4346 status = MGMT_STATUS_BUSY;
4350 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4352 status = MGMT_STATUS_NO_RESOURCES;
4357 pending = hci_add_adv_monitor(hdev, m, &err);
4359 if (err == -ENOSPC || err == -ENOMEM)
4360 status = MGMT_STATUS_NO_RESOURCES;
4361 else if (err == -EINVAL)
4362 status = MGMT_STATUS_INVALID_PARAMS;
4364 status = MGMT_STATUS_FAILED;
4366 mgmt_pending_remove(cmd);
4371 mgmt_pending_remove(cmd);
4372 rp.monitor_handle = cpu_to_le16(m->handle);
4373 mgmt_adv_monitor_added(sk, hdev, m->handle);
4374 m->state = ADV_MONITOR_STATE_REGISTERED;
4375 hdev->adv_monitors_cnt++;
4377 hci_dev_unlock(hdev);
4378 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4382 hci_dev_unlock(hdev);
4387 hci_free_adv_monitor(hdev, m);
4388 hci_dev_unlock(hdev);
4389 return mgmt_cmd_status(sk, hdev->id, op, status);
4392 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4393 struct mgmt_adv_rssi_thresholds *rssi)
4396 m->rssi.low_threshold = rssi->low_threshold;
4397 m->rssi.low_threshold_timeout =
4398 __le16_to_cpu(rssi->low_threshold_timeout);
4399 m->rssi.high_threshold = rssi->high_threshold;
4400 m->rssi.high_threshold_timeout =
4401 __le16_to_cpu(rssi->high_threshold_timeout);
4402 m->rssi.sampling_period = rssi->sampling_period;
4404 /* Default values. These numbers are the least constricting
4405 * parameters for MSFT API to work, so it behaves as if there
4406 * are no rssi parameter to consider. May need to be changed
4407 * if other API are to be supported.
4409 m->rssi.low_threshold = -127;
4410 m->rssi.low_threshold_timeout = 60;
4411 m->rssi.high_threshold = -127;
4412 m->rssi.high_threshold_timeout = 0;
4413 m->rssi.sampling_period = 0;
4417 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4418 struct mgmt_adv_pattern *patterns)
4420 u8 offset = 0, length = 0;
4421 struct adv_pattern *p = NULL;
4424 for (i = 0; i < pattern_count; i++) {
4425 offset = patterns[i].offset;
4426 length = patterns[i].length;
4427 if (offset >= HCI_MAX_AD_LENGTH ||
4428 length > HCI_MAX_AD_LENGTH ||
4429 (offset + length) > HCI_MAX_AD_LENGTH)
4430 return MGMT_STATUS_INVALID_PARAMS;
4432 p = kmalloc(sizeof(*p), GFP_KERNEL);
4434 return MGMT_STATUS_NO_RESOURCES;
4436 p->ad_type = patterns[i].ad_type;
4437 p->offset = patterns[i].offset;
4438 p->length = patterns[i].length;
4439 memcpy(p->value, patterns[i].value, p->length);
4441 INIT_LIST_HEAD(&p->list);
4442 list_add(&p->list, &m->patterns);
4445 return MGMT_STATUS_SUCCESS;
4448 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4449 void *data, u16 len)
4451 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4452 struct adv_monitor *m = NULL;
4453 u8 status = MGMT_STATUS_SUCCESS;
4454 size_t expected_size = sizeof(*cp);
4456 BT_DBG("request for %s", hdev->name);
4458 if (len <= sizeof(*cp)) {
4459 status = MGMT_STATUS_INVALID_PARAMS;
4463 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4464 if (len != expected_size) {
4465 status = MGMT_STATUS_INVALID_PARAMS;
4469 m = kzalloc(sizeof(*m), GFP_KERNEL);
4471 status = MGMT_STATUS_NO_RESOURCES;
4475 INIT_LIST_HEAD(&m->patterns);
4477 parse_adv_monitor_rssi(m, NULL);
4478 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4481 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4482 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4485 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4486 void *data, u16 len)
4488 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4489 struct adv_monitor *m = NULL;
4490 u8 status = MGMT_STATUS_SUCCESS;
4491 size_t expected_size = sizeof(*cp);
4493 BT_DBG("request for %s", hdev->name);
4495 if (len <= sizeof(*cp)) {
4496 status = MGMT_STATUS_INVALID_PARAMS;
4500 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4501 if (len != expected_size) {
4502 status = MGMT_STATUS_INVALID_PARAMS;
4506 m = kzalloc(sizeof(*m), GFP_KERNEL);
4508 status = MGMT_STATUS_NO_RESOURCES;
4512 INIT_LIST_HEAD(&m->patterns);
4514 parse_adv_monitor_rssi(m, &cp->rssi);
4515 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4518 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4519 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4522 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4524 struct mgmt_rp_remove_adv_monitor rp;
4525 struct mgmt_cp_remove_adv_monitor *cp;
4526 struct mgmt_pending_cmd *cmd;
4531 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4536 rp.monitor_handle = cp->monitor_handle;
4539 hci_update_background_scan(hdev);
4541 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4542 mgmt_status(status), &rp, sizeof(rp));
4543 mgmt_pending_remove(cmd);
4546 hci_dev_unlock(hdev);
4547 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4548 rp.monitor_handle, status);
4553 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4554 void *data, u16 len)
4556 struct mgmt_cp_remove_adv_monitor *cp = data;
4557 struct mgmt_rp_remove_adv_monitor rp;
4558 struct mgmt_pending_cmd *cmd;
4559 u16 handle = __le16_to_cpu(cp->monitor_handle);
4563 BT_DBG("request for %s", hdev->name);
4564 rp.monitor_handle = cp->monitor_handle;
4568 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4569 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4570 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4571 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4572 status = MGMT_STATUS_BUSY;
4576 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4578 status = MGMT_STATUS_NO_RESOURCES;
4583 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4585 pending = hci_remove_all_adv_monitor(hdev, &err);
4588 mgmt_pending_remove(cmd);
4591 status = MGMT_STATUS_INVALID_INDEX;
4593 status = MGMT_STATUS_FAILED;
4598 /* monitor can be removed without forwarding request to controller */
4600 mgmt_pending_remove(cmd);
4601 hci_dev_unlock(hdev);
4603 return mgmt_cmd_complete(sk, hdev->id,
4604 MGMT_OP_REMOVE_ADV_MONITOR,
4605 MGMT_STATUS_SUCCESS,
4609 hci_dev_unlock(hdev);
4613 hci_dev_unlock(hdev);
4614 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4618 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4619 u16 opcode, struct sk_buff *skb)
4621 struct mgmt_rp_read_local_oob_data mgmt_rp;
4622 size_t rp_size = sizeof(mgmt_rp);
4623 struct mgmt_pending_cmd *cmd;
4625 bt_dev_dbg(hdev, "status %u", status);
4627 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4631 if (status || !skb) {
4632 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4633 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4637 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4639 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4640 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
4642 if (skb->len < sizeof(*rp)) {
4643 mgmt_cmd_status(cmd->sk, hdev->id,
4644 MGMT_OP_READ_LOCAL_OOB_DATA,
4645 MGMT_STATUS_FAILED);
4649 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4650 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
4652 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4654 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4656 if (skb->len < sizeof(*rp)) {
4657 mgmt_cmd_status(cmd->sk, hdev->id,
4658 MGMT_OP_READ_LOCAL_OOB_DATA,
4659 MGMT_STATUS_FAILED);
4663 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4664 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4666 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4667 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4670 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4671 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4674 mgmt_pending_remove(cmd);
4677 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4678 void *data, u16 data_len)
4680 struct mgmt_pending_cmd *cmd;
4681 struct hci_request req;
4684 bt_dev_dbg(hdev, "sock %p", sk);
4688 if (!hdev_is_powered(hdev)) {
4689 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4690 MGMT_STATUS_NOT_POWERED);
4694 if (!lmp_ssp_capable(hdev)) {
4695 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4696 MGMT_STATUS_NOT_SUPPORTED);
4700 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4701 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4706 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4712 hci_req_init(&req, hdev);
4714 if (bredr_sc_enabled(hdev))
4715 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4717 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4719 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4721 mgmt_pending_remove(cmd);
4724 hci_dev_unlock(hdev);
4728 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4729 void *data, u16 len)
4731 struct mgmt_addr_info *addr = data;
4734 bt_dev_dbg(hdev, "sock %p", sk);
4736 if (!bdaddr_type_is_valid(addr->type))
4737 return mgmt_cmd_complete(sk, hdev->id,
4738 MGMT_OP_ADD_REMOTE_OOB_DATA,
4739 MGMT_STATUS_INVALID_PARAMS,
4740 addr, sizeof(*addr));
4744 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4745 struct mgmt_cp_add_remote_oob_data *cp = data;
4748 if (cp->addr.type != BDADDR_BREDR) {
4749 err = mgmt_cmd_complete(sk, hdev->id,
4750 MGMT_OP_ADD_REMOTE_OOB_DATA,
4751 MGMT_STATUS_INVALID_PARAMS,
4752 &cp->addr, sizeof(cp->addr));
4756 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4757 cp->addr.type, cp->hash,
4758 cp->rand, NULL, NULL);
4760 status = MGMT_STATUS_FAILED;
4762 status = MGMT_STATUS_SUCCESS;
4764 err = mgmt_cmd_complete(sk, hdev->id,
4765 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4766 &cp->addr, sizeof(cp->addr));
4767 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4768 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4769 u8 *rand192, *hash192, *rand256, *hash256;
4772 if (bdaddr_type_is_le(cp->addr.type)) {
4773 /* Enforce zero-valued 192-bit parameters as
4774 * long as legacy SMP OOB isn't implemented.
4776 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4777 memcmp(cp->hash192, ZERO_KEY, 16)) {
4778 err = mgmt_cmd_complete(sk, hdev->id,
4779 MGMT_OP_ADD_REMOTE_OOB_DATA,
4780 MGMT_STATUS_INVALID_PARAMS,
4781 addr, sizeof(*addr));
4788 /* In case one of the P-192 values is set to zero,
4789 * then just disable OOB data for P-192.
4791 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4792 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4796 rand192 = cp->rand192;
4797 hash192 = cp->hash192;
4801 /* In case one of the P-256 values is set to zero, then just
4802 * disable OOB data for P-256.
4804 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4805 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4809 rand256 = cp->rand256;
4810 hash256 = cp->hash256;
4813 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4814 cp->addr.type, hash192, rand192,
4817 status = MGMT_STATUS_FAILED;
4819 status = MGMT_STATUS_SUCCESS;
4821 err = mgmt_cmd_complete(sk, hdev->id,
4822 MGMT_OP_ADD_REMOTE_OOB_DATA,
4823 status, &cp->addr, sizeof(cp->addr));
4825 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4827 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4828 MGMT_STATUS_INVALID_PARAMS);
4832 hci_dev_unlock(hdev);
4836 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4837 void *data, u16 len)
4839 struct mgmt_cp_remove_remote_oob_data *cp = data;
4843 bt_dev_dbg(hdev, "sock %p", sk);
4845 if (cp->addr.type != BDADDR_BREDR)
4846 return mgmt_cmd_complete(sk, hdev->id,
4847 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4848 MGMT_STATUS_INVALID_PARAMS,
4849 &cp->addr, sizeof(cp->addr));
4853 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4854 hci_remote_oob_data_clear(hdev);
4855 status = MGMT_STATUS_SUCCESS;
4859 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4861 status = MGMT_STATUS_INVALID_PARAMS;
4863 status = MGMT_STATUS_SUCCESS;
4866 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4867 status, &cp->addr, sizeof(cp->addr));
4869 hci_dev_unlock(hdev);
4873 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4875 struct mgmt_pending_cmd *cmd;
4877 bt_dev_dbg(hdev, "status %u", status);
4881 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4883 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4886 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4889 cmd->cmd_complete(cmd, mgmt_status(status));
4890 mgmt_pending_remove(cmd);
4893 hci_dev_unlock(hdev);
4895 /* Handle suspend notifier */
4896 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4897 hdev->suspend_tasks)) {
4898 bt_dev_dbg(hdev, "Unpaused discovery");
4899 wake_up(&hdev->suspend_wait_q);
4903 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4904 uint8_t *mgmt_status)
4907 case DISCOV_TYPE_LE:
4908 *mgmt_status = mgmt_le_support(hdev);
4912 case DISCOV_TYPE_INTERLEAVED:
4913 *mgmt_status = mgmt_le_support(hdev);
4917 case DISCOV_TYPE_BREDR:
4918 *mgmt_status = mgmt_bredr_support(hdev);
4923 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4930 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4931 u16 op, void *data, u16 len)
4933 struct mgmt_cp_start_discovery *cp = data;
4934 struct mgmt_pending_cmd *cmd;
4938 bt_dev_dbg(hdev, "sock %p", sk);
4942 if (!hdev_is_powered(hdev)) {
4943 err = mgmt_cmd_complete(sk, hdev->id, op,
4944 MGMT_STATUS_NOT_POWERED,
4945 &cp->type, sizeof(cp->type));
4949 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4950 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4951 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4952 &cp->type, sizeof(cp->type));
4956 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4957 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4958 &cp->type, sizeof(cp->type));
4962 /* Can't start discovery when it is paused */
4963 if (hdev->discovery_paused) {
4964 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4965 &cp->type, sizeof(cp->type));
4969 /* Clear the discovery filter first to free any previously
4970 * allocated memory for the UUID list.
4972 hci_discovery_filter_clear(hdev);
4974 hdev->discovery.type = cp->type;
4975 hdev->discovery.report_invalid_rssi = false;
4976 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4977 hdev->discovery.limited = true;
4979 hdev->discovery.limited = false;
4981 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4987 cmd->cmd_complete = generic_cmd_complete;
4989 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4990 queue_work(hdev->req_workqueue, &hdev->discov_update);
4994 hci_dev_unlock(hdev);
4998 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4999 void *data, u16 len)
5001 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5005 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5006 void *data, u16 len)
5008 return start_discovery_internal(sk, hdev,
5009 MGMT_OP_START_LIMITED_DISCOVERY,
5013 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
5016 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
5020 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5021 void *data, u16 len)
5023 struct mgmt_cp_start_service_discovery *cp = data;
5024 struct mgmt_pending_cmd *cmd;
5025 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5026 u16 uuid_count, expected_len;
5030 bt_dev_dbg(hdev, "sock %p", sk);
5034 if (!hdev_is_powered(hdev)) {
5035 err = mgmt_cmd_complete(sk, hdev->id,
5036 MGMT_OP_START_SERVICE_DISCOVERY,
5037 MGMT_STATUS_NOT_POWERED,
5038 &cp->type, sizeof(cp->type));
5042 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5043 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5044 err = mgmt_cmd_complete(sk, hdev->id,
5045 MGMT_OP_START_SERVICE_DISCOVERY,
5046 MGMT_STATUS_BUSY, &cp->type,
5051 if (hdev->discovery_paused) {
5052 err = mgmt_cmd_complete(sk, hdev->id,
5053 MGMT_OP_START_SERVICE_DISCOVERY,
5054 MGMT_STATUS_BUSY, &cp->type,
5059 uuid_count = __le16_to_cpu(cp->uuid_count);
5060 if (uuid_count > max_uuid_count) {
5061 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5063 err = mgmt_cmd_complete(sk, hdev->id,
5064 MGMT_OP_START_SERVICE_DISCOVERY,
5065 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5070 expected_len = sizeof(*cp) + uuid_count * 16;
5071 if (expected_len != len) {
5072 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5074 err = mgmt_cmd_complete(sk, hdev->id,
5075 MGMT_OP_START_SERVICE_DISCOVERY,
5076 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5081 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5082 err = mgmt_cmd_complete(sk, hdev->id,
5083 MGMT_OP_START_SERVICE_DISCOVERY,
5084 status, &cp->type, sizeof(cp->type));
5088 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5095 cmd->cmd_complete = service_discovery_cmd_complete;
5097 /* Clear the discovery filter first to free any previously
5098 * allocated memory for the UUID list.
5100 hci_discovery_filter_clear(hdev);
5102 hdev->discovery.result_filtering = true;
5103 hdev->discovery.type = cp->type;
5104 hdev->discovery.rssi = cp->rssi;
5105 hdev->discovery.uuid_count = uuid_count;
5107 if (uuid_count > 0) {
5108 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5110 if (!hdev->discovery.uuids) {
5111 err = mgmt_cmd_complete(sk, hdev->id,
5112 MGMT_OP_START_SERVICE_DISCOVERY,
5114 &cp->type, sizeof(cp->type));
5115 mgmt_pending_remove(cmd);
5120 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5121 queue_work(hdev->req_workqueue, &hdev->discov_update);
5125 hci_dev_unlock(hdev);
5129 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5131 struct mgmt_pending_cmd *cmd;
5133 bt_dev_dbg(hdev, "status %u", status);
5137 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5139 cmd->cmd_complete(cmd, mgmt_status(status));
5140 mgmt_pending_remove(cmd);
5143 hci_dev_unlock(hdev);
5145 /* Handle suspend notifier */
5146 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5147 bt_dev_dbg(hdev, "Paused discovery");
5148 wake_up(&hdev->suspend_wait_q);
5152 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5155 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5156 struct mgmt_pending_cmd *cmd;
5159 bt_dev_dbg(hdev, "sock %p", sk);
5163 if (!hci_discovery_active(hdev)) {
5164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5165 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5166 sizeof(mgmt_cp->type));
5170 if (hdev->discovery.type != mgmt_cp->type) {
5171 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5172 MGMT_STATUS_INVALID_PARAMS,
5173 &mgmt_cp->type, sizeof(mgmt_cp->type));
5177 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5183 cmd->cmd_complete = generic_cmd_complete;
5185 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5186 queue_work(hdev->req_workqueue, &hdev->discov_update);
5190 hci_dev_unlock(hdev);
5194 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5197 struct mgmt_cp_confirm_name *cp = data;
5198 struct inquiry_entry *e;
5201 bt_dev_dbg(hdev, "sock %p", sk);
5205 if (!hci_discovery_active(hdev)) {
5206 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5207 MGMT_STATUS_FAILED, &cp->addr,
5212 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5214 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5215 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5220 if (cp->name_known) {
5221 e->name_state = NAME_KNOWN;
5224 e->name_state = NAME_NEEDED;
5225 hci_inquiry_cache_update_resolve(hdev, e);
5228 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5229 &cp->addr, sizeof(cp->addr));
5232 hci_dev_unlock(hdev);
5236 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5239 struct mgmt_cp_block_device *cp = data;
5243 bt_dev_dbg(hdev, "sock %p", sk);
5245 if (!bdaddr_type_is_valid(cp->addr.type))
5246 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5247 MGMT_STATUS_INVALID_PARAMS,
5248 &cp->addr, sizeof(cp->addr));
5252 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5255 status = MGMT_STATUS_FAILED;
5259 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5261 status = MGMT_STATUS_SUCCESS;
5264 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5265 &cp->addr, sizeof(cp->addr));
5267 hci_dev_unlock(hdev);
5272 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5275 struct mgmt_cp_unblock_device *cp = data;
5279 bt_dev_dbg(hdev, "sock %p", sk);
5281 if (!bdaddr_type_is_valid(cp->addr.type))
5282 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5283 MGMT_STATUS_INVALID_PARAMS,
5284 &cp->addr, sizeof(cp->addr));
5288 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5291 status = MGMT_STATUS_INVALID_PARAMS;
5295 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5297 status = MGMT_STATUS_SUCCESS;
5300 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5301 &cp->addr, sizeof(cp->addr));
5303 hci_dev_unlock(hdev);
5308 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5311 struct mgmt_cp_set_device_id *cp = data;
5312 struct hci_request req;
5316 bt_dev_dbg(hdev, "sock %p", sk);
5318 source = __le16_to_cpu(cp->source);
5320 if (source > 0x0002)
5321 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5322 MGMT_STATUS_INVALID_PARAMS);
5326 hdev->devid_source = source;
5327 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5328 hdev->devid_product = __le16_to_cpu(cp->product);
5329 hdev->devid_version = __le16_to_cpu(cp->version);
5331 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5334 hci_req_init(&req, hdev);
5335 __hci_req_update_eir(&req);
5336 hci_req_run(&req, NULL);
5338 hci_dev_unlock(hdev);
5343 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5346 bt_dev_dbg(hdev, "status %u", status);
5349 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5352 struct cmd_lookup match = { NULL, hdev };
5353 struct hci_request req;
5355 struct adv_info *adv_instance;
5361 u8 mgmt_err = mgmt_status(status);
5363 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5364 cmd_status_rsp, &mgmt_err);
5368 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5369 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5371 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5373 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5376 new_settings(hdev, match.sk);
5381 /* Handle suspend notifier */
5382 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5383 hdev->suspend_tasks)) {
5384 bt_dev_dbg(hdev, "Paused advertising");
5385 wake_up(&hdev->suspend_wait_q);
5386 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5387 hdev->suspend_tasks)) {
5388 bt_dev_dbg(hdev, "Unpaused advertising");
5389 wake_up(&hdev->suspend_wait_q);
5392 /* If "Set Advertising" was just disabled and instance advertising was
5393 * set up earlier, then re-enable multi-instance advertising.
5395 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5396 list_empty(&hdev->adv_instances))
5399 instance = hdev->cur_adv_instance;
5401 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5402 struct adv_info, list);
5406 instance = adv_instance->instance;
5409 hci_req_init(&req, hdev);
5411 err = __hci_req_schedule_adv_instance(&req, instance, true);
5414 err = hci_req_run(&req, enable_advertising_instance);
5417 bt_dev_err(hdev, "failed to re-configure advertising");
5420 hci_dev_unlock(hdev);
5423 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5426 struct mgmt_mode *cp = data;
5427 struct mgmt_pending_cmd *cmd;
5428 struct hci_request req;
5432 bt_dev_dbg(hdev, "sock %p", sk);
5434 status = mgmt_le_support(hdev);
5436 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5439 /* Enabling the experimental LL Privay support disables support for
5442 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5444 MGMT_STATUS_NOT_SUPPORTED);
5446 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5448 MGMT_STATUS_INVALID_PARAMS);
5450 if (hdev->advertising_paused)
5451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5458 /* The following conditions are ones which mean that we should
5459 * not do any HCI communication but directly send a mgmt
5460 * response to user space (after toggling the flag if
5463 if (!hdev_is_powered(hdev) ||
5464 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5465 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5466 hci_conn_num(hdev, LE_LINK) > 0 ||
5467 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5468 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5472 hdev->cur_adv_instance = 0x00;
5473 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5474 if (cp->val == 0x02)
5475 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5477 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5479 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5480 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5483 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5488 err = new_settings(hdev, sk);
5493 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5494 pending_find(MGMT_OP_SET_LE, hdev)) {
5495 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5500 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5506 hci_req_init(&req, hdev);
5508 if (cp->val == 0x02)
5509 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5511 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5513 cancel_adv_timeout(hdev);
5516 /* Switch to instance "0" for the Set Advertising setting.
5517 * We cannot use update_[adv|scan_rsp]_data() here as the
5518 * HCI_ADVERTISING flag is not yet set.
5520 hdev->cur_adv_instance = 0x00;
5522 if (ext_adv_capable(hdev)) {
5523 __hci_req_start_ext_adv(&req, 0x00);
5525 __hci_req_update_adv_data(&req, 0x00);
5526 __hci_req_update_scan_rsp_data(&req, 0x00);
5527 __hci_req_enable_advertising(&req);
5530 __hci_req_disable_advertising(&req);
5533 err = hci_req_run(&req, set_advertising_complete);
5535 mgmt_pending_remove(cmd);
5538 hci_dev_unlock(hdev);
5542 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5543 void *data, u16 len)
5545 struct mgmt_cp_set_static_address *cp = data;
5548 bt_dev_dbg(hdev, "sock %p", sk);
5550 if (!lmp_le_capable(hdev))
5551 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5552 MGMT_STATUS_NOT_SUPPORTED);
5554 if (hdev_is_powered(hdev))
5555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5556 MGMT_STATUS_REJECTED);
5558 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5559 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5560 return mgmt_cmd_status(sk, hdev->id,
5561 MGMT_OP_SET_STATIC_ADDRESS,
5562 MGMT_STATUS_INVALID_PARAMS);
5564 /* Two most significant bits shall be set */
5565 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5566 return mgmt_cmd_status(sk, hdev->id,
5567 MGMT_OP_SET_STATIC_ADDRESS,
5568 MGMT_STATUS_INVALID_PARAMS);
5573 bacpy(&hdev->static_addr, &cp->bdaddr);
5575 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5579 err = new_settings(hdev, sk);
5582 hci_dev_unlock(hdev);
5586 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5587 void *data, u16 len)
5589 struct mgmt_cp_set_scan_params *cp = data;
5590 __u16 interval, window;
5593 bt_dev_dbg(hdev, "sock %p", sk);
5595 if (!lmp_le_capable(hdev))
5596 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5597 MGMT_STATUS_NOT_SUPPORTED);
5599 interval = __le16_to_cpu(cp->interval);
5601 if (interval < 0x0004 || interval > 0x4000)
5602 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5603 MGMT_STATUS_INVALID_PARAMS);
5605 window = __le16_to_cpu(cp->window);
5607 if (window < 0x0004 || window > 0x4000)
5608 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5609 MGMT_STATUS_INVALID_PARAMS);
5611 if (window > interval)
5612 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5613 MGMT_STATUS_INVALID_PARAMS);
5617 hdev->le_scan_interval = interval;
5618 hdev->le_scan_window = window;
5620 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5623 /* If background scan is running, restart it so new parameters are
5626 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5627 hdev->discovery.state == DISCOVERY_STOPPED) {
5628 struct hci_request req;
5630 hci_req_init(&req, hdev);
5632 hci_req_add_le_scan_disable(&req, false);
5633 hci_req_add_le_passive_scan(&req);
5635 hci_req_run(&req, NULL);
5638 hci_dev_unlock(hdev);
5643 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5646 struct mgmt_pending_cmd *cmd;
5648 bt_dev_dbg(hdev, "status 0x%02x", status);
5652 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5657 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5658 mgmt_status(status));
5660 struct mgmt_mode *cp = cmd->param;
5663 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5665 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5667 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5668 new_settings(hdev, cmd->sk);
5671 mgmt_pending_remove(cmd);
5674 hci_dev_unlock(hdev);
5677 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5678 void *data, u16 len)
5680 struct mgmt_mode *cp = data;
5681 struct mgmt_pending_cmd *cmd;
5682 struct hci_request req;
5685 bt_dev_dbg(hdev, "sock %p", sk);
5687 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5688 hdev->hci_ver < BLUETOOTH_VER_1_2)
5689 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5690 MGMT_STATUS_NOT_SUPPORTED);
5692 if (cp->val != 0x00 && cp->val != 0x01)
5693 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5694 MGMT_STATUS_INVALID_PARAMS);
5698 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5699 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5704 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5705 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5710 if (!hdev_is_powered(hdev)) {
5711 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5712 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5714 new_settings(hdev, sk);
5718 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5725 hci_req_init(&req, hdev);
5727 __hci_req_write_fast_connectable(&req, cp->val);
5729 err = hci_req_run(&req, fast_connectable_complete);
5731 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5732 MGMT_STATUS_FAILED);
5733 mgmt_pending_remove(cmd);
5737 hci_dev_unlock(hdev);
5742 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5744 struct mgmt_pending_cmd *cmd;
5746 bt_dev_dbg(hdev, "status 0x%02x", status);
5750 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5755 u8 mgmt_err = mgmt_status(status);
5757 /* We need to restore the flag if related HCI commands
5760 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5762 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5764 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5765 new_settings(hdev, cmd->sk);
5768 mgmt_pending_remove(cmd);
5771 hci_dev_unlock(hdev);
5774 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5776 struct mgmt_mode *cp = data;
5777 struct mgmt_pending_cmd *cmd;
5778 struct hci_request req;
5781 bt_dev_dbg(hdev, "sock %p", sk);
5783 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5785 MGMT_STATUS_NOT_SUPPORTED);
5787 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5789 MGMT_STATUS_REJECTED);
5791 if (cp->val != 0x00 && cp->val != 0x01)
5792 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5793 MGMT_STATUS_INVALID_PARAMS);
5797 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5798 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5802 if (!hdev_is_powered(hdev)) {
5804 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5805 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5806 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5807 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5808 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5811 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5813 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5817 err = new_settings(hdev, sk);
5821 /* Reject disabling when powered on */
5823 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5824 MGMT_STATUS_REJECTED);
5827 /* When configuring a dual-mode controller to operate
5828 * with LE only and using a static address, then switching
5829 * BR/EDR back on is not allowed.
5831 * Dual-mode controllers shall operate with the public
5832 * address as its identity address for BR/EDR and LE. So
5833 * reject the attempt to create an invalid configuration.
5835 * The same restrictions applies when secure connections
5836 * has been enabled. For BR/EDR this is a controller feature
5837 * while for LE it is a host stack feature. This means that
5838 * switching BR/EDR back on when secure connections has been
5839 * enabled is not a supported transaction.
5841 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5842 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5843 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5844 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5845 MGMT_STATUS_REJECTED);
5850 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5851 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5856 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5862 /* We need to flip the bit already here so that
5863 * hci_req_update_adv_data generates the correct flags.
5865 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5867 hci_req_init(&req, hdev);
5869 __hci_req_write_fast_connectable(&req, false);
5870 __hci_req_update_scan(&req);
5872 /* Since only the advertising data flags will change, there
5873 * is no need to update the scan response data.
5875 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5877 err = hci_req_run(&req, set_bredr_complete);
5879 mgmt_pending_remove(cmd);
5882 hci_dev_unlock(hdev);
5886 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5888 struct mgmt_pending_cmd *cmd;
5889 struct mgmt_mode *cp;
5891 bt_dev_dbg(hdev, "status %u", status);
5895 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5900 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5901 mgmt_status(status));
5909 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5910 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5913 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5914 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5917 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5918 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5922 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5923 new_settings(hdev, cmd->sk);
5926 mgmt_pending_remove(cmd);
5928 hci_dev_unlock(hdev);
5931 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5932 void *data, u16 len)
5934 struct mgmt_mode *cp = data;
5935 struct mgmt_pending_cmd *cmd;
5936 struct hci_request req;
5940 bt_dev_dbg(hdev, "sock %p", sk);
5942 if (!lmp_sc_capable(hdev) &&
5943 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5945 MGMT_STATUS_NOT_SUPPORTED);
5947 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5948 lmp_sc_capable(hdev) &&
5949 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5950 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5951 MGMT_STATUS_REJECTED);
5953 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5954 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5955 MGMT_STATUS_INVALID_PARAMS);
5959 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5960 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5964 changed = !hci_dev_test_and_set_flag(hdev,
5966 if (cp->val == 0x02)
5967 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5969 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5971 changed = hci_dev_test_and_clear_flag(hdev,
5973 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5976 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5981 err = new_settings(hdev, sk);
5986 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5987 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5994 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5995 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5996 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6000 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6006 hci_req_init(&req, hdev);
6007 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
6008 err = hci_req_run(&req, sc_enable_complete);
6010 mgmt_pending_remove(cmd);
6015 hci_dev_unlock(hdev);
6019 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6020 void *data, u16 len)
6022 struct mgmt_mode *cp = data;
6023 bool changed, use_changed;
6026 bt_dev_dbg(hdev, "sock %p", sk);
6028 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6030 MGMT_STATUS_INVALID_PARAMS);
6035 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6037 changed = hci_dev_test_and_clear_flag(hdev,
6038 HCI_KEEP_DEBUG_KEYS);
6040 if (cp->val == 0x02)
6041 use_changed = !hci_dev_test_and_set_flag(hdev,
6042 HCI_USE_DEBUG_KEYS);
6044 use_changed = hci_dev_test_and_clear_flag(hdev,
6045 HCI_USE_DEBUG_KEYS);
6047 if (hdev_is_powered(hdev) && use_changed &&
6048 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6049 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6050 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6051 sizeof(mode), &mode);
6054 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6059 err = new_settings(hdev, sk);
6062 hci_dev_unlock(hdev);
6066 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6069 struct mgmt_cp_set_privacy *cp = cp_data;
6073 bt_dev_dbg(hdev, "sock %p", sk);
6075 if (!lmp_le_capable(hdev))
6076 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6077 MGMT_STATUS_NOT_SUPPORTED);
6079 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6080 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6081 MGMT_STATUS_INVALID_PARAMS);
6084 /* commenting out since set privacy command is always rejected
6085 * if this condition is enabled.
6087 if (hdev_is_powered(hdev))
6088 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6089 MGMT_STATUS_REJECTED);
6094 /* If user space supports this command it is also expected to
6095 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6097 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6100 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6101 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6102 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6103 hci_adv_instances_set_rpa_expired(hdev, true);
6104 if (cp->privacy == 0x02)
6105 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6107 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6109 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6110 memset(hdev->irk, 0, sizeof(hdev->irk));
6111 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6112 hci_adv_instances_set_rpa_expired(hdev, false);
6113 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6116 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6121 err = new_settings(hdev, sk);
6124 hci_dev_unlock(hdev);
6128 static bool irk_is_valid(struct mgmt_irk_info *irk)
6130 switch (irk->addr.type) {
6131 case BDADDR_LE_PUBLIC:
6134 case BDADDR_LE_RANDOM:
6135 /* Two most significant bits shall be set */
6136 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6144 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6147 struct mgmt_cp_load_irks *cp = cp_data;
6148 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6149 sizeof(struct mgmt_irk_info));
6150 u16 irk_count, expected_len;
6153 bt_dev_dbg(hdev, "sock %p", sk);
6155 if (!lmp_le_capable(hdev))
6156 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6157 MGMT_STATUS_NOT_SUPPORTED);
6159 irk_count = __le16_to_cpu(cp->irk_count);
6160 if (irk_count > max_irk_count) {
6161 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6163 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6164 MGMT_STATUS_INVALID_PARAMS);
6167 expected_len = struct_size(cp, irks, irk_count);
6168 if (expected_len != len) {
6169 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6171 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6172 MGMT_STATUS_INVALID_PARAMS);
6175 bt_dev_dbg(hdev, "irk_count %u", irk_count);
6177 for (i = 0; i < irk_count; i++) {
6178 struct mgmt_irk_info *key = &cp->irks[i];
6180 if (!irk_is_valid(key))
6181 return mgmt_cmd_status(sk, hdev->id,
6183 MGMT_STATUS_INVALID_PARAMS);
6188 hci_smp_irks_clear(hdev);
6190 for (i = 0; i < irk_count; i++) {
6191 struct mgmt_irk_info *irk = &cp->irks[i];
6193 if (hci_is_blocked_key(hdev,
6194 HCI_BLOCKED_KEY_TYPE_IRK,
6196 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6201 hci_add_irk(hdev, &irk->addr.bdaddr,
6202 le_addr_type(irk->addr.type), irk->val,
6206 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6208 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6210 hci_dev_unlock(hdev);
6216 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
6217 void *data, u16 len)
6219 struct mgmt_cp_set_advertising_params *cp = data;
6224 BT_DBG("%s", hdev->name);
6226 if (!lmp_le_capable(hdev))
6227 return mgmt_cmd_status(sk, hdev->id,
6228 MGMT_OP_SET_ADVERTISING_PARAMS,
6229 MGMT_STATUS_NOT_SUPPORTED);
6231 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6232 return mgmt_cmd_status(sk, hdev->id,
6233 MGMT_OP_SET_ADVERTISING_PARAMS,
6236 min_interval = __le16_to_cpu(cp->interval_min);
6237 max_interval = __le16_to_cpu(cp->interval_max);
6239 if (min_interval > max_interval ||
6240 min_interval < 0x0020 || max_interval > 0x4000)
6241 return mgmt_cmd_status(sk, hdev->id,
6242 MGMT_OP_SET_ADVERTISING_PARAMS,
6243 MGMT_STATUS_INVALID_PARAMS);
6247 hdev->le_adv_min_interval = min_interval;
6248 hdev->le_adv_max_interval = max_interval;
6249 hdev->adv_filter_policy = cp->filter_policy;
6250 hdev->adv_type = cp->type;
6252 err = mgmt_cmd_complete(sk, hdev->id,
6253 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
6255 hci_dev_unlock(hdev);
6260 static void set_advertising_data_complete(struct hci_dev *hdev,
6261 u8 status, u16 opcode)
6263 struct mgmt_cp_set_advertising_data *cp;
6264 struct mgmt_pending_cmd *cmd;
6266 BT_DBG("status 0x%02x", status);
6270 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
6277 mgmt_cmd_status(cmd->sk, hdev->id,
6278 MGMT_OP_SET_ADVERTISING_DATA,
6279 mgmt_status(status));
6281 mgmt_cmd_complete(cmd->sk, hdev->id,
6282 MGMT_OP_SET_ADVERTISING_DATA, 0,
6285 mgmt_pending_remove(cmd);
6288 hci_dev_unlock(hdev);
6291 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6292 void *data, u16 len)
6294 struct mgmt_pending_cmd *cmd;
6295 struct hci_request req;
6296 struct mgmt_cp_set_advertising_data *cp = data;
6297 struct hci_cp_le_set_adv_data adv;
6300 BT_DBG("%s", hdev->name);
6302 if (!lmp_le_capable(hdev)) {
6303 return mgmt_cmd_status(sk, hdev->id,
6304 MGMT_OP_SET_ADVERTISING_DATA,
6305 MGMT_STATUS_NOT_SUPPORTED);
6310 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6311 err = mgmt_cmd_status(sk, hdev->id,
6312 MGMT_OP_SET_ADVERTISING_DATA,
6317 if (len > HCI_MAX_AD_LENGTH) {
6318 err = mgmt_cmd_status(sk, hdev->id,
6319 MGMT_OP_SET_ADVERTISING_DATA,
6320 MGMT_STATUS_INVALID_PARAMS);
6324 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6331 hci_req_init(&req, hdev);
6333 memset(&adv, 0, sizeof(adv));
6334 memcpy(adv.data, cp->data, len);
6337 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6339 err = hci_req_run(&req, set_advertising_data_complete);
6341 mgmt_pending_remove(cmd);
6344 hci_dev_unlock(hdev);
6349 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6352 struct mgmt_cp_set_scan_rsp_data *cp;
6353 struct mgmt_pending_cmd *cmd;
6355 BT_DBG("status 0x%02x", status);
6359 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6366 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6367 mgmt_status(status));
6369 mgmt_cmd_complete(cmd->sk, hdev->id,
6370 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6373 mgmt_pending_remove(cmd);
6376 hci_dev_unlock(hdev);
6379 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6382 struct mgmt_pending_cmd *cmd;
6383 struct hci_request req;
6384 struct mgmt_cp_set_scan_rsp_data *cp = data;
6385 struct hci_cp_le_set_scan_rsp_data rsp;
6388 BT_DBG("%s", hdev->name);
6390 if (!lmp_le_capable(hdev))
6391 return mgmt_cmd_status(sk, hdev->id,
6392 MGMT_OP_SET_SCAN_RSP_DATA,
6393 MGMT_STATUS_NOT_SUPPORTED);
6397 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6398 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6403 if (len > HCI_MAX_AD_LENGTH) {
6404 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6405 MGMT_STATUS_INVALID_PARAMS);
6409 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6415 hci_req_init(&req, hdev);
6417 memset(&rsp, 0, sizeof(rsp));
6418 memcpy(rsp.data, cp->data, len);
6421 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6423 err = hci_req_run(&req, set_scan_rsp_data_complete);
6425 mgmt_pending_remove(cmd);
6428 hci_dev_unlock(hdev);
6433 /* Adv White List feature */
6434 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6436 struct mgmt_cp_add_dev_white_list *cp;
6437 struct mgmt_pending_cmd *cmd;
6439 BT_DBG("status 0x%02x", status);
6443 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
6450 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6451 mgmt_status(status));
6453 mgmt_cmd_complete(cmd->sk, hdev->id,
6454 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
6456 mgmt_pending_remove(cmd);
6459 hci_dev_unlock(hdev);
6462 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
6463 void *data, u16 len)
6465 struct mgmt_pending_cmd *cmd;
6466 struct mgmt_cp_add_dev_white_list *cp = data;
6467 struct hci_request req;
6470 BT_DBG("%s", hdev->name);
6472 if (!lmp_le_capable(hdev))
6473 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6474 MGMT_STATUS_NOT_SUPPORTED);
6476 if (!hdev_is_powered(hdev))
6477 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6478 MGMT_STATUS_REJECTED);
6482 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
6483 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6488 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
6494 hci_req_init(&req, hdev);
6496 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
6498 err = hci_req_run(&req, add_white_list_complete);
6500 mgmt_pending_remove(cmd);
6505 hci_dev_unlock(hdev);
6510 static void remove_from_white_list_complete(struct hci_dev *hdev,
6511 u8 status, u16 opcode)
6513 struct mgmt_cp_remove_dev_from_white_list *cp;
6514 struct mgmt_pending_cmd *cmd;
6516 BT_DBG("status 0x%02x", status);
6520 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
6527 mgmt_cmd_status(cmd->sk, hdev->id,
6528 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6529 mgmt_status(status));
6531 mgmt_cmd_complete(cmd->sk, hdev->id,
6532 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
6535 mgmt_pending_remove(cmd);
6538 hci_dev_unlock(hdev);
6541 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
6542 void *data, u16 len)
6544 struct mgmt_pending_cmd *cmd;
6545 struct mgmt_cp_remove_dev_from_white_list *cp = data;
6546 struct hci_request req;
6549 BT_DBG("%s", hdev->name);
6551 if (!lmp_le_capable(hdev))
6552 return mgmt_cmd_status(sk, hdev->id,
6553 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6554 MGMT_STATUS_NOT_SUPPORTED);
6556 if (!hdev_is_powered(hdev))
6557 return mgmt_cmd_status(sk, hdev->id,
6558 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6559 MGMT_STATUS_REJECTED);
6563 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
6564 err = mgmt_cmd_status(sk, hdev->id,
6565 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6570 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6577 hci_req_init(&req, hdev);
6579 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
6581 err = hci_req_run(&req, remove_from_white_list_complete);
6583 mgmt_pending_remove(cmd);
6588 hci_dev_unlock(hdev);
6593 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
6596 struct mgmt_pending_cmd *cmd;
6598 BT_DBG("status 0x%02x", status);
6602 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
6607 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6608 mgmt_status(status));
6610 mgmt_cmd_complete(cmd->sk, hdev->id,
6611 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6614 mgmt_pending_remove(cmd);
6617 hci_dev_unlock(hdev);
6620 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
6621 void *data, u16 len)
6623 struct mgmt_pending_cmd *cmd;
6624 struct hci_request req;
6627 BT_DBG("%s", hdev->name);
6629 if (!lmp_le_capable(hdev))
6630 return mgmt_cmd_status(sk, hdev->id,
6631 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6632 MGMT_STATUS_NOT_SUPPORTED);
6634 if (!hdev_is_powered(hdev))
6635 return mgmt_cmd_status(sk, hdev->id,
6636 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6637 MGMT_STATUS_REJECTED);
6641 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
6642 err = mgmt_cmd_status(sk, hdev->id,
6643 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6648 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6655 hci_req_init(&req, hdev);
6657 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
6659 err = hci_req_run(&req, clear_white_list_complete);
6661 mgmt_pending_remove(cmd);
6666 hci_dev_unlock(hdev);
6671 static void set_rssi_threshold_complete(struct hci_dev *hdev,
6672 u8 status, u16 opcode)
6674 struct mgmt_pending_cmd *cmd;
6676 BT_DBG("status 0x%02x", status);
6680 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6685 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6686 mgmt_status(status));
6688 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6691 mgmt_pending_remove(cmd);
6694 hci_dev_unlock(hdev);
6697 static void set_rssi_disable_complete(struct hci_dev *hdev,
6698 u8 status, u16 opcode)
6700 struct mgmt_pending_cmd *cmd;
6702 BT_DBG("status 0x%02x", status);
6706 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6711 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6712 mgmt_status(status));
6714 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6717 mgmt_pending_remove(cmd);
6720 hci_dev_unlock(hdev);
6723 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6724 void *data, u16 len)
6727 struct hci_cp_set_rssi_threshold th = { 0, };
6728 struct mgmt_cp_set_enable_rssi *cp = data;
6729 struct hci_conn *conn;
6730 struct mgmt_pending_cmd *cmd;
6731 struct hci_request req;
6736 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6738 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6739 MGMT_STATUS_FAILED);
6743 if (!lmp_le_capable(hdev)) {
6744 mgmt_pending_remove(cmd);
6745 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6746 MGMT_STATUS_NOT_SUPPORTED);
6750 if (!hdev_is_powered(hdev)) {
6751 BT_DBG("%s", hdev->name);
6752 mgmt_pending_remove(cmd);
6753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6754 MGMT_STATUS_NOT_POWERED);
6758 if (cp->link_type == 0x01)
6759 dest_type = LE_LINK;
6761 dest_type = ACL_LINK;
6763 /* Get LE/ACL link handle info */
6764 conn = hci_conn_hash_lookup_ba(hdev,
6765 dest_type, &cp->bdaddr);
6768 err = mgmt_cmd_complete(sk, hdev->id,
6769 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
6770 mgmt_pending_remove(cmd);
6774 hci_req_init(&req, hdev);
6776 th.hci_le_ext_opcode = 0x0B;
6778 th.conn_handle = conn->handle;
6779 th.alert_mask = 0x07;
6780 th.low_th = cp->low_th;
6781 th.in_range_th = cp->in_range_th;
6782 th.high_th = cp->high_th;
6784 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6785 err = hci_req_run(&req, set_rssi_threshold_complete);
6788 mgmt_pending_remove(cmd);
6789 BT_ERR("Error in requesting hci_req_run");
6794 hci_dev_unlock(hdev);
6798 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6799 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6801 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6802 struct mgmt_cp_set_enable_rssi *cp = data;
6803 struct mgmt_pending_cmd *cmd;
6808 mgmt_rp.status = rp->status;
6809 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6810 mgmt_rp.bt_address = cp->bdaddr;
6811 mgmt_rp.link_type = cp->link_type;
6813 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6814 MGMT_STATUS_SUCCESS, &mgmt_rp,
6815 sizeof(struct mgmt_cc_rsp_enable_rssi));
6817 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6818 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6820 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6821 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6822 &mgmt_rp.bt_address, true);
6826 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6828 mgmt_pending_remove(cmd);
6830 hci_dev_unlock(hdev);
6833 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6834 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6836 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6837 struct mgmt_cp_disable_rssi *cp = data;
6838 struct mgmt_pending_cmd *cmd;
6843 mgmt_rp.status = rp->status;
6844 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6845 mgmt_rp.bt_address = cp->bdaddr;
6846 mgmt_rp.link_type = cp->link_type;
6848 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6849 MGMT_STATUS_SUCCESS, &mgmt_rp,
6850 sizeof(struct mgmt_cc_rsp_enable_rssi));
6852 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6853 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6855 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6856 &mgmt_rp.bt_address, false);
6860 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6862 mgmt_pending_remove(cmd);
6864 hci_dev_unlock(hdev);
6867 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6868 void *data, u16 len)
6870 struct mgmt_pending_cmd *cmd;
6871 struct hci_request req;
6872 struct hci_cp_set_enable_rssi cp_en = { 0, };
6875 BT_DBG("Set Disable RSSI.");
6877 cp_en.hci_le_ext_opcode = 0x01;
6878 cp_en.le_enable_cs_Features = 0x00;
6879 cp_en.data[0] = 0x00;
6880 cp_en.data[1] = 0x00;
6881 cp_en.data[2] = 0x00;
6885 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6887 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6888 MGMT_STATUS_FAILED);
6892 if (!lmp_le_capable(hdev)) {
6893 mgmt_pending_remove(cmd);
6894 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6895 MGMT_STATUS_NOT_SUPPORTED);
6899 if (!hdev_is_powered(hdev)) {
6900 BT_DBG("%s", hdev->name);
6901 mgmt_pending_remove(cmd);
6902 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6903 MGMT_STATUS_NOT_POWERED);
6907 hci_req_init(&req, hdev);
6909 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6910 sizeof(struct hci_cp_set_enable_rssi),
6911 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6912 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6914 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6915 err = hci_req_run(&req, set_rssi_disable_complete);
6918 mgmt_pending_remove(cmd);
6919 BT_ERR("Error in requesting hci_req_run");
6924 hci_dev_unlock(hdev);
6928 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6930 struct hci_cc_rsp_enable_rssi *rp = response;
6931 struct mgmt_pending_cmd *cmd_enable = NULL;
6932 struct mgmt_pending_cmd *cmd_disable = NULL;
6933 struct mgmt_cp_set_enable_rssi *cp_en;
6934 struct mgmt_cp_disable_rssi *cp_dis;
6937 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6938 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6939 hci_dev_unlock(hdev);
6942 BT_DBG("Enable Request");
6945 BT_DBG("Disable Request");
6948 cp_en = cmd_enable->param;
6953 switch (rp->le_ext_opcode) {
6955 BT_DBG("RSSI enabled.. Setting Threshold...");
6956 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6957 cp_en, sizeof(*cp_en));
6961 BT_DBG("Sending RSSI enable success");
6962 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6963 cp_en, rp, rp->status);
6967 } else if (cmd_disable) {
6968 cp_dis = cmd_disable->param;
6973 switch (rp->le_ext_opcode) {
6975 BT_DBG("Sending RSSI disable success");
6976 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6977 cp_dis, rp, rp->status);
6982 * Only unset RSSI Threshold values for the Link if
6983 * RSSI is monitored for other BREDR or LE Links
6985 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6986 BT_DBG("Unset Threshold. Other links being monitored");
6987 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6988 cp_dis, rp, rp->status);
6990 BT_DBG("Unset Threshold. Disabling...");
6991 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6992 cp_dis, sizeof(*cp_dis));
6999 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7002 struct mgmt_pending_cmd *cmd;
7004 BT_DBG("status 0x%02x", status);
7008 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7013 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7014 mgmt_status(status));
7016 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7019 mgmt_pending_remove(cmd);
7022 hci_dev_unlock(hdev);
7025 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7026 void *data, u16 len)
7028 struct mgmt_pending_cmd *cmd;
7029 struct hci_request req;
7030 struct mgmt_cp_set_enable_rssi *cp = data;
7031 struct hci_cp_set_enable_rssi cp_en = { 0, };
7034 BT_DBG("Set Enable RSSI.");
7036 cp_en.hci_le_ext_opcode = 0x01;
7037 cp_en.le_enable_cs_Features = 0x04;
7038 cp_en.data[0] = 0x00;
7039 cp_en.data[1] = 0x00;
7040 cp_en.data[2] = 0x00;
7044 if (!lmp_le_capable(hdev)) {
7045 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7046 MGMT_STATUS_NOT_SUPPORTED);
7050 if (!hdev_is_powered(hdev)) {
7051 BT_DBG("%s", hdev->name);
7052 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7053 MGMT_STATUS_NOT_POWERED);
7057 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
7058 BT_DBG("%s", hdev->name);
7059 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7064 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
7067 BT_DBG("%s", hdev->name);
7072 /* If RSSI is already enabled directly set Threshold values */
7073 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
7074 hci_dev_unlock(hdev);
7075 BT_DBG("RSSI Enabled. Directly set Threshold");
7076 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
7080 hci_req_init(&req, hdev);
7082 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7083 sizeof(struct hci_cp_set_enable_rssi),
7084 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7085 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7087 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7088 err = hci_req_run(&req, set_rssi_enable_complete);
7091 mgmt_pending_remove(cmd);
7092 BT_ERR("Error in requesting hci_req_run");
7097 hci_dev_unlock(hdev);
7102 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7104 struct mgmt_pending_cmd *cmd;
7106 BT_DBG("status 0x%02x", status);
7110 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
7114 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7115 MGMT_STATUS_SUCCESS, &status, 1);
7117 mgmt_pending_remove(cmd);
7120 hci_dev_unlock(hdev);
7123 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
7126 struct mgmt_pending_cmd *cmd;
7127 struct hci_request req;
7128 struct mgmt_cp_get_raw_rssi *cp = data;
7129 struct hci_cp_get_raw_rssi hci_cp;
7131 struct hci_conn *conn;
7135 BT_DBG("Get Raw RSSI.");
7139 if (!lmp_le_capable(hdev)) {
7140 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7141 MGMT_STATUS_NOT_SUPPORTED);
7145 if (cp->link_type == 0x01)
7146 dest_type = LE_LINK;
7148 dest_type = ACL_LINK;
7150 /* Get LE/BREDR link handle info */
7151 conn = hci_conn_hash_lookup_ba(hdev,
7152 dest_type, &cp->bt_address);
7154 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7155 MGMT_STATUS_NOT_CONNECTED);
7158 hci_cp.conn_handle = conn->handle;
7160 if (!hdev_is_powered(hdev)) {
7161 BT_DBG("%s", hdev->name);
7162 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7163 MGMT_STATUS_NOT_POWERED);
7167 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
7168 BT_DBG("%s", hdev->name);
7169 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7174 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
7176 BT_DBG("%s", hdev->name);
7181 hci_req_init(&req, hdev);
7183 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
7184 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
7185 err = hci_req_run(&req, get_raw_rssi_complete);
7188 mgmt_pending_remove(cmd);
7189 BT_ERR("Error in requesting hci_req_run");
7193 hci_dev_unlock(hdev);
7198 void mgmt_raw_rssi_response(struct hci_dev *hdev,
7199 struct hci_cc_rp_get_raw_rssi *rp, int success)
7201 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
7202 struct hci_conn *conn;
7204 mgmt_rp.status = rp->status;
7205 mgmt_rp.rssi_dbm = rp->rssi_dbm;
7207 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
7211 bacpy(&mgmt_rp.bt_address, &conn->dst);
7212 if (conn->type == LE_LINK)
7213 mgmt_rp.link_type = 0x01;
7215 mgmt_rp.link_type = 0x00;
7217 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
7218 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
7221 static void set_disable_threshold_complete(struct hci_dev *hdev,
7222 u8 status, u16 opcode)
7224 struct mgmt_pending_cmd *cmd;
7226 BT_DBG("status 0x%02x", status);
7230 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7234 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7235 MGMT_STATUS_SUCCESS, &status, 1);
7237 mgmt_pending_remove(cmd);
7240 hci_dev_unlock(hdev);
7243 /** Removes monitoring for a link*/
7244 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
7245 void *data, u16 len)
7248 struct hci_cp_set_rssi_threshold th = { 0, };
7249 struct mgmt_cp_disable_rssi *cp = data;
7250 struct hci_conn *conn;
7251 struct mgmt_pending_cmd *cmd;
7252 struct hci_request req;
7255 BT_DBG("Set Disable RSSI.");
7259 if (!lmp_le_capable(hdev)) {
7260 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7261 MGMT_STATUS_NOT_SUPPORTED);
7265 /* Get LE/ACL link handle info*/
7266 if (cp->link_type == 0x01)
7267 dest_type = LE_LINK;
7269 dest_type = ACL_LINK;
7271 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
7273 err = mgmt_cmd_complete(sk, hdev->id,
7274 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
7278 th.hci_le_ext_opcode = 0x0B;
7280 th.conn_handle = conn->handle;
7281 th.alert_mask = 0x00;
7283 th.in_range_th = 0x00;
7286 if (!hdev_is_powered(hdev)) {
7287 BT_DBG("%s", hdev->name);
7288 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7293 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
7294 BT_DBG("%s", hdev->name);
7295 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7300 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
7303 BT_DBG("%s", hdev->name);
7308 hci_req_init(&req, hdev);
7310 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7311 err = hci_req_run(&req, set_disable_threshold_complete);
7313 mgmt_pending_remove(cmd);
7314 BT_ERR("Error in requesting hci_req_run");
7319 hci_dev_unlock(hdev);
7324 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
7325 s8 alert_type, s8 rssi_dbm)
7327 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
7328 struct hci_conn *conn;
7330 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
7331 conn_handle, alert_type, rssi_dbm);
7333 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
7336 BT_ERR("RSSI alert Error: Device not found for handle");
7339 bacpy(&mgmt_ev.bdaddr, &conn->dst);
7341 if (conn->type == LE_LINK)
7342 mgmt_ev.link_type = 0x01;
7344 mgmt_ev.link_type = 0x00;
7346 mgmt_ev.alert_type = alert_type;
7347 mgmt_ev.rssi_dbm = rssi_dbm;
7349 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
7350 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
7354 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
7356 struct mgmt_pending_cmd *cmd;
7360 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7362 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7366 type = hdev->le_discovery.type;
7368 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7369 mgmt_status(status), &type, sizeof(type));
7370 mgmt_pending_remove(cmd);
7375 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
7378 unsigned long timeout = 0;
7380 BT_DBG("status %d", status);
7384 mgmt_start_le_discovery_failed(hdev, status);
7385 hci_dev_unlock(hdev);
7390 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
7391 hci_dev_unlock(hdev);
7393 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
7394 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
7399 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
7402 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
7403 void *data, u16 len)
7405 struct mgmt_cp_start_le_discovery *cp = data;
7406 struct mgmt_pending_cmd *cmd;
7407 struct hci_cp_le_set_scan_param param_cp;
7408 struct hci_cp_le_set_scan_enable enable_cp;
7409 struct hci_request req;
7410 u8 status, own_addr_type;
7413 BT_DBG("%s", hdev->name);
7417 if (!hdev_is_powered(hdev)) {
7418 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7419 MGMT_STATUS_NOT_POWERED);
7423 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
7424 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7429 if (cp->type != DISCOV_TYPE_LE) {
7430 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7431 MGMT_STATUS_INVALID_PARAMS);
7435 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
7441 hdev->le_discovery.type = cp->type;
7443 hci_req_init(&req, hdev);
7445 status = mgmt_le_support(hdev);
7447 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7449 mgmt_pending_remove(cmd);
7453 /* If controller is scanning, it means the background scanning
7454 * is running. Thus, we should temporarily stop it in order to
7455 * set the discovery scanning parameters.
7457 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
7458 hci_req_add_le_scan_disable(&req, false);
7460 memset(¶m_cp, 0, sizeof(param_cp));
7462 /* All active scans will be done with either a resolvable
7463 * private address (when privacy feature has been enabled)
7464 * or unresolvable private address.
7466 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
7468 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7469 MGMT_STATUS_FAILED);
7470 mgmt_pending_remove(cmd);
7474 param_cp.type = hdev->le_scan_type;
7475 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
7476 param_cp.window = cpu_to_le16(hdev->le_scan_window);
7477 param_cp.own_address_type = own_addr_type;
7478 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
7481 memset(&enable_cp, 0, sizeof(enable_cp));
7482 enable_cp.enable = LE_SCAN_ENABLE;
7483 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
7485 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
7488 err = hci_req_run(&req, start_le_discovery_complete);
7490 mgmt_pending_remove(cmd);
7492 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
7495 hci_dev_unlock(hdev);
7499 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
7501 struct mgmt_pending_cmd *cmd;
7504 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7508 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7509 mgmt_status(status), &hdev->le_discovery.type,
7510 sizeof(hdev->le_discovery.type));
7511 mgmt_pending_remove(cmd);
7516 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
7519 BT_DBG("status %d", status);
7524 mgmt_stop_le_discovery_failed(hdev, status);
7528 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7531 hci_dev_unlock(hdev);
7534 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
7535 void *data, u16 len)
7537 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
7538 struct mgmt_pending_cmd *cmd;
7539 struct hci_request req;
7542 BT_DBG("%s", hdev->name);
7546 if (!hci_le_discovery_active(hdev)) {
7547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7548 MGMT_STATUS_REJECTED, &mgmt_cp->type,
7549 sizeof(mgmt_cp->type));
7553 if (hdev->le_discovery.type != mgmt_cp->type) {
7554 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7555 MGMT_STATUS_INVALID_PARAMS,
7556 &mgmt_cp->type, sizeof(mgmt_cp->type));
7560 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
7566 hci_req_init(&req, hdev);
7568 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
7569 BT_DBG("unknown le discovery state %u",
7570 hdev->le_discovery.state);
7572 mgmt_pending_remove(cmd);
7573 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7574 MGMT_STATUS_FAILED, &mgmt_cp->type,
7575 sizeof(mgmt_cp->type));
7579 cancel_delayed_work(&hdev->le_scan_disable);
7580 hci_req_add_le_scan_disable(&req, false);
7582 err = hci_req_run(&req, stop_le_discovery_complete);
7584 mgmt_pending_remove(cmd);
7586 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
7589 hci_dev_unlock(hdev);
7593 /* Separate LE discovery */
7594 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
7596 struct mgmt_ev_discovering ev;
7597 struct mgmt_pending_cmd *cmd;
7599 BT_DBG("%s le discovering %u", hdev->name, discovering);
7602 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7604 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7607 u8 type = hdev->le_discovery.type;
7609 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
7611 mgmt_pending_remove(cmd);
7614 memset(&ev, 0, sizeof(ev));
7615 ev.type = hdev->le_discovery.type;
7616 ev.discovering = discovering;
7618 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7621 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
7622 void *data, u16 len)
7626 BT_DBG("%s", hdev->name);
7630 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
7632 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
7634 hci_dev_unlock(hdev);
7639 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
7644 if (min > max || min < 6 || max > 3200)
7647 if (to_multiplier < 10 || to_multiplier > 3200)
7650 if (max >= to_multiplier * 8)
7653 max_latency = (to_multiplier * 8 / max) - 1;
7655 if (latency > 499 || latency > max_latency)
7661 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
7664 struct mgmt_cp_le_conn_update *cp = data;
7666 struct hci_conn *conn;
7667 u16 min, max, latency, supervision_timeout;
7670 if (!hdev_is_powered(hdev))
7671 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7672 MGMT_STATUS_NOT_POWERED);
7674 min = __le16_to_cpu(cp->conn_interval_min);
7675 max = __le16_to_cpu(cp->conn_interval_max);
7676 latency = __le16_to_cpu(cp->conn_latency);
7677 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
7679 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
7680 min, max, latency, supervision_timeout);
7682 err = check_le_conn_update_param(min, max, latency,
7683 supervision_timeout);
7686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7687 MGMT_STATUS_INVALID_PARAMS);
7691 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
7693 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7694 MGMT_STATUS_NOT_CONNECTED);
7695 hci_dev_unlock(hdev);
7699 hci_dev_unlock(hdev);
7701 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
7703 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
7707 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
7710 struct mgmt_cp_set_manufacturer_data *cp;
7711 struct mgmt_pending_cmd *cmd;
7713 BT_DBG("status 0x%02x", status);
7717 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
7724 mgmt_cmd_status(cmd->sk, hdev->id,
7725 MGMT_OP_SET_MANUFACTURER_DATA,
7726 mgmt_status(status));
7728 mgmt_cmd_complete(cmd->sk, hdev->id,
7729 MGMT_OP_SET_MANUFACTURER_DATA, 0,
7732 mgmt_pending_remove(cmd);
7735 hci_dev_unlock(hdev);
7738 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
7739 void *data, u16 len)
7741 struct mgmt_pending_cmd *cmd;
7742 struct hci_request req;
7743 struct mgmt_cp_set_manufacturer_data *cp = data;
7744 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
7748 BT_DBG("%s", hdev->name);
7750 if (!lmp_bredr_capable(hdev))
7751 return mgmt_cmd_status(sk, hdev->id,
7752 MGMT_OP_SET_MANUFACTURER_DATA,
7753 MGMT_STATUS_NOT_SUPPORTED);
7755 if (cp->data[0] == 0 ||
7756 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
7757 return mgmt_cmd_status(sk, hdev->id,
7758 MGMT_OP_SET_MANUFACTURER_DATA,
7759 MGMT_STATUS_INVALID_PARAMS);
7761 if (cp->data[1] != 0xFF)
7762 return mgmt_cmd_status(sk, hdev->id,
7763 MGMT_OP_SET_MANUFACTURER_DATA,
7764 MGMT_STATUS_NOT_SUPPORTED);
7768 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
7769 err = mgmt_cmd_status(sk, hdev->id,
7770 MGMT_OP_SET_MANUFACTURER_DATA,
7775 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
7782 hci_req_init(&req, hdev);
7784 /* if new data is same as previous data then return command
7787 if (hdev->manufacturer_len == cp->data[0] - 1 &&
7788 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
7789 mgmt_pending_remove(cmd);
7790 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
7791 0, cp, sizeof(*cp));
7796 old_len = hdev->manufacturer_len;
7798 memcpy(old_data, hdev->manufacturer_data, old_len);
7800 hdev->manufacturer_len = cp->data[0] - 1;
7801 if (hdev->manufacturer_len > 0)
7802 memcpy(hdev->manufacturer_data, cp->data + 2,
7803 hdev->manufacturer_len);
7805 __hci_req_update_eir(&req);
7807 err = hci_req_run(&req, set_manufacturer_data_complete);
7809 mgmt_pending_remove(cmd);
7814 hci_dev_unlock(hdev);
7819 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
7820 hdev->manufacturer_len = old_len;
7821 if (hdev->manufacturer_len > 0)
7822 memcpy(hdev->manufacturer_data, old_data,
7823 hdev->manufacturer_len);
7824 hci_dev_unlock(hdev);
7828 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
7829 void *data, u16 len)
7831 struct mgmt_cp_le_set_scan_params *cp = data;
7832 __u16 interval, window;
7835 BT_DBG("%s", hdev->name);
7837 if (!lmp_le_capable(hdev))
7838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7839 MGMT_STATUS_NOT_SUPPORTED);
7841 interval = __le16_to_cpu(cp->interval);
7843 if (interval < 0x0004 || interval > 0x4000)
7844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7845 MGMT_STATUS_INVALID_PARAMS);
7847 window = __le16_to_cpu(cp->window);
7849 if (window < 0x0004 || window > 0x4000)
7850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7851 MGMT_STATUS_INVALID_PARAMS);
7853 if (window > interval)
7854 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7855 MGMT_STATUS_INVALID_PARAMS);
7859 hdev->le_scan_type = cp->type;
7860 hdev->le_scan_interval = interval;
7861 hdev->le_scan_window = window;
7863 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
7866 /* If background scan is running, restart it so new parameters are
7869 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
7870 hdev->discovery.state == DISCOVERY_STOPPED) {
7871 struct hci_request req;
7873 hci_req_init(&req, hdev);
7875 hci_req_add_le_scan_disable(&req, false);
7876 hci_req_add_le_passive_scan(&req);
7878 hci_req_run(&req, NULL);
7881 hci_dev_unlock(hdev);
7886 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
7887 void *data, u16 len)
7889 struct mgmt_cp_set_voice_setting *cp = data;
7890 struct hci_conn *conn;
7891 struct hci_conn *sco_conn;
7895 BT_DBG("%s", hdev->name);
7897 if (!lmp_bredr_capable(hdev)) {
7898 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
7899 MGMT_STATUS_NOT_SUPPORTED);
7904 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
7906 err = mgmt_cmd_complete(sk, hdev->id,
7907 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
7911 conn->voice_setting = cp->voice_setting;
7912 conn->sco_role = cp->sco_role;
7914 sco_conn = hci_conn_hash_lookup_sco(hdev);
7915 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
7916 BT_ERR("There is other SCO connection.");
7920 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
7921 if (conn->voice_setting == 0x0063)
7922 sco_connect_set_wbc(hdev);
7924 sco_connect_set_nbc(hdev);
7926 if (conn->voice_setting == 0x0063)
7927 sco_connect_set_gw_wbc(hdev);
7929 sco_connect_set_gw_nbc(hdev);
7933 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
7937 hci_dev_unlock(hdev);
7941 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
7942 void *data, u16 len)
7944 struct mgmt_rp_get_adv_tx_power *rp;
7948 BT_DBG("%s", hdev->name);
7952 rp_len = sizeof(*rp);
7953 rp = kmalloc(rp_len, GFP_KERNEL);
7959 rp->adv_tx_power = hdev->adv_tx_power;
7961 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
7967 hci_dev_unlock(hdev);
7972 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
7974 struct mgmt_ev_hardware_error ev;
7976 ev.error_code = err_code;
7977 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
7980 void mgmt_tx_timeout_error(struct hci_dev *hdev)
7982 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
7985 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
7986 u8 state_change_reason, u16 connection_handle)
7988 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
7990 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
7991 adv_instance, state_change_reason, connection_handle);
7993 mgmt_ev.adv_instance = adv_instance;
7994 mgmt_ev.state_change_reason = state_change_reason;
7995 mgmt_ev.connection_handle = connection_handle;
7997 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
7998 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
8002 static int enable_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8003 void *data, u16 len)
8006 struct mgmt_cp_enable_6lowpan *cp = data;
8008 BT_DBG("%s", hdev->name);
8012 if (!hdev_is_powered(hdev)) {
8013 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8014 MGMT_STATUS_NOT_POWERED);
8018 if (!lmp_le_capable(hdev)) {
8019 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8020 MGMT_STATUS_NOT_SUPPORTED);
8024 if (cp->enable_6lowpan)
8025 bt_6lowpan_enable();
8027 bt_6lowpan_disable();
8029 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8030 MGMT_STATUS_SUCCESS, NULL, 0);
8032 hci_dev_unlock(hdev);
8036 static int connect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8037 void *data, u16 len)
8039 struct mgmt_cp_connect_6lowpan *cp = data;
8040 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
8043 BT_DBG("%s", hdev->name);
8047 if (!lmp_le_capable(hdev)) {
8048 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8049 MGMT_STATUS_NOT_SUPPORTED);
8053 if (!hdev_is_powered(hdev)) {
8054 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8055 MGMT_STATUS_REJECTED);
8059 if (bdaddr_type_is_le(cp->addr.type)) {
8060 if (cp->addr.type == BDADDR_LE_PUBLIC)
8061 addr_type = ADDR_LE_DEV_PUBLIC;
8063 addr_type = ADDR_LE_DEV_RANDOM;
8065 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8066 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8070 hci_dev_unlock(hdev);
8072 /* 6lowpan Connect */
8073 err = _bt_6lowpan_connect(&cp->addr.bdaddr, cp->addr.type);
8078 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8079 MGMT_STATUS_REJECTED, NULL, 0);
8084 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
8087 hci_dev_unlock(hdev);
8091 static int disconnect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8092 void *data, u16 len)
8094 struct mgmt_cp_disconnect_6lowpan *cp = data;
8095 struct hci_conn *conn = NULL;
8096 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
8099 BT_DBG("%s", hdev->name);
8103 if (!lmp_le_capable(hdev)) {
8104 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
8105 MGMT_STATUS_NOT_SUPPORTED);
8109 if (!hdev_is_powered(hdev)) {
8110 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
8111 MGMT_STATUS_REJECTED);
8115 if (bdaddr_type_is_le(cp->addr.type)) {
8116 if (cp->addr.type == BDADDR_LE_PUBLIC)
8117 addr_type = ADDR_LE_DEV_PUBLIC;
8119 addr_type = ADDR_LE_DEV_RANDOM;
8121 err = mgmt_cmd_complete(sk, hdev->id,
8122 MGMT_OP_DISCONNECT_6LOWPAN,
8123 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8127 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8129 err = mgmt_cmd_complete(sk, hdev->id,
8130 MGMT_OP_DISCONNECT_6LOWPAN,
8131 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
8135 if (conn->dst_type != addr_type) {
8136 err = mgmt_cmd_complete(sk, hdev->id,
8137 MGMT_OP_DISCONNECT_6LOWPAN,
8138 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8142 if (conn->state != BT_CONNECTED) {
8143 err = mgmt_cmd_complete(sk, hdev->id,
8144 MGMT_OP_DISCONNECT_6LOWPAN,
8145 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
8149 /* 6lowpan Disconnect */
8150 err = _bt_6lowpan_disconnect(conn->l2cap_data, cp->addr.type);
8152 err = mgmt_cmd_complete(sk, hdev->id,
8153 MGMT_OP_DISCONNECT_6LOWPAN,
8154 MGMT_STATUS_REJECTED, NULL, 0);
8158 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
8162 hci_dev_unlock(hdev);
8166 void mgmt_6lowpan_conn_changed(struct hci_dev *hdev, char if_name[16],
8167 bdaddr_t *bdaddr, u8 addr_type, bool connected)
8170 struct mgmt_ev_6lowpan_conn_state_changed *ev = (void *)buf;
8173 memset(buf, 0, sizeof(buf));
8174 bacpy(&ev->addr.bdaddr, bdaddr);
8175 ev->addr.type = addr_type;
8176 ev->connected = connected;
8177 memcpy(ev->ifname, (__u8 *)if_name, 16);
8179 ev_size = sizeof(*ev);
8181 mgmt_event(MGMT_EV_6LOWPAN_CONN_STATE_CHANGED, hdev, ev, ev_size, NULL);
8184 void mgmt_le_read_maximum_data_length_complete(struct hci_dev *hdev, u8 status)
8186 struct mgmt_pending_cmd *cmd;
8187 struct mgmt_rp_le_read_maximum_data_length rp;
8189 BT_DBG("%s status %u", hdev->name, status);
8191 cmd = pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev);
8196 mgmt_cmd_status(cmd->sk, hdev->id,
8197 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8198 mgmt_status(status));
8200 memset(&rp, 0, sizeof(rp));
8202 rp.max_tx_octets = cpu_to_le16(hdev->le_max_tx_len);
8203 rp.max_tx_time = cpu_to_le16(hdev->le_max_tx_time);
8204 rp.max_rx_octets = cpu_to_le16(hdev->le_max_rx_len);
8205 rp.max_rx_time = cpu_to_le16(hdev->le_max_rx_time);
8207 mgmt_cmd_complete(cmd->sk, hdev->id,
8208 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, 0,
8211 mgmt_pending_remove(cmd);
8214 static int read_maximum_le_data_length(struct sock *sk,
8215 struct hci_dev *hdev, void *data, u16 len)
8217 struct mgmt_pending_cmd *cmd;
8220 BT_DBG("read_maximum_le_data_length %s", hdev->name);
8224 if (!hdev_is_powered(hdev)) {
8225 err = mgmt_cmd_status(sk, hdev->id,
8226 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8227 MGMT_STATUS_NOT_POWERED);
8231 if (!lmp_le_capable(hdev)) {
8232 err = mgmt_cmd_status(sk, hdev->id,
8233 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8234 MGMT_STATUS_NOT_SUPPORTED);
8238 if (pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev)) {
8239 err = mgmt_cmd_status(sk, hdev->id,
8240 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8245 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8252 err = hci_send_cmd(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
8254 mgmt_pending_remove(cmd);
8257 hci_dev_unlock(hdev);
8261 void mgmt_le_write_host_suggested_data_length_complete(struct hci_dev *hdev,
8264 struct mgmt_pending_cmd *cmd;
8266 BT_DBG("status 0x%02x", status);
8270 cmd = pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev);
8272 BT_ERR("cmd not found in the pending list");
8277 mgmt_cmd_status(cmd->sk, hdev->id,
8278 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8279 mgmt_status(status));
8281 mgmt_cmd_complete(cmd->sk, hdev->id,
8282 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8285 mgmt_pending_remove(cmd);
8288 hci_dev_unlock(hdev);
8291 static int write_host_suggested_le_data_length(struct sock *sk,
8292 struct hci_dev *hdev, void *data, u16 len)
8294 struct mgmt_pending_cmd *cmd;
8295 struct mgmt_cp_le_write_host_suggested_data_length *cp = data;
8296 struct hci_cp_le_write_def_data_len hci_data;
8299 BT_DBG("Write host suggested data length request for %s", hdev->name);
8303 if (!hdev_is_powered(hdev)) {
8304 err = mgmt_cmd_status(sk, hdev->id,
8305 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8306 MGMT_STATUS_NOT_POWERED);
8310 if (!lmp_le_capable(hdev)) {
8311 err = mgmt_cmd_status(sk, hdev->id,
8312 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8313 MGMT_STATUS_NOT_SUPPORTED);
8317 if (pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
8318 err = mgmt_cmd_status(sk, hdev->id,
8319 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8324 cmd = mgmt_pending_add(sk, MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8331 hci_data.tx_len = cp->def_tx_octets;
8332 hci_data.tx_time = cp->def_tx_time;
8334 err = hci_send_cmd(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
8335 sizeof(hci_data), &hci_data);
8337 mgmt_pending_remove(cmd);
8340 hci_dev_unlock(hdev);
8344 #endif /* TIZEN_BT */
8346 static bool ltk_is_valid(struct mgmt_ltk_info *key)
8348 if (key->initiator != 0x00 && key->initiator != 0x01)
8351 switch (key->addr.type) {
8352 case BDADDR_LE_PUBLIC:
8355 case BDADDR_LE_RANDOM:
8356 /* Two most significant bits shall be set */
8357 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
8365 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
8366 void *cp_data, u16 len)
8368 struct mgmt_cp_load_long_term_keys *cp = cp_data;
8369 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
8370 sizeof(struct mgmt_ltk_info));
8371 u16 key_count, expected_len;
8374 bt_dev_dbg(hdev, "sock %p", sk);
8376 if (!lmp_le_capable(hdev))
8377 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8378 MGMT_STATUS_NOT_SUPPORTED);
8380 key_count = __le16_to_cpu(cp->key_count);
8381 if (key_count > max_key_count) {
8382 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
8384 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8385 MGMT_STATUS_INVALID_PARAMS);
8388 expected_len = struct_size(cp, keys, key_count);
8389 if (expected_len != len) {
8390 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
8392 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8393 MGMT_STATUS_INVALID_PARAMS);
8396 bt_dev_dbg(hdev, "key_count %u", key_count);
8398 for (i = 0; i < key_count; i++) {
8399 struct mgmt_ltk_info *key = &cp->keys[i];
8401 if (!ltk_is_valid(key))
8402 return mgmt_cmd_status(sk, hdev->id,
8403 MGMT_OP_LOAD_LONG_TERM_KEYS,
8404 MGMT_STATUS_INVALID_PARAMS);
8409 hci_smp_ltks_clear(hdev);
8411 for (i = 0; i < key_count; i++) {
8412 struct mgmt_ltk_info *key = &cp->keys[i];
8413 u8 type, authenticated;
8415 if (hci_is_blocked_key(hdev,
8416 HCI_BLOCKED_KEY_TYPE_LTK,
8418 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
8423 switch (key->type) {
8424 case MGMT_LTK_UNAUTHENTICATED:
8425 authenticated = 0x00;
8426 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8428 case MGMT_LTK_AUTHENTICATED:
8429 authenticated = 0x01;
8430 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8432 case MGMT_LTK_P256_UNAUTH:
8433 authenticated = 0x00;
8434 type = SMP_LTK_P256;
8436 case MGMT_LTK_P256_AUTH:
8437 authenticated = 0x01;
8438 type = SMP_LTK_P256;
8440 case MGMT_LTK_P256_DEBUG:
8441 authenticated = 0x00;
8442 type = SMP_LTK_P256_DEBUG;
8448 hci_add_ltk(hdev, &key->addr.bdaddr,
8449 le_addr_type(key->addr.type), type, authenticated,
8450 key->val, key->enc_size, key->ediv, key->rand);
8453 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
8456 hci_dev_unlock(hdev);
8461 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8463 struct hci_conn *conn = cmd->user_data;
8464 struct mgmt_rp_get_conn_info rp;
8467 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8469 if (status == MGMT_STATUS_SUCCESS) {
8470 rp.rssi = conn->rssi;
8471 rp.tx_power = conn->tx_power;
8472 rp.max_tx_power = conn->max_tx_power;
8474 rp.rssi = HCI_RSSI_INVALID;
8475 rp.tx_power = HCI_TX_POWER_INVALID;
8476 rp.max_tx_power = HCI_TX_POWER_INVALID;
8479 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
8480 status, &rp, sizeof(rp));
8482 hci_conn_drop(conn);
8488 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
8491 struct hci_cp_read_rssi *cp;
8492 struct mgmt_pending_cmd *cmd;
8493 struct hci_conn *conn;
8497 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
8501 /* Commands sent in request are either Read RSSI or Read Transmit Power
8502 * Level so we check which one was last sent to retrieve connection
8503 * handle. Both commands have handle as first parameter so it's safe to
8504 * cast data on the same command struct.
8506 * First command sent is always Read RSSI and we fail only if it fails.
8507 * In other case we simply override error to indicate success as we
8508 * already remembered if TX power value is actually valid.
8510 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
8512 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
8513 status = MGMT_STATUS_SUCCESS;
8515 status = mgmt_status(hci_status);
8519 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
8523 handle = __le16_to_cpu(cp->handle);
8524 conn = hci_conn_hash_lookup_handle(hdev, handle);
8526 bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
8531 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
8535 cmd->cmd_complete(cmd, status);
8536 mgmt_pending_remove(cmd);
8539 hci_dev_unlock(hdev);
8542 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
8545 struct mgmt_cp_get_conn_info *cp = data;
8546 struct mgmt_rp_get_conn_info rp;
8547 struct hci_conn *conn;
8548 unsigned long conn_info_age;
8551 bt_dev_dbg(hdev, "sock %p", sk);
8553 memset(&rp, 0, sizeof(rp));
8554 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8555 rp.addr.type = cp->addr.type;
8557 if (!bdaddr_type_is_valid(cp->addr.type))
8558 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8559 MGMT_STATUS_INVALID_PARAMS,
8564 if (!hdev_is_powered(hdev)) {
8565 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8566 MGMT_STATUS_NOT_POWERED, &rp,
8571 if (cp->addr.type == BDADDR_BREDR)
8572 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8575 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8577 if (!conn || conn->state != BT_CONNECTED) {
8578 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8579 MGMT_STATUS_NOT_CONNECTED, &rp,
8584 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
8585 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8586 MGMT_STATUS_BUSY, &rp, sizeof(rp));
8590 /* To avoid client trying to guess when to poll again for information we
8591 * calculate conn info age as random value between min/max set in hdev.
8593 conn_info_age = hdev->conn_info_min_age +
8594 prandom_u32_max(hdev->conn_info_max_age -
8595 hdev->conn_info_min_age);
8597 /* Query controller to refresh cached values if they are too old or were
8600 if (time_after(jiffies, conn->conn_info_timestamp +
8601 msecs_to_jiffies(conn_info_age)) ||
8602 !conn->conn_info_timestamp) {
8603 struct hci_request req;
8604 struct hci_cp_read_tx_power req_txp_cp;
8605 struct hci_cp_read_rssi req_rssi_cp;
8606 struct mgmt_pending_cmd *cmd;
8608 hci_req_init(&req, hdev);
8609 req_rssi_cp.handle = cpu_to_le16(conn->handle);
8610 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
8613 /* For LE links TX power does not change thus we don't need to
8614 * query for it once value is known.
8616 if (!bdaddr_type_is_le(cp->addr.type) ||
8617 conn->tx_power == HCI_TX_POWER_INVALID) {
8618 req_txp_cp.handle = cpu_to_le16(conn->handle);
8619 req_txp_cp.type = 0x00;
8620 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8621 sizeof(req_txp_cp), &req_txp_cp);
8624 /* Max TX power needs to be read only once per connection */
8625 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
8626 req_txp_cp.handle = cpu_to_le16(conn->handle);
8627 req_txp_cp.type = 0x01;
8628 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8629 sizeof(req_txp_cp), &req_txp_cp);
8632 err = hci_req_run(&req, conn_info_refresh_complete);
8636 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
8643 hci_conn_hold(conn);
8644 cmd->user_data = hci_conn_get(conn);
8645 cmd->cmd_complete = conn_info_cmd_complete;
8647 conn->conn_info_timestamp = jiffies;
8649 /* Cache is valid, just reply with values cached in hci_conn */
8650 rp.rssi = conn->rssi;
8651 rp.tx_power = conn->tx_power;
8652 rp.max_tx_power = conn->max_tx_power;
8654 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8655 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8659 hci_dev_unlock(hdev);
8663 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8665 struct hci_conn *conn = cmd->user_data;
8666 struct mgmt_rp_get_clock_info rp;
8667 struct hci_dev *hdev;
8670 memset(&rp, 0, sizeof(rp));
8671 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8676 hdev = hci_dev_get(cmd->index);
8678 rp.local_clock = cpu_to_le32(hdev->clock);
8683 rp.piconet_clock = cpu_to_le32(conn->clock);
8684 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
8688 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
8692 hci_conn_drop(conn);
8699 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8701 struct hci_cp_read_clock *hci_cp;
8702 struct mgmt_pending_cmd *cmd;
8703 struct hci_conn *conn;
8705 bt_dev_dbg(hdev, "status %u", status);
8709 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
8713 if (hci_cp->which) {
8714 u16 handle = __le16_to_cpu(hci_cp->handle);
8715 conn = hci_conn_hash_lookup_handle(hdev, handle);
8720 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
8724 cmd->cmd_complete(cmd, mgmt_status(status));
8725 mgmt_pending_remove(cmd);
8728 hci_dev_unlock(hdev);
8731 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
8734 struct mgmt_cp_get_clock_info *cp = data;
8735 struct mgmt_rp_get_clock_info rp;
8736 struct hci_cp_read_clock hci_cp;
8737 struct mgmt_pending_cmd *cmd;
8738 struct hci_request req;
8739 struct hci_conn *conn;
8742 bt_dev_dbg(hdev, "sock %p", sk);
8744 memset(&rp, 0, sizeof(rp));
8745 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8746 rp.addr.type = cp->addr.type;
8748 if (cp->addr.type != BDADDR_BREDR)
8749 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8750 MGMT_STATUS_INVALID_PARAMS,
8755 if (!hdev_is_powered(hdev)) {
8756 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8757 MGMT_STATUS_NOT_POWERED, &rp,
8762 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8763 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8765 if (!conn || conn->state != BT_CONNECTED) {
8766 err = mgmt_cmd_complete(sk, hdev->id,
8767 MGMT_OP_GET_CLOCK_INFO,
8768 MGMT_STATUS_NOT_CONNECTED,
8776 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
8782 cmd->cmd_complete = clock_info_cmd_complete;
8784 hci_req_init(&req, hdev);
8786 memset(&hci_cp, 0, sizeof(hci_cp));
8787 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8790 hci_conn_hold(conn);
8791 cmd->user_data = hci_conn_get(conn);
8793 hci_cp.handle = cpu_to_le16(conn->handle);
8794 hci_cp.which = 0x01; /* Piconet clock */
8795 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8798 err = hci_req_run(&req, get_clock_info_complete);
8800 mgmt_pending_remove(cmd);
8803 hci_dev_unlock(hdev);
8807 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8809 struct hci_conn *conn;
8811 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8815 if (conn->dst_type != type)
8818 if (conn->state != BT_CONNECTED)
8824 /* This function requires the caller holds hdev->lock */
8825 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8826 u8 addr_type, u8 auto_connect)
8828 struct hci_conn_params *params;
8830 params = hci_conn_params_add(hdev, addr, addr_type);
8834 if (params->auto_connect == auto_connect)
8837 list_del_init(¶ms->action);
8839 switch (auto_connect) {
8840 case HCI_AUTO_CONN_DISABLED:
8841 case HCI_AUTO_CONN_LINK_LOSS:
8842 /* If auto connect is being disabled when we're trying to
8843 * connect to device, keep connecting.
8845 if (params->explicit_connect)
8846 list_add(¶ms->action, &hdev->pend_le_conns);
8848 case HCI_AUTO_CONN_REPORT:
8849 if (params->explicit_connect)
8850 list_add(¶ms->action, &hdev->pend_le_conns);
8852 list_add(¶ms->action, &hdev->pend_le_reports);
8854 case HCI_AUTO_CONN_DIRECT:
8855 case HCI_AUTO_CONN_ALWAYS:
8856 if (!is_connected(hdev, addr, addr_type))
8857 list_add(¶ms->action, &hdev->pend_le_conns);
8861 params->auto_connect = auto_connect;
8863 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
8864 addr, addr_type, auto_connect);
8869 static void device_added(struct sock *sk, struct hci_dev *hdev,
8870 bdaddr_t *bdaddr, u8 type, u8 action)
8872 struct mgmt_ev_device_added ev;
8874 bacpy(&ev.addr.bdaddr, bdaddr);
8875 ev.addr.type = type;
8878 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
8881 static int add_device(struct sock *sk, struct hci_dev *hdev,
8882 void *data, u16 len)
8884 struct mgmt_cp_add_device *cp = data;
8885 u8 auto_conn, addr_type;
8886 struct hci_conn_params *params;
8888 u32 current_flags = 0;
8890 bt_dev_dbg(hdev, "sock %p", sk);
8892 if (!bdaddr_type_is_valid(cp->addr.type) ||
8893 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
8894 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8895 MGMT_STATUS_INVALID_PARAMS,
8896 &cp->addr, sizeof(cp->addr));
8898 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
8899 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8900 MGMT_STATUS_INVALID_PARAMS,
8901 &cp->addr, sizeof(cp->addr));
8905 if (cp->addr.type == BDADDR_BREDR) {
8906 /* Only incoming connections action is supported for now */
8907 if (cp->action != 0x01) {
8908 err = mgmt_cmd_complete(sk, hdev->id,
8910 MGMT_STATUS_INVALID_PARAMS,
8911 &cp->addr, sizeof(cp->addr));
8915 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
8921 hci_req_update_scan(hdev);
8926 addr_type = le_addr_type(cp->addr.type);
8928 if (cp->action == 0x02)
8929 auto_conn = HCI_AUTO_CONN_ALWAYS;
8930 else if (cp->action == 0x01)
8931 auto_conn = HCI_AUTO_CONN_DIRECT;
8933 auto_conn = HCI_AUTO_CONN_REPORT;
8935 /* Kernel internally uses conn_params with resolvable private
8936 * address, but Add Device allows only identity addresses.
8937 * Make sure it is enforced before calling
8938 * hci_conn_params_lookup.
8940 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8941 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8942 MGMT_STATUS_INVALID_PARAMS,
8943 &cp->addr, sizeof(cp->addr));
8947 /* If the connection parameters don't exist for this device,
8948 * they will be created and configured with defaults.
8950 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
8952 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8953 MGMT_STATUS_FAILED, &cp->addr,
8957 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8960 current_flags = params->current_flags;
8963 hci_update_background_scan(hdev);
8966 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
8967 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
8968 SUPPORTED_DEVICE_FLAGS(), current_flags);
8970 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8971 MGMT_STATUS_SUCCESS, &cp->addr,
8975 hci_dev_unlock(hdev);
8979 static void device_removed(struct sock *sk, struct hci_dev *hdev,
8980 bdaddr_t *bdaddr, u8 type)
8982 struct mgmt_ev_device_removed ev;
8984 bacpy(&ev.addr.bdaddr, bdaddr);
8985 ev.addr.type = type;
8987 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
8990 static int remove_device(struct sock *sk, struct hci_dev *hdev,
8991 void *data, u16 len)
8993 struct mgmt_cp_remove_device *cp = data;
8996 bt_dev_dbg(hdev, "sock %p", sk);
9000 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9001 struct hci_conn_params *params;
9004 if (!bdaddr_type_is_valid(cp->addr.type)) {
9005 err = mgmt_cmd_complete(sk, hdev->id,
9006 MGMT_OP_REMOVE_DEVICE,
9007 MGMT_STATUS_INVALID_PARAMS,
9008 &cp->addr, sizeof(cp->addr));
9012 if (cp->addr.type == BDADDR_BREDR) {
9013 err = hci_bdaddr_list_del(&hdev->accept_list,
9017 err = mgmt_cmd_complete(sk, hdev->id,
9018 MGMT_OP_REMOVE_DEVICE,
9019 MGMT_STATUS_INVALID_PARAMS,
9025 hci_req_update_scan(hdev);
9027 device_removed(sk, hdev, &cp->addr.bdaddr,
9032 addr_type = le_addr_type(cp->addr.type);
9034 /* Kernel internally uses conn_params with resolvable private
9035 * address, but Remove Device allows only identity addresses.
9036 * Make sure it is enforced before calling
9037 * hci_conn_params_lookup.
9039 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9040 err = mgmt_cmd_complete(sk, hdev->id,
9041 MGMT_OP_REMOVE_DEVICE,
9042 MGMT_STATUS_INVALID_PARAMS,
9043 &cp->addr, sizeof(cp->addr));
9047 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9050 err = mgmt_cmd_complete(sk, hdev->id,
9051 MGMT_OP_REMOVE_DEVICE,
9052 MGMT_STATUS_INVALID_PARAMS,
9053 &cp->addr, sizeof(cp->addr));
9057 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
9058 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
9059 err = mgmt_cmd_complete(sk, hdev->id,
9060 MGMT_OP_REMOVE_DEVICE,
9061 MGMT_STATUS_INVALID_PARAMS,
9062 &cp->addr, sizeof(cp->addr));
9066 list_del(¶ms->action);
9067 list_del(¶ms->list);
9069 hci_update_background_scan(hdev);
9071 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
9073 struct hci_conn_params *p, *tmp;
9074 struct bdaddr_list *b, *btmp;
9076 if (cp->addr.type) {
9077 err = mgmt_cmd_complete(sk, hdev->id,
9078 MGMT_OP_REMOVE_DEVICE,
9079 MGMT_STATUS_INVALID_PARAMS,
9080 &cp->addr, sizeof(cp->addr));
9084 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
9085 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
9090 hci_req_update_scan(hdev);
9092 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
9093 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
9095 device_removed(sk, hdev, &p->addr, p->addr_type);
9096 if (p->explicit_connect) {
9097 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
9100 list_del(&p->action);
9105 bt_dev_dbg(hdev, "All LE connection parameters were removed");
9107 hci_update_background_scan(hdev);
9111 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
9112 MGMT_STATUS_SUCCESS, &cp->addr,
9115 hci_dev_unlock(hdev);
9119 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
9122 struct mgmt_cp_load_conn_param *cp = data;
9123 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
9124 sizeof(struct mgmt_conn_param));
9125 u16 param_count, expected_len;
9128 if (!lmp_le_capable(hdev))
9129 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9130 MGMT_STATUS_NOT_SUPPORTED);
9132 param_count = __le16_to_cpu(cp->param_count);
9133 if (param_count > max_param_count) {
9134 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
9136 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9137 MGMT_STATUS_INVALID_PARAMS);
9140 expected_len = struct_size(cp, params, param_count);
9141 if (expected_len != len) {
9142 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
9144 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9145 MGMT_STATUS_INVALID_PARAMS);
9148 bt_dev_dbg(hdev, "param_count %u", param_count);
9152 hci_conn_params_clear_disabled(hdev);
9154 for (i = 0; i < param_count; i++) {
9155 struct mgmt_conn_param *param = &cp->params[i];
9156 struct hci_conn_params *hci_param;
9157 u16 min, max, latency, timeout;
9160 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
9163 if (param->addr.type == BDADDR_LE_PUBLIC) {
9164 addr_type = ADDR_LE_DEV_PUBLIC;
9165 } else if (param->addr.type == BDADDR_LE_RANDOM) {
9166 addr_type = ADDR_LE_DEV_RANDOM;
9168 bt_dev_err(hdev, "ignoring invalid connection parameters");
9172 min = le16_to_cpu(param->min_interval);
9173 max = le16_to_cpu(param->max_interval);
9174 latency = le16_to_cpu(param->latency);
9175 timeout = le16_to_cpu(param->timeout);
9177 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
9178 min, max, latency, timeout);
9180 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
9181 bt_dev_err(hdev, "ignoring invalid connection parameters");
9185 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
9188 bt_dev_err(hdev, "failed to add connection parameters");
9192 hci_param->conn_min_interval = min;
9193 hci_param->conn_max_interval = max;
9194 hci_param->conn_latency = latency;
9195 hci_param->supervision_timeout = timeout;
9198 hci_dev_unlock(hdev);
9200 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
9204 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
9205 void *data, u16 len)
9207 struct mgmt_cp_set_external_config *cp = data;
9211 bt_dev_dbg(hdev, "sock %p", sk);
9213 if (hdev_is_powered(hdev))
9214 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9215 MGMT_STATUS_REJECTED);
9217 if (cp->config != 0x00 && cp->config != 0x01)
9218 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9219 MGMT_STATUS_INVALID_PARAMS);
9221 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
9222 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9223 MGMT_STATUS_NOT_SUPPORTED);
9228 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
9230 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
9232 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
9239 err = new_options(hdev, sk);
9241 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
9242 mgmt_index_removed(hdev);
9244 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
9245 hci_dev_set_flag(hdev, HCI_CONFIG);
9246 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9248 queue_work(hdev->req_workqueue, &hdev->power_on);
9250 set_bit(HCI_RAW, &hdev->flags);
9251 mgmt_index_added(hdev);
9256 hci_dev_unlock(hdev);
9260 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
9261 void *data, u16 len)
9263 struct mgmt_cp_set_public_address *cp = data;
9267 bt_dev_dbg(hdev, "sock %p", sk);
9269 if (hdev_is_powered(hdev))
9270 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9271 MGMT_STATUS_REJECTED);
9273 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
9274 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9275 MGMT_STATUS_INVALID_PARAMS);
9277 if (!hdev->set_bdaddr)
9278 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9279 MGMT_STATUS_NOT_SUPPORTED);
9283 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
9284 bacpy(&hdev->public_addr, &cp->bdaddr);
9286 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
9293 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
9294 err = new_options(hdev, sk);
9296 if (is_configured(hdev)) {
9297 mgmt_index_removed(hdev);
9299 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
9301 hci_dev_set_flag(hdev, HCI_CONFIG);
9302 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9304 queue_work(hdev->req_workqueue, &hdev->power_on);
9308 hci_dev_unlock(hdev);
9313 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
9317 struct mgmt_ev_device_name_update *ev = (void *)buf;
9323 bacpy(&ev->addr.bdaddr, bdaddr);
9324 ev->addr.type = BDADDR_BREDR;
9326 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9329 ev->eir_len = cpu_to_le16(eir_len);
9331 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
9332 sizeof(*ev) + eir_len, NULL);
9335 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9336 u8 link_type, u8 addr_type, u8 status)
9338 struct mgmt_ev_conn_update_failed ev;
9340 bacpy(&ev.addr.bdaddr, bdaddr);
9341 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9344 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
9345 &ev, sizeof(ev), NULL);
9348 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
9349 u8 link_type, u8 addr_type, u16 conn_interval,
9350 u16 conn_latency, u16 supervision_timeout)
9352 struct mgmt_ev_conn_updated ev;
9354 bacpy(&ev.addr.bdaddr, bdaddr);
9355 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9356 ev.conn_interval = cpu_to_le16(conn_interval);
9357 ev.conn_latency = cpu_to_le16(conn_latency);
9358 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
9360 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
9361 &ev, sizeof(ev), NULL);
9364 /* le device found event - Pass adv type */
9365 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9366 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
9367 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
9370 struct mgmt_ev_le_device_found *ev = (void *)buf;
9373 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
9376 /* Make sure that the buffer is big enough. The 5 extra bytes
9377 * are for the potential CoD field.
9379 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9382 memset(buf, 0, sizeof(buf));
9384 bacpy(&ev->addr.bdaddr, bdaddr);
9385 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9387 ev->flags = cpu_to_le32(flags);
9388 ev->adv_type = adv_type;
9391 memcpy(ev->eir, eir, eir_len);
9393 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
9394 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9397 if (scan_rsp_len > 0)
9398 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9400 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9401 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9403 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9407 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
9408 u16 opcode, struct sk_buff *skb)
9410 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
9411 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
9412 u8 *h192, *r192, *h256, *r256;
9413 struct mgmt_pending_cmd *cmd;
9417 bt_dev_dbg(hdev, "status %u", status);
9419 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
9423 mgmt_cp = cmd->param;
9426 status = mgmt_status(status);
9433 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
9434 struct hci_rp_read_local_oob_data *rp;
9436 if (skb->len != sizeof(*rp)) {
9437 status = MGMT_STATUS_FAILED;
9440 status = MGMT_STATUS_SUCCESS;
9441 rp = (void *)skb->data;
9443 eir_len = 5 + 18 + 18;
9450 struct hci_rp_read_local_oob_ext_data *rp;
9452 if (skb->len != sizeof(*rp)) {
9453 status = MGMT_STATUS_FAILED;
9456 status = MGMT_STATUS_SUCCESS;
9457 rp = (void *)skb->data;
9459 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
9460 eir_len = 5 + 18 + 18;
9464 eir_len = 5 + 18 + 18 + 18 + 18;
9474 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
9481 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
9482 hdev->dev_class, 3);
9485 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9486 EIR_SSP_HASH_C192, h192, 16);
9487 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9488 EIR_SSP_RAND_R192, r192, 16);
9492 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9493 EIR_SSP_HASH_C256, h256, 16);
9494 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9495 EIR_SSP_RAND_R256, r256, 16);
9499 mgmt_rp->type = mgmt_cp->type;
9500 mgmt_rp->eir_len = cpu_to_le16(eir_len);
9502 err = mgmt_cmd_complete(cmd->sk, hdev->id,
9503 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
9504 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
9505 if (err < 0 || status)
9508 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
9510 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9511 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
9512 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
9515 mgmt_pending_remove(cmd);
9518 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
9519 struct mgmt_cp_read_local_oob_ext_data *cp)
9521 struct mgmt_pending_cmd *cmd;
9522 struct hci_request req;
9525 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
9530 hci_req_init(&req, hdev);
9532 if (bredr_sc_enabled(hdev))
9533 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
9535 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
9537 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
9539 mgmt_pending_remove(cmd);
9546 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
9547 void *data, u16 data_len)
9549 struct mgmt_cp_read_local_oob_ext_data *cp = data;
9550 struct mgmt_rp_read_local_oob_ext_data *rp;
9553 u8 status, flags, role, addr[7], hash[16], rand[16];
9556 bt_dev_dbg(hdev, "sock %p", sk);
9558 if (hdev_is_powered(hdev)) {
9560 case BIT(BDADDR_BREDR):
9561 status = mgmt_bredr_support(hdev);
9567 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9568 status = mgmt_le_support(hdev);
9572 eir_len = 9 + 3 + 18 + 18 + 3;
9575 status = MGMT_STATUS_INVALID_PARAMS;
9580 status = MGMT_STATUS_NOT_POWERED;
9584 rp_len = sizeof(*rp) + eir_len;
9585 rp = kmalloc(rp_len, GFP_ATOMIC);
9596 case BIT(BDADDR_BREDR):
9597 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9598 err = read_local_ssp_oob_req(hdev, sk, cp);
9599 hci_dev_unlock(hdev);
9603 status = MGMT_STATUS_FAILED;
9606 eir_len = eir_append_data(rp->eir, eir_len,
9608 hdev->dev_class, 3);
9611 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9612 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
9613 smp_generate_oob(hdev, hash, rand) < 0) {
9614 hci_dev_unlock(hdev);
9615 status = MGMT_STATUS_FAILED;
9619 /* This should return the active RPA, but since the RPA
9620 * is only programmed on demand, it is really hard to fill
9621 * this in at the moment. For now disallow retrieving
9622 * local out-of-band data when privacy is in use.
9624 * Returning the identity address will not help here since
9625 * pairing happens before the identity resolving key is
9626 * known and thus the connection establishment happens
9627 * based on the RPA and not the identity address.
9629 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
9630 hci_dev_unlock(hdev);
9631 status = MGMT_STATUS_REJECTED;
9635 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
9636 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
9637 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
9638 bacmp(&hdev->static_addr, BDADDR_ANY))) {
9639 memcpy(addr, &hdev->static_addr, 6);
9642 memcpy(addr, &hdev->bdaddr, 6);
9646 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
9647 addr, sizeof(addr));
9649 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
9654 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
9655 &role, sizeof(role));
9657 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
9658 eir_len = eir_append_data(rp->eir, eir_len,
9660 hash, sizeof(hash));
9662 eir_len = eir_append_data(rp->eir, eir_len,
9664 rand, sizeof(rand));
9667 flags = mgmt_get_adv_discov_flags(hdev);
9669 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
9670 flags |= LE_AD_NO_BREDR;
9672 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
9673 &flags, sizeof(flags));
9677 hci_dev_unlock(hdev);
9679 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
9681 status = MGMT_STATUS_SUCCESS;
9684 rp->type = cp->type;
9685 rp->eir_len = cpu_to_le16(eir_len);
9687 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
9688 status, rp, sizeof(*rp) + eir_len);
9689 if (err < 0 || status)
9692 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9693 rp, sizeof(*rp) + eir_len,
9694 HCI_MGMT_OOB_DATA_EVENTS, sk);
9702 static u32 get_supported_adv_flags(struct hci_dev *hdev)
9706 flags |= MGMT_ADV_FLAG_CONNECTABLE;
9707 flags |= MGMT_ADV_FLAG_DISCOV;
9708 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
9709 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
9710 flags |= MGMT_ADV_FLAG_APPEARANCE;
9711 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
9712 flags |= MGMT_ADV_PARAM_DURATION;
9713 flags |= MGMT_ADV_PARAM_TIMEOUT;
9714 flags |= MGMT_ADV_PARAM_INTERVALS;
9715 flags |= MGMT_ADV_PARAM_TX_POWER;
9716 flags |= MGMT_ADV_PARAM_SCAN_RSP;
9718 /* In extended adv TX_POWER returned from Set Adv Param
9719 * will be always valid.
9721 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
9722 ext_adv_capable(hdev))
9723 flags |= MGMT_ADV_FLAG_TX_POWER;
9725 if (ext_adv_capable(hdev)) {
9726 flags |= MGMT_ADV_FLAG_SEC_1M;
9727 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
9728 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
9730 if (hdev->le_features[1] & HCI_LE_PHY_2M)
9731 flags |= MGMT_ADV_FLAG_SEC_2M;
9733 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
9734 flags |= MGMT_ADV_FLAG_SEC_CODED;
9740 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
9741 void *data, u16 data_len)
9743 struct mgmt_rp_read_adv_features *rp;
9746 struct adv_info *adv_instance;
9747 u32 supported_flags;
9750 bt_dev_dbg(hdev, "sock %p", sk);
9752 if (!lmp_le_capable(hdev))
9753 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9754 MGMT_STATUS_REJECTED);
9756 /* Enabling the experimental LL Privay support disables support for
9759 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9760 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9761 MGMT_STATUS_NOT_SUPPORTED);
9765 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
9766 rp = kmalloc(rp_len, GFP_ATOMIC);
9768 hci_dev_unlock(hdev);
9772 supported_flags = get_supported_adv_flags(hdev);
9774 rp->supported_flags = cpu_to_le32(supported_flags);
9775 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
9776 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
9777 rp->max_instances = hdev->le_num_of_adv_sets;
9778 rp->num_instances = hdev->adv_instance_cnt;
9780 instance = rp->instance;
9781 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
9782 *instance = adv_instance->instance;
9786 hci_dev_unlock(hdev);
9788 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9789 MGMT_STATUS_SUCCESS, rp, rp_len);
9796 static u8 calculate_name_len(struct hci_dev *hdev)
9798 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
9800 return append_local_name(hdev, buf, 0);
9803 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
9806 u8 max_len = HCI_MAX_AD_LENGTH;
9809 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
9810 MGMT_ADV_FLAG_LIMITED_DISCOV |
9811 MGMT_ADV_FLAG_MANAGED_FLAGS))
9814 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
9817 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
9818 max_len -= calculate_name_len(hdev);
9820 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
9827 static bool flags_managed(u32 adv_flags)
9829 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
9830 MGMT_ADV_FLAG_LIMITED_DISCOV |
9831 MGMT_ADV_FLAG_MANAGED_FLAGS);
9834 static bool tx_power_managed(u32 adv_flags)
9836 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
9839 static bool name_managed(u32 adv_flags)
9841 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
9844 static bool appearance_managed(u32 adv_flags)
9846 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
9849 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
9850 u8 len, bool is_adv_data)
9855 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
9860 /* Make sure that the data is correctly formatted. */
9861 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
9867 if (data[i + 1] == EIR_FLAGS &&
9868 (!is_adv_data || flags_managed(adv_flags)))
9871 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
9874 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
9877 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
9880 if (data[i + 1] == EIR_APPEARANCE &&
9881 appearance_managed(adv_flags))
9884 /* If the current field length would exceed the total data
9885 * length, then it's invalid.
9887 if (i + cur_len >= len)
9894 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
9896 u32 supported_flags, phy_flags;
9898 /* The current implementation only supports a subset of the specified
9899 * flags. Also need to check mutual exclusiveness of sec flags.
9901 supported_flags = get_supported_adv_flags(hdev);
9902 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
9903 if (adv_flags & ~supported_flags ||
9904 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
9910 static bool adv_busy(struct hci_dev *hdev)
9912 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9913 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9914 pending_find(MGMT_OP_SET_LE, hdev) ||
9915 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
9916 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
9919 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
9922 struct mgmt_pending_cmd *cmd;
9923 struct mgmt_cp_add_advertising *cp;
9924 struct mgmt_rp_add_advertising rp;
9925 struct adv_info *adv_instance, *n;
9928 bt_dev_dbg(hdev, "status %u", status);
9932 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
9934 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
9936 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
9937 if (!adv_instance->pending)
9941 adv_instance->pending = false;
9945 instance = adv_instance->instance;
9947 if (hdev->cur_adv_instance == instance)
9948 cancel_adv_timeout(hdev);
9950 hci_remove_adv_instance(hdev, instance);
9951 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
9958 rp.instance = cp->instance;
9961 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9962 mgmt_status(status));
9964 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9965 mgmt_status(status), &rp, sizeof(rp));
9967 mgmt_pending_remove(cmd);
9970 hci_dev_unlock(hdev);
9973 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
9974 void *data, u16 data_len)
9976 struct mgmt_cp_add_advertising *cp = data;
9977 struct mgmt_rp_add_advertising rp;
9980 u16 timeout, duration;
9981 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
9982 u8 schedule_instance = 0;
9983 struct adv_info *next_instance;
9985 struct mgmt_pending_cmd *cmd;
9986 struct hci_request req;
9988 bt_dev_dbg(hdev, "sock %p", sk);
9990 status = mgmt_le_support(hdev);
9992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9995 /* Enabling the experimental LL Privay support disables support for
9998 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9999 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10000 MGMT_STATUS_NOT_SUPPORTED);
10002 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10003 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10004 MGMT_STATUS_INVALID_PARAMS);
10006 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
10007 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10008 MGMT_STATUS_INVALID_PARAMS);
10010 flags = __le32_to_cpu(cp->flags);
10011 timeout = __le16_to_cpu(cp->timeout);
10012 duration = __le16_to_cpu(cp->duration);
10014 if (!requested_adv_flags_are_valid(hdev, flags))
10015 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10016 MGMT_STATUS_INVALID_PARAMS);
10018 hci_dev_lock(hdev);
10020 if (timeout && !hdev_is_powered(hdev)) {
10021 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10022 MGMT_STATUS_REJECTED);
10026 if (adv_busy(hdev)) {
10027 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10032 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
10033 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
10034 cp->scan_rsp_len, false)) {
10035 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10036 MGMT_STATUS_INVALID_PARAMS);
10040 err = hci_add_adv_instance(hdev, cp->instance, flags,
10041 cp->adv_data_len, cp->data,
10043 cp->data + cp->adv_data_len,
10045 HCI_ADV_TX_POWER_NO_PREFERENCE,
10046 hdev->le_adv_min_interval,
10047 hdev->le_adv_max_interval);
10049 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10050 MGMT_STATUS_FAILED);
10054 /* Only trigger an advertising added event if a new instance was
10057 if (hdev->adv_instance_cnt > prev_instance_cnt)
10058 mgmt_advertising_added(sk, hdev, cp->instance);
10060 if (hdev->cur_adv_instance == cp->instance) {
10061 /* If the currently advertised instance is being changed then
10062 * cancel the current advertising and schedule the next
10063 * instance. If there is only one instance then the overridden
10064 * advertising data will be visible right away.
10066 cancel_adv_timeout(hdev);
10068 next_instance = hci_get_next_instance(hdev, cp->instance);
10070 schedule_instance = next_instance->instance;
10071 } else if (!hdev->adv_instance_timeout) {
10072 /* Immediately advertise the new instance if no other
10073 * instance is currently being advertised.
10075 schedule_instance = cp->instance;
10078 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
10079 * there is no instance to be advertised then we have no HCI
10080 * communication to make. Simply return.
10082 if (!hdev_is_powered(hdev) ||
10083 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10084 !schedule_instance) {
10085 rp.instance = cp->instance;
10086 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10087 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10091 /* We're good to go, update advertising data, parameters, and start
10094 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
10101 hci_req_init(&req, hdev);
10103 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
10106 err = hci_req_run(&req, add_advertising_complete);
10109 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10110 MGMT_STATUS_FAILED);
10111 mgmt_pending_remove(cmd);
10115 hci_dev_unlock(hdev);
10120 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
10123 struct mgmt_pending_cmd *cmd;
10124 struct mgmt_cp_add_ext_adv_params *cp;
10125 struct mgmt_rp_add_ext_adv_params rp;
10126 struct adv_info *adv_instance;
10129 BT_DBG("%s", hdev->name);
10131 hci_dev_lock(hdev);
10133 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
10138 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10142 rp.instance = cp->instance;
10143 rp.tx_power = adv_instance->tx_power;
10145 /* While we're at it, inform userspace of the available space for this
10146 * advertisement, given the flags that will be used.
10148 flags = __le32_to_cpu(cp->flags);
10149 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10150 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10153 /* If this advertisement was previously advertising and we
10154 * failed to update it, we signal that it has been removed and
10155 * delete its structure
10157 if (!adv_instance->pending)
10158 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
10160 hci_remove_adv_instance(hdev, cp->instance);
10162 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10163 mgmt_status(status));
10166 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10167 mgmt_status(status), &rp, sizeof(rp));
10172 mgmt_pending_remove(cmd);
10174 hci_dev_unlock(hdev);
10177 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
10178 void *data, u16 data_len)
10180 struct mgmt_cp_add_ext_adv_params *cp = data;
10181 struct mgmt_rp_add_ext_adv_params rp;
10182 struct mgmt_pending_cmd *cmd = NULL;
10183 struct adv_info *adv_instance;
10184 struct hci_request req;
10185 u32 flags, min_interval, max_interval;
10186 u16 timeout, duration;
10191 BT_DBG("%s", hdev->name);
10193 status = mgmt_le_support(hdev);
10195 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10198 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10199 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10200 MGMT_STATUS_INVALID_PARAMS);
10202 /* The purpose of breaking add_advertising into two separate MGMT calls
10203 * for params and data is to allow more parameters to be added to this
10204 * structure in the future. For this reason, we verify that we have the
10205 * bare minimum structure we know of when the interface was defined. Any
10206 * extra parameters we don't know about will be ignored in this request.
10208 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
10209 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10210 MGMT_STATUS_INVALID_PARAMS);
10212 flags = __le32_to_cpu(cp->flags);
10214 if (!requested_adv_flags_are_valid(hdev, flags))
10215 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10216 MGMT_STATUS_INVALID_PARAMS);
10218 hci_dev_lock(hdev);
10220 /* In new interface, we require that we are powered to register */
10221 if (!hdev_is_powered(hdev)) {
10222 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10223 MGMT_STATUS_REJECTED);
10227 if (adv_busy(hdev)) {
10228 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10233 /* Parse defined parameters from request, use defaults otherwise */
10234 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
10235 __le16_to_cpu(cp->timeout) : 0;
10237 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
10238 __le16_to_cpu(cp->duration) :
10239 hdev->def_multi_adv_rotation_duration;
10241 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10242 __le32_to_cpu(cp->min_interval) :
10243 hdev->le_adv_min_interval;
10245 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10246 __le32_to_cpu(cp->max_interval) :
10247 hdev->le_adv_max_interval;
10249 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
10251 HCI_ADV_TX_POWER_NO_PREFERENCE;
10253 /* Create advertising instance with no advertising or response data */
10254 err = hci_add_adv_instance(hdev, cp->instance, flags,
10255 0, NULL, 0, NULL, timeout, duration,
10256 tx_power, min_interval, max_interval);
10259 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10260 MGMT_STATUS_FAILED);
10264 /* Submit request for advertising params if ext adv available */
10265 if (ext_adv_capable(hdev)) {
10266 hci_req_init(&req, hdev);
10267 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10269 /* Updating parameters of an active instance will return a
10270 * Command Disallowed error, so we must first disable the
10271 * instance if it is active.
10273 if (!adv_instance->pending)
10274 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10276 __hci_req_setup_ext_adv_instance(&req, cp->instance);
10278 err = hci_req_run(&req, add_ext_adv_params_complete);
10281 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
10282 hdev, data, data_len);
10285 hci_remove_adv_instance(hdev, cp->instance);
10290 rp.instance = cp->instance;
10291 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
10292 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10293 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10294 err = mgmt_cmd_complete(sk, hdev->id,
10295 MGMT_OP_ADD_EXT_ADV_PARAMS,
10296 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10300 hci_dev_unlock(hdev);
10305 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
10308 struct mgmt_cp_add_ext_adv_data *cp = data;
10309 struct mgmt_rp_add_ext_adv_data rp;
10310 u8 schedule_instance = 0;
10311 struct adv_info *next_instance;
10312 struct adv_info *adv_instance;
10314 struct mgmt_pending_cmd *cmd;
10315 struct hci_request req;
10317 BT_DBG("%s", hdev->name);
10319 hci_dev_lock(hdev);
10321 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10323 if (!adv_instance) {
10324 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10325 MGMT_STATUS_INVALID_PARAMS);
10329 /* In new interface, we require that we are powered to register */
10330 if (!hdev_is_powered(hdev)) {
10331 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10332 MGMT_STATUS_REJECTED);
10333 goto clear_new_instance;
10336 if (adv_busy(hdev)) {
10337 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10339 goto clear_new_instance;
10342 /* Validate new data */
10343 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
10344 cp->adv_data_len, true) ||
10345 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
10346 cp->adv_data_len, cp->scan_rsp_len, false)) {
10347 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10348 MGMT_STATUS_INVALID_PARAMS);
10349 goto clear_new_instance;
10352 /* Set the data in the advertising instance */
10353 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
10354 cp->data, cp->scan_rsp_len,
10355 cp->data + cp->adv_data_len);
10357 /* We're good to go, update advertising data, parameters, and start
10361 hci_req_init(&req, hdev);
10363 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
10365 if (ext_adv_capable(hdev)) {
10366 __hci_req_update_adv_data(&req, cp->instance);
10367 __hci_req_update_scan_rsp_data(&req, cp->instance);
10368 __hci_req_enable_ext_advertising(&req, cp->instance);
10371 /* If using software rotation, determine next instance to use */
10373 if (hdev->cur_adv_instance == cp->instance) {
10374 /* If the currently advertised instance is being changed
10375 * then cancel the current advertising and schedule the
10376 * next instance. If there is only one instance then the
10377 * overridden advertising data will be visible right
10380 cancel_adv_timeout(hdev);
10382 next_instance = hci_get_next_instance(hdev,
10385 schedule_instance = next_instance->instance;
10386 } else if (!hdev->adv_instance_timeout) {
10387 /* Immediately advertise the new instance if no other
10388 * instance is currently being advertised.
10390 schedule_instance = cp->instance;
10393 /* If the HCI_ADVERTISING flag is set or there is no instance to
10394 * be advertised then we have no HCI communication to make.
10397 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10398 !schedule_instance) {
10399 if (adv_instance->pending) {
10400 mgmt_advertising_added(sk, hdev, cp->instance);
10401 adv_instance->pending = false;
10403 rp.instance = cp->instance;
10404 err = mgmt_cmd_complete(sk, hdev->id,
10405 MGMT_OP_ADD_EXT_ADV_DATA,
10406 MGMT_STATUS_SUCCESS, &rp,
10411 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
10415 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
10419 goto clear_new_instance;
10423 err = hci_req_run(&req, add_advertising_complete);
10426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10427 MGMT_STATUS_FAILED);
10428 mgmt_pending_remove(cmd);
10429 goto clear_new_instance;
10432 /* We were successful in updating data, so trigger advertising_added
10433 * event if this is an instance that wasn't previously advertising. If
10434 * a failure occurs in the requests we initiated, we will remove the
10435 * instance again in add_advertising_complete
10437 if (adv_instance->pending)
10438 mgmt_advertising_added(sk, hdev, cp->instance);
10442 clear_new_instance:
10443 hci_remove_adv_instance(hdev, cp->instance);
10446 hci_dev_unlock(hdev);
10451 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
10454 struct mgmt_pending_cmd *cmd;
10455 struct mgmt_cp_remove_advertising *cp;
10456 struct mgmt_rp_remove_advertising rp;
10458 bt_dev_dbg(hdev, "status %u", status);
10460 hci_dev_lock(hdev);
10462 /* A failure status here only means that we failed to disable
10463 * advertising. Otherwise, the advertising instance has been removed,
10464 * so report success.
10466 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
10471 rp.instance = cp->instance;
10473 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
10475 mgmt_pending_remove(cmd);
10478 hci_dev_unlock(hdev);
10481 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
10482 void *data, u16 data_len)
10484 struct mgmt_cp_remove_advertising *cp = data;
10485 struct mgmt_rp_remove_advertising rp;
10486 struct mgmt_pending_cmd *cmd;
10487 struct hci_request req;
10490 bt_dev_dbg(hdev, "sock %p", sk);
10492 /* Enabling the experimental LL Privay support disables support for
10495 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
10496 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
10497 MGMT_STATUS_NOT_SUPPORTED);
10499 hci_dev_lock(hdev);
10501 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
10502 err = mgmt_cmd_status(sk, hdev->id,
10503 MGMT_OP_REMOVE_ADVERTISING,
10504 MGMT_STATUS_INVALID_PARAMS);
10508 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
10509 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
10510 pending_find(MGMT_OP_SET_LE, hdev)) {
10511 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10516 if (list_empty(&hdev->adv_instances)) {
10517 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10518 MGMT_STATUS_INVALID_PARAMS);
10522 hci_req_init(&req, hdev);
10524 /* If we use extended advertising, instance is disabled and removed */
10525 if (ext_adv_capable(hdev)) {
10526 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10527 __hci_req_remove_ext_adv_instance(&req, cp->instance);
10530 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
10532 if (list_empty(&hdev->adv_instances))
10533 __hci_req_disable_advertising(&req);
10535 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
10536 * flag is set or the device isn't powered then we have no HCI
10537 * communication to make. Simply return.
10539 if (skb_queue_empty(&req.cmd_q) ||
10540 !hdev_is_powered(hdev) ||
10541 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
10542 hci_req_purge(&req);
10543 rp.instance = cp->instance;
10544 err = mgmt_cmd_complete(sk, hdev->id,
10545 MGMT_OP_REMOVE_ADVERTISING,
10546 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10550 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
10557 err = hci_req_run(&req, remove_advertising_complete);
10559 mgmt_pending_remove(cmd);
10562 hci_dev_unlock(hdev);
10567 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
10568 void *data, u16 data_len)
10570 struct mgmt_cp_get_adv_size_info *cp = data;
10571 struct mgmt_rp_get_adv_size_info rp;
10572 u32 flags, supported_flags;
10575 bt_dev_dbg(hdev, "sock %p", sk);
10577 if (!lmp_le_capable(hdev))
10578 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10579 MGMT_STATUS_REJECTED);
10581 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10582 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10583 MGMT_STATUS_INVALID_PARAMS);
10585 flags = __le32_to_cpu(cp->flags);
10587 /* The current implementation only supports a subset of the specified
10590 supported_flags = get_supported_adv_flags(hdev);
10591 if (flags & ~supported_flags)
10592 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10593 MGMT_STATUS_INVALID_PARAMS);
10595 rp.instance = cp->instance;
10596 rp.flags = cp->flags;
10597 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10598 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10600 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10601 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10606 static const struct hci_mgmt_handler mgmt_handlers[] = {
10607 { NULL }, /* 0x0000 (no command) */
10608 { read_version, MGMT_READ_VERSION_SIZE,
10610 HCI_MGMT_UNTRUSTED },
10611 { read_commands, MGMT_READ_COMMANDS_SIZE,
10613 HCI_MGMT_UNTRUSTED },
10614 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
10616 HCI_MGMT_UNTRUSTED },
10617 { read_controller_info, MGMT_READ_INFO_SIZE,
10618 HCI_MGMT_UNTRUSTED },
10619 { set_powered, MGMT_SETTING_SIZE },
10620 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
10621 { set_connectable, MGMT_SETTING_SIZE },
10622 { set_fast_connectable, MGMT_SETTING_SIZE },
10623 { set_bondable, MGMT_SETTING_SIZE },
10624 { set_link_security, MGMT_SETTING_SIZE },
10625 { set_ssp, MGMT_SETTING_SIZE },
10626 { set_hs, MGMT_SETTING_SIZE },
10627 { set_le, MGMT_SETTING_SIZE },
10628 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
10629 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
10630 { add_uuid, MGMT_ADD_UUID_SIZE },
10631 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
10632 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
10633 HCI_MGMT_VAR_LEN },
10634 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
10635 HCI_MGMT_VAR_LEN },
10636 { disconnect, MGMT_DISCONNECT_SIZE },
10637 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
10638 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
10639 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
10640 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
10641 { pair_device, MGMT_PAIR_DEVICE_SIZE },
10642 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
10643 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
10644 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
10645 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
10646 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
10647 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
10648 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
10649 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
10650 HCI_MGMT_VAR_LEN },
10651 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
10652 { start_discovery, MGMT_START_DISCOVERY_SIZE },
10653 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
10654 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
10655 { block_device, MGMT_BLOCK_DEVICE_SIZE },
10656 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
10657 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
10658 { set_advertising, MGMT_SETTING_SIZE },
10659 { set_bredr, MGMT_SETTING_SIZE },
10660 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
10661 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
10662 { set_secure_conn, MGMT_SETTING_SIZE },
10663 { set_debug_keys, MGMT_SETTING_SIZE },
10664 { set_privacy, MGMT_SET_PRIVACY_SIZE },
10665 { load_irks, MGMT_LOAD_IRKS_SIZE,
10666 HCI_MGMT_VAR_LEN },
10667 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
10668 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
10669 { add_device, MGMT_ADD_DEVICE_SIZE },
10670 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
10671 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
10672 HCI_MGMT_VAR_LEN },
10673 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
10675 HCI_MGMT_UNTRUSTED },
10676 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
10677 HCI_MGMT_UNCONFIGURED |
10678 HCI_MGMT_UNTRUSTED },
10679 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
10680 HCI_MGMT_UNCONFIGURED },
10681 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
10682 HCI_MGMT_UNCONFIGURED },
10683 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
10684 HCI_MGMT_VAR_LEN },
10685 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
10686 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
10688 HCI_MGMT_UNTRUSTED },
10689 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
10690 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
10691 HCI_MGMT_VAR_LEN },
10692 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
10693 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
10694 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
10695 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
10696 HCI_MGMT_UNTRUSTED },
10697 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
10698 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
10699 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
10700 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
10701 HCI_MGMT_VAR_LEN },
10702 { set_wideband_speech, MGMT_SETTING_SIZE },
10703 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
10704 HCI_MGMT_UNTRUSTED },
10705 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
10706 HCI_MGMT_UNTRUSTED |
10707 HCI_MGMT_HDEV_OPTIONAL },
10708 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
10710 HCI_MGMT_HDEV_OPTIONAL },
10711 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
10712 HCI_MGMT_UNTRUSTED },
10713 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
10714 HCI_MGMT_VAR_LEN },
10715 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
10716 HCI_MGMT_UNTRUSTED },
10717 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
10718 HCI_MGMT_VAR_LEN },
10719 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
10720 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
10721 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
10722 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
10723 HCI_MGMT_VAR_LEN },
10724 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
10725 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
10726 HCI_MGMT_VAR_LEN },
10727 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
10728 HCI_MGMT_VAR_LEN },
10729 { add_adv_patterns_monitor_rssi,
10730 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
10731 HCI_MGMT_VAR_LEN },
10735 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
10736 { NULL }, /* 0x0000 (no command) */
10737 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
10738 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
10739 HCI_MGMT_VAR_LEN },
10740 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
10741 HCI_MGMT_VAR_LEN },
10742 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
10743 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
10744 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
10745 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
10746 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
10747 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
10748 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
10749 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
10750 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
10751 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
10752 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
10753 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
10754 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
10755 { get_adv_tx_power, MGMT_GET_ADV_TX_POWER_SIZE },
10756 { enable_bt_6lowpan, MGMT_ENABLE_BT_6LOWPAN_SIZE },
10757 { connect_bt_6lowpan, MGMT_CONNECT_6LOWPAN_SIZE },
10758 { disconnect_bt_6lowpan, MGMT_DISCONNECT_6LOWPAN_SIZE },
10759 { read_maximum_le_data_length,
10760 MGMT_LE_READ_MAXIMUM_DATA_LENGTH_SIZE },
10761 { write_host_suggested_le_data_length,
10762 MGMT_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH_SIZE },
10766 void mgmt_index_added(struct hci_dev *hdev)
10768 struct mgmt_ev_ext_index ev;
10770 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10773 switch (hdev->dev_type) {
10775 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10776 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
10777 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10780 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
10781 HCI_MGMT_INDEX_EVENTS);
10792 ev.bus = hdev->bus;
10794 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
10795 HCI_MGMT_EXT_INDEX_EVENTS);
10798 void mgmt_index_removed(struct hci_dev *hdev)
10800 struct mgmt_ev_ext_index ev;
10801 u8 status = MGMT_STATUS_INVALID_INDEX;
10803 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10806 switch (hdev->dev_type) {
10808 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10810 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10811 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
10812 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10815 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
10816 HCI_MGMT_INDEX_EVENTS);
10827 ev.bus = hdev->bus;
10829 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
10830 HCI_MGMT_EXT_INDEX_EVENTS);
10833 /* This function requires the caller holds hdev->lock */
10834 static void restart_le_actions(struct hci_dev *hdev)
10836 struct hci_conn_params *p;
10838 list_for_each_entry(p, &hdev->le_conn_params, list) {
10839 /* Needed for AUTO_OFF case where might not "really"
10840 * have been powered off.
10842 list_del_init(&p->action);
10844 switch (p->auto_connect) {
10845 case HCI_AUTO_CONN_DIRECT:
10846 case HCI_AUTO_CONN_ALWAYS:
10847 list_add(&p->action, &hdev->pend_le_conns);
10849 case HCI_AUTO_CONN_REPORT:
10850 list_add(&p->action, &hdev->pend_le_reports);
10858 void mgmt_power_on(struct hci_dev *hdev, int err)
10860 struct cmd_lookup match = { NULL, hdev };
10862 bt_dev_dbg(hdev, "err %d", err);
10864 hci_dev_lock(hdev);
10867 restart_le_actions(hdev);
10868 hci_update_background_scan(hdev);
10871 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10873 new_settings(hdev, match.sk);
10876 sock_put(match.sk);
10878 hci_dev_unlock(hdev);
10881 void __mgmt_power_off(struct hci_dev *hdev)
10883 struct cmd_lookup match = { NULL, hdev };
10884 u8 status, zero_cod[] = { 0, 0, 0 };
10886 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10888 /* If the power off is because of hdev unregistration let
10889 * use the appropriate INVALID_INDEX status. Otherwise use
10890 * NOT_POWERED. We cover both scenarios here since later in
10891 * mgmt_index_removed() any hci_conn callbacks will have already
10892 * been triggered, potentially causing misleading DISCONNECTED
10893 * status responses.
10895 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
10896 status = MGMT_STATUS_INVALID_INDEX;
10898 status = MGMT_STATUS_NOT_POWERED;
10900 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10902 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
10903 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
10904 zero_cod, sizeof(zero_cod),
10905 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10906 ext_info_changed(hdev, NULL);
10909 new_settings(hdev, match.sk);
10912 sock_put(match.sk);
10915 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
10917 struct mgmt_pending_cmd *cmd;
10920 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10924 if (err == -ERFKILL)
10925 status = MGMT_STATUS_RFKILLED;
10927 status = MGMT_STATUS_FAILED;
10929 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
10931 mgmt_pending_remove(cmd);
10934 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
10937 struct mgmt_ev_new_link_key ev;
10939 memset(&ev, 0, sizeof(ev));
10941 ev.store_hint = persistent;
10942 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10943 ev.key.addr.type = BDADDR_BREDR;
10944 ev.key.type = key->type;
10945 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
10946 ev.key.pin_len = key->pin_len;
10948 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
10951 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
10953 switch (ltk->type) {
10955 case SMP_LTK_RESPONDER:
10956 if (ltk->authenticated)
10957 return MGMT_LTK_AUTHENTICATED;
10958 return MGMT_LTK_UNAUTHENTICATED;
10960 if (ltk->authenticated)
10961 return MGMT_LTK_P256_AUTH;
10962 return MGMT_LTK_P256_UNAUTH;
10963 case SMP_LTK_P256_DEBUG:
10964 return MGMT_LTK_P256_DEBUG;
10967 return MGMT_LTK_UNAUTHENTICATED;
10970 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
10972 struct mgmt_ev_new_long_term_key ev;
10974 memset(&ev, 0, sizeof(ev));
10976 /* Devices using resolvable or non-resolvable random addresses
10977 * without providing an identity resolving key don't require
10978 * to store long term keys. Their addresses will change the
10979 * next time around.
10981 * Only when a remote device provides an identity address
10982 * make sure the long term key is stored. If the remote
10983 * identity is known, the long term keys are internally
10984 * mapped to the identity address. So allow static random
10985 * and public addresses here.
10987 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10988 (key->bdaddr.b[5] & 0xc0) != 0xc0)
10989 ev.store_hint = 0x00;
10991 ev.store_hint = persistent;
10993 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10994 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
10995 ev.key.type = mgmt_ltk_type(key);
10996 ev.key.enc_size = key->enc_size;
10997 ev.key.ediv = key->ediv;
10998 ev.key.rand = key->rand;
11000 if (key->type == SMP_LTK)
11001 ev.key.initiator = 1;
11003 /* Make sure we copy only the significant bytes based on the
11004 * encryption key size, and set the rest of the value to zeroes.
11006 memcpy(ev.key.val, key->val, key->enc_size);
11007 memset(ev.key.val + key->enc_size, 0,
11008 sizeof(ev.key.val) - key->enc_size);
11010 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
11013 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
11015 struct mgmt_ev_new_irk ev;
11017 memset(&ev, 0, sizeof(ev));
11019 ev.store_hint = persistent;
11021 bacpy(&ev.rpa, &irk->rpa);
11022 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
11023 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
11024 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
11026 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
11029 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
11032 struct mgmt_ev_new_csrk ev;
11034 memset(&ev, 0, sizeof(ev));
11036 /* Devices using resolvable or non-resolvable random addresses
11037 * without providing an identity resolving key don't require
11038 * to store signature resolving keys. Their addresses will change
11039 * the next time around.
11041 * Only when a remote device provides an identity address
11042 * make sure the signature resolving key is stored. So allow
11043 * static random and public addresses here.
11045 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11046 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
11047 ev.store_hint = 0x00;
11049 ev.store_hint = persistent;
11051 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
11052 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
11053 ev.key.type = csrk->type;
11054 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
11056 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
11059 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
11060 u8 bdaddr_type, u8 store_hint, u16 min_interval,
11061 u16 max_interval, u16 latency, u16 timeout)
11063 struct mgmt_ev_new_conn_param ev;
11065 if (!hci_is_identity_address(bdaddr, bdaddr_type))
11068 memset(&ev, 0, sizeof(ev));
11069 bacpy(&ev.addr.bdaddr, bdaddr);
11070 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
11071 ev.store_hint = store_hint;
11072 ev.min_interval = cpu_to_le16(min_interval);
11073 ev.max_interval = cpu_to_le16(max_interval);
11074 ev.latency = cpu_to_le16(latency);
11075 ev.timeout = cpu_to_le16(timeout);
11077 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
11080 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
11081 u8 *name, u8 name_len)
11084 struct mgmt_ev_device_connected *ev = (void *) buf;
11088 bacpy(&ev->addr.bdaddr, &conn->dst);
11089 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11092 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
11094 ev->flags = __cpu_to_le32(flags);
11096 /* We must ensure that the EIR Data fields are ordered and
11097 * unique. Keep it simple for now and avoid the problem by not
11098 * adding any BR/EDR data to the LE adv.
11100 if (conn->le_adv_data_len > 0) {
11101 memcpy(&ev->eir[eir_len],
11102 conn->le_adv_data, conn->le_adv_data_len);
11103 eir_len = conn->le_adv_data_len;
11106 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
11109 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
11110 eir_len = eir_append_data(ev->eir, eir_len,
11112 conn->dev_class, 3);
11115 ev->eir_len = cpu_to_le16(eir_len);
11117 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
11118 sizeof(*ev) + eir_len, NULL);
11121 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
11123 struct sock **sk = data;
11125 cmd->cmd_complete(cmd, 0);
11130 mgmt_pending_remove(cmd);
11133 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
11135 struct hci_dev *hdev = data;
11136 struct mgmt_cp_unpair_device *cp = cmd->param;
11138 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
11140 cmd->cmd_complete(cmd, 0);
11141 mgmt_pending_remove(cmd);
11144 bool mgmt_powering_down(struct hci_dev *hdev)
11146 struct mgmt_pending_cmd *cmd;
11147 struct mgmt_mode *cp;
11149 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11160 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
11161 u8 link_type, u8 addr_type, u8 reason,
11162 bool mgmt_connected)
11164 struct mgmt_ev_device_disconnected ev;
11165 struct sock *sk = NULL;
11167 /* The connection is still in hci_conn_hash so test for 1
11168 * instead of 0 to know if this is the last one.
11170 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11171 cancel_delayed_work(&hdev->power_off);
11172 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11175 if (!mgmt_connected)
11178 if (link_type != ACL_LINK && link_type != LE_LINK)
11181 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
11183 bacpy(&ev.addr.bdaddr, bdaddr);
11184 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11185 ev.reason = reason;
11187 /* Report disconnects due to suspend */
11188 if (hdev->suspended)
11189 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
11191 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
11196 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11200 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
11201 u8 link_type, u8 addr_type, u8 status)
11203 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
11204 struct mgmt_cp_disconnect *cp;
11205 struct mgmt_pending_cmd *cmd;
11207 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11210 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
11216 if (bacmp(bdaddr, &cp->addr.bdaddr))
11219 if (cp->addr.type != bdaddr_type)
11222 cmd->cmd_complete(cmd, mgmt_status(status));
11223 mgmt_pending_remove(cmd);
11226 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11227 u8 addr_type, u8 status)
11229 struct mgmt_ev_connect_failed ev;
11231 /* The connection is still in hci_conn_hash so test for 1
11232 * instead of 0 to know if this is the last one.
11234 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11235 cancel_delayed_work(&hdev->power_off);
11236 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11239 bacpy(&ev.addr.bdaddr, bdaddr);
11240 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11241 ev.status = mgmt_status(status);
11243 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
11246 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
11248 struct mgmt_ev_pin_code_request ev;
11250 bacpy(&ev.addr.bdaddr, bdaddr);
11251 ev.addr.type = BDADDR_BREDR;
11252 ev.secure = secure;
11254 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
11257 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11260 struct mgmt_pending_cmd *cmd;
11262 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
11266 cmd->cmd_complete(cmd, mgmt_status(status));
11267 mgmt_pending_remove(cmd);
11270 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11273 struct mgmt_pending_cmd *cmd;
11275 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
11279 cmd->cmd_complete(cmd, mgmt_status(status));
11280 mgmt_pending_remove(cmd);
11283 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11284 u8 link_type, u8 addr_type, u32 value,
11287 struct mgmt_ev_user_confirm_request ev;
11289 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11291 bacpy(&ev.addr.bdaddr, bdaddr);
11292 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11293 ev.confirm_hint = confirm_hint;
11294 ev.value = cpu_to_le32(value);
11296 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
11300 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11301 u8 link_type, u8 addr_type)
11303 struct mgmt_ev_user_passkey_request ev;
11305 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11307 bacpy(&ev.addr.bdaddr, bdaddr);
11308 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11310 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
11314 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11315 u8 link_type, u8 addr_type, u8 status,
11318 struct mgmt_pending_cmd *cmd;
11320 cmd = pending_find(opcode, hdev);
11324 cmd->cmd_complete(cmd, mgmt_status(status));
11325 mgmt_pending_remove(cmd);
11330 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11331 u8 link_type, u8 addr_type, u8 status)
11333 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11334 status, MGMT_OP_USER_CONFIRM_REPLY);
11337 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11338 u8 link_type, u8 addr_type, u8 status)
11340 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11342 MGMT_OP_USER_CONFIRM_NEG_REPLY);
11345 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11346 u8 link_type, u8 addr_type, u8 status)
11348 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11349 status, MGMT_OP_USER_PASSKEY_REPLY);
11352 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11353 u8 link_type, u8 addr_type, u8 status)
11355 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11357 MGMT_OP_USER_PASSKEY_NEG_REPLY);
11360 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
11361 u8 link_type, u8 addr_type, u32 passkey,
11364 struct mgmt_ev_passkey_notify ev;
11366 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11368 bacpy(&ev.addr.bdaddr, bdaddr);
11369 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11370 ev.passkey = __cpu_to_le32(passkey);
11371 ev.entered = entered;
11373 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
11376 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
11378 struct mgmt_ev_auth_failed ev;
11379 struct mgmt_pending_cmd *cmd;
11380 u8 status = mgmt_status(hci_status);
11382 bacpy(&ev.addr.bdaddr, &conn->dst);
11383 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11384 ev.status = status;
11386 cmd = find_pairing(conn);
11388 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
11389 cmd ? cmd->sk : NULL);
11392 cmd->cmd_complete(cmd, status);
11393 mgmt_pending_remove(cmd);
11397 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
11399 struct cmd_lookup match = { NULL, hdev };
11403 u8 mgmt_err = mgmt_status(status);
11404 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
11405 cmd_status_rsp, &mgmt_err);
11409 if (test_bit(HCI_AUTH, &hdev->flags))
11410 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
11412 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
11414 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
11418 new_settings(hdev, match.sk);
11421 sock_put(match.sk);
11424 static void clear_eir(struct hci_request *req)
11426 struct hci_dev *hdev = req->hdev;
11427 struct hci_cp_write_eir cp;
11429 if (!lmp_ext_inq_capable(hdev))
11432 memset(hdev->eir, 0, sizeof(hdev->eir));
11434 memset(&cp, 0, sizeof(cp));
11436 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
11439 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
11441 struct cmd_lookup match = { NULL, hdev };
11442 struct hci_request req;
11443 bool changed = false;
11446 u8 mgmt_err = mgmt_status(status);
11448 if (enable && hci_dev_test_and_clear_flag(hdev,
11449 HCI_SSP_ENABLED)) {
11450 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11451 new_settings(hdev, NULL);
11454 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
11460 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
11462 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
11464 changed = hci_dev_test_and_clear_flag(hdev,
11467 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11470 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
11473 new_settings(hdev, match.sk);
11476 sock_put(match.sk);
11478 hci_req_init(&req, hdev);
11480 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
11481 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
11482 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
11483 sizeof(enable), &enable);
11484 __hci_req_update_eir(&req);
11489 hci_req_run(&req, NULL);
11492 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11494 struct cmd_lookup *match = data;
11496 if (match->sk == NULL) {
11497 match->sk = cmd->sk;
11498 sock_hold(match->sk);
11502 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11505 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11507 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11508 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11509 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11512 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11513 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11514 ext_info_changed(hdev, NULL);
11518 sock_put(match.sk);
11521 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11523 struct mgmt_cp_set_local_name ev;
11524 struct mgmt_pending_cmd *cmd;
11529 memset(&ev, 0, sizeof(ev));
11530 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11531 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11533 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11535 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11537 /* If this is a HCI command related to powering on the
11538 * HCI dev don't send any mgmt signals.
11540 if (pending_find(MGMT_OP_SET_POWERED, hdev))
11544 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11545 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11546 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
11549 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11553 for (i = 0; i < uuid_count; i++) {
11554 if (!memcmp(uuid, uuids[i], 16))
11561 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
11565 while (parsed < eir_len) {
11566 u8 field_len = eir[0];
11570 if (field_len == 0)
11573 if (eir_len - parsed < field_len + 1)
11577 case EIR_UUID16_ALL:
11578 case EIR_UUID16_SOME:
11579 for (i = 0; i + 3 <= field_len; i += 2) {
11580 memcpy(uuid, bluetooth_base_uuid, 16);
11581 uuid[13] = eir[i + 3];
11582 uuid[12] = eir[i + 2];
11583 if (has_uuid(uuid, uuid_count, uuids))
11587 case EIR_UUID32_ALL:
11588 case EIR_UUID32_SOME:
11589 for (i = 0; i + 5 <= field_len; i += 4) {
11590 memcpy(uuid, bluetooth_base_uuid, 16);
11591 uuid[15] = eir[i + 5];
11592 uuid[14] = eir[i + 4];
11593 uuid[13] = eir[i + 3];
11594 uuid[12] = eir[i + 2];
11595 if (has_uuid(uuid, uuid_count, uuids))
11599 case EIR_UUID128_ALL:
11600 case EIR_UUID128_SOME:
11601 for (i = 0; i + 17 <= field_len; i += 16) {
11602 memcpy(uuid, eir + i + 2, 16);
11603 if (has_uuid(uuid, uuid_count, uuids))
11609 parsed += field_len + 1;
11610 eir += field_len + 1;
11616 static void restart_le_scan(struct hci_dev *hdev)
11618 /* If controller is not scanning we are done. */
11619 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
11622 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
11623 hdev->discovery.scan_start +
11624 hdev->discovery.scan_duration))
11627 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
11628 DISCOV_LE_RESTART_DELAY);
11631 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
11632 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11634 /* If a RSSI threshold has been specified, and
11635 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
11636 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
11637 * is set, let it through for further processing, as we might need to
11638 * restart the scan.
11640 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
11641 * the results are also dropped.
11643 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11644 (rssi == HCI_RSSI_INVALID ||
11645 (rssi < hdev->discovery.rssi &&
11646 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
11649 if (hdev->discovery.uuid_count != 0) {
11650 /* If a list of UUIDs is provided in filter, results with no
11651 * matching UUID should be dropped.
11653 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
11654 hdev->discovery.uuids) &&
11655 !eir_has_uuids(scan_rsp, scan_rsp_len,
11656 hdev->discovery.uuid_count,
11657 hdev->discovery.uuids))
11661 /* If duplicate filtering does not report RSSI changes, then restart
11662 * scanning to ensure updated result with updated RSSI values.
11664 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
11665 restart_le_scan(hdev);
11667 /* Validate RSSI value against the RSSI threshold once more. */
11668 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11669 rssi < hdev->discovery.rssi)
11676 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11677 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
11678 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11681 struct mgmt_ev_device_found *ev = (void *)buf;
11684 /* Don't send events for a non-kernel initiated discovery. With
11685 * LE one exception is if we have pend_le_reports > 0 in which
11686 * case we're doing passive scanning and want these events.
11688 if (!hci_discovery_active(hdev)) {
11689 if (link_type == ACL_LINK)
11691 if (link_type == LE_LINK &&
11692 list_empty(&hdev->pend_le_reports) &&
11693 !hci_is_adv_monitoring(hdev)) {
11698 if (hdev->discovery.result_filtering) {
11699 /* We are using service discovery */
11700 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
11705 if (hdev->discovery.limited) {
11706 /* Check for limited discoverable bit */
11708 if (!(dev_class[1] & 0x20))
11711 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
11712 if (!flags || !(flags[0] & LE_AD_LIMITED))
11717 /* Make sure that the buffer is big enough. The 5 extra bytes
11718 * are for the potential CoD field.
11720 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
11723 memset(buf, 0, sizeof(buf));
11725 /* In case of device discovery with BR/EDR devices (pre 1.2), the
11726 * RSSI value was reported as 0 when not available. This behavior
11727 * is kept when using device discovery. This is required for full
11728 * backwards compatibility with the API.
11730 * However when using service discovery, the value 127 will be
11731 * returned when the RSSI is not available.
11733 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
11734 link_type == ACL_LINK)
11737 bacpy(&ev->addr.bdaddr, bdaddr);
11738 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11740 ev->flags = cpu_to_le32(flags);
11743 /* Copy EIR or advertising data into event */
11744 memcpy(ev->eir, eir, eir_len);
11746 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11748 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11751 if (scan_rsp_len > 0)
11752 /* Append scan response data to event */
11753 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
11755 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
11756 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
11758 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
11761 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11762 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
11764 struct mgmt_ev_device_found *ev;
11765 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
11768 ev = (struct mgmt_ev_device_found *) buf;
11770 memset(buf, 0, sizeof(buf));
11772 bacpy(&ev->addr.bdaddr, bdaddr);
11773 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11776 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
11779 ev->eir_len = cpu_to_le16(eir_len);
11781 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
11784 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
11786 struct mgmt_ev_discovering ev;
11788 bt_dev_dbg(hdev, "discovering %u", discovering);
11790 memset(&ev, 0, sizeof(ev));
11791 ev.type = hdev->discovery.type;
11792 ev.discovering = discovering;
11794 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
11797 void mgmt_suspending(struct hci_dev *hdev, u8 state)
11799 struct mgmt_ev_controller_suspend ev;
11801 ev.suspend_state = state;
11802 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
11805 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
11808 struct mgmt_ev_controller_resume ev;
11810 ev.wake_reason = reason;
11812 bacpy(&ev.addr.bdaddr, bdaddr);
11813 ev.addr.type = addr_type;
11815 memset(&ev.addr, 0, sizeof(ev.addr));
11818 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
11821 static struct hci_mgmt_chan chan = {
11822 .channel = HCI_CHANNEL_CONTROL,
11823 .handler_count = ARRAY_SIZE(mgmt_handlers),
11824 .handlers = mgmt_handlers,
11826 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
11827 .tizen_handlers = tizen_mgmt_handlers,
11829 .hdev_init = mgmt_init_hdev,
11832 int mgmt_init(void)
11834 return hci_mgmt_chan_register(&chan);
11837 void mgmt_exit(void)
11839 hci_mgmt_chan_unregister(&chan);