2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
46 #define MGMT_VERSION 1
47 #define MGMT_REVISION 21
49 static const u16 mgmt_commands[] = {
50 MGMT_OP_READ_INDEX_LIST,
53 MGMT_OP_SET_DISCOVERABLE,
54 MGMT_OP_SET_CONNECTABLE,
55 MGMT_OP_SET_FAST_CONNECTABLE,
57 MGMT_OP_SET_LINK_SECURITY,
61 MGMT_OP_SET_DEV_CLASS,
62 MGMT_OP_SET_LOCAL_NAME,
65 MGMT_OP_LOAD_LINK_KEYS,
66 MGMT_OP_LOAD_LONG_TERM_KEYS,
68 MGMT_OP_GET_CONNECTIONS,
69 MGMT_OP_PIN_CODE_REPLY,
70 MGMT_OP_PIN_CODE_NEG_REPLY,
71 MGMT_OP_SET_IO_CAPABILITY,
73 MGMT_OP_CANCEL_PAIR_DEVICE,
74 MGMT_OP_UNPAIR_DEVICE,
75 MGMT_OP_USER_CONFIRM_REPLY,
76 MGMT_OP_USER_CONFIRM_NEG_REPLY,
77 MGMT_OP_USER_PASSKEY_REPLY,
78 MGMT_OP_USER_PASSKEY_NEG_REPLY,
79 MGMT_OP_READ_LOCAL_OOB_DATA,
80 MGMT_OP_ADD_REMOTE_OOB_DATA,
81 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
82 MGMT_OP_START_DISCOVERY,
83 MGMT_OP_STOP_DISCOVERY,
86 MGMT_OP_UNBLOCK_DEVICE,
87 MGMT_OP_SET_DEVICE_ID,
88 MGMT_OP_SET_ADVERTISING,
90 MGMT_OP_SET_STATIC_ADDRESS,
91 MGMT_OP_SET_SCAN_PARAMS,
92 MGMT_OP_SET_SECURE_CONN,
93 MGMT_OP_SET_DEBUG_KEYS,
96 MGMT_OP_GET_CONN_INFO,
97 MGMT_OP_GET_CLOCK_INFO,
99 MGMT_OP_REMOVE_DEVICE,
100 MGMT_OP_LOAD_CONN_PARAM,
101 MGMT_OP_READ_UNCONF_INDEX_LIST,
102 MGMT_OP_READ_CONFIG_INFO,
103 MGMT_OP_SET_EXTERNAL_CONFIG,
104 MGMT_OP_SET_PUBLIC_ADDRESS,
105 MGMT_OP_START_SERVICE_DISCOVERY,
106 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
107 MGMT_OP_READ_EXT_INDEX_LIST,
108 MGMT_OP_READ_ADV_FEATURES,
109 MGMT_OP_ADD_ADVERTISING,
110 MGMT_OP_REMOVE_ADVERTISING,
111 MGMT_OP_GET_ADV_SIZE_INFO,
112 MGMT_OP_START_LIMITED_DISCOVERY,
113 MGMT_OP_READ_EXT_INFO,
114 MGMT_OP_SET_APPEARANCE,
115 MGMT_OP_GET_PHY_CONFIGURATION,
116 MGMT_OP_SET_PHY_CONFIGURATION,
117 MGMT_OP_SET_BLOCKED_KEYS,
118 MGMT_OP_SET_WIDEBAND_SPEECH,
119 MGMT_OP_READ_CONTROLLER_CAP,
120 MGMT_OP_READ_EXP_FEATURES_INFO,
121 MGMT_OP_SET_EXP_FEATURE,
122 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
123 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
124 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
125 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
126 MGMT_OP_GET_DEVICE_FLAGS,
127 MGMT_OP_SET_DEVICE_FLAGS,
128 MGMT_OP_READ_ADV_MONITOR_FEATURES,
129 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
130 MGMT_OP_REMOVE_ADV_MONITOR,
131 MGMT_OP_ADD_EXT_ADV_PARAMS,
132 MGMT_OP_ADD_EXT_ADV_DATA,
133 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136 static const u16 mgmt_events[] = {
137 MGMT_EV_CONTROLLER_ERROR,
139 MGMT_EV_INDEX_REMOVED,
140 MGMT_EV_NEW_SETTINGS,
141 MGMT_EV_CLASS_OF_DEV_CHANGED,
142 MGMT_EV_LOCAL_NAME_CHANGED,
143 MGMT_EV_NEW_LINK_KEY,
144 MGMT_EV_NEW_LONG_TERM_KEY,
145 MGMT_EV_DEVICE_CONNECTED,
146 MGMT_EV_DEVICE_DISCONNECTED,
147 MGMT_EV_CONNECT_FAILED,
148 MGMT_EV_PIN_CODE_REQUEST,
149 MGMT_EV_USER_CONFIRM_REQUEST,
150 MGMT_EV_USER_PASSKEY_REQUEST,
152 MGMT_EV_DEVICE_FOUND,
154 MGMT_EV_DEVICE_BLOCKED,
155 MGMT_EV_DEVICE_UNBLOCKED,
156 MGMT_EV_DEVICE_UNPAIRED,
157 MGMT_EV_PASSKEY_NOTIFY,
160 MGMT_EV_DEVICE_ADDED,
161 MGMT_EV_DEVICE_REMOVED,
162 MGMT_EV_NEW_CONN_PARAM,
163 MGMT_EV_UNCONF_INDEX_ADDED,
164 MGMT_EV_UNCONF_INDEX_REMOVED,
165 MGMT_EV_NEW_CONFIG_OPTIONS,
166 MGMT_EV_EXT_INDEX_ADDED,
167 MGMT_EV_EXT_INDEX_REMOVED,
168 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
169 MGMT_EV_ADVERTISING_ADDED,
170 MGMT_EV_ADVERTISING_REMOVED,
171 MGMT_EV_EXT_INFO_CHANGED,
172 MGMT_EV_PHY_CONFIGURATION_CHANGED,
173 MGMT_EV_EXP_FEATURE_CHANGED,
174 MGMT_EV_DEVICE_FLAGS_CHANGED,
175 MGMT_EV_ADV_MONITOR_ADDED,
176 MGMT_EV_ADV_MONITOR_REMOVED,
177 MGMT_EV_CONTROLLER_SUSPEND,
178 MGMT_EV_CONTROLLER_RESUME,
181 static const u16 mgmt_untrusted_commands[] = {
182 MGMT_OP_READ_INDEX_LIST,
184 MGMT_OP_READ_UNCONF_INDEX_LIST,
185 MGMT_OP_READ_CONFIG_INFO,
186 MGMT_OP_READ_EXT_INDEX_LIST,
187 MGMT_OP_READ_EXT_INFO,
188 MGMT_OP_READ_CONTROLLER_CAP,
189 MGMT_OP_READ_EXP_FEATURES_INFO,
190 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
191 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
194 static const u16 mgmt_untrusted_events[] = {
196 MGMT_EV_INDEX_REMOVED,
197 MGMT_EV_NEW_SETTINGS,
198 MGMT_EV_CLASS_OF_DEV_CHANGED,
199 MGMT_EV_LOCAL_NAME_CHANGED,
200 MGMT_EV_UNCONF_INDEX_ADDED,
201 MGMT_EV_UNCONF_INDEX_REMOVED,
202 MGMT_EV_NEW_CONFIG_OPTIONS,
203 MGMT_EV_EXT_INDEX_ADDED,
204 MGMT_EV_EXT_INDEX_REMOVED,
205 MGMT_EV_EXT_INFO_CHANGED,
206 MGMT_EV_EXP_FEATURE_CHANGED,
209 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 "\x00\x00\x00\x00\x00\x00\x00\x00"
214 /* HCI to MGMT error code conversion table */
215 static const u8 mgmt_status_table[] = {
217 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
218 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
219 MGMT_STATUS_FAILED, /* Hardware Failure */
220 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
221 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
222 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
223 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
224 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
226 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
227 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
228 MGMT_STATUS_BUSY, /* Command Disallowed */
229 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
230 MGMT_STATUS_REJECTED, /* Rejected Security */
231 MGMT_STATUS_REJECTED, /* Rejected Personal */
232 MGMT_STATUS_TIMEOUT, /* Host Timeout */
233 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
234 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
235 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
236 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
237 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
238 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
239 MGMT_STATUS_BUSY, /* Repeated Attempts */
240 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
241 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
242 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
243 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
244 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
245 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
246 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
247 MGMT_STATUS_FAILED, /* Unspecified Error */
248 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
249 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
250 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
251 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
252 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
253 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
254 MGMT_STATUS_FAILED, /* Unit Link Key Used */
255 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
256 MGMT_STATUS_TIMEOUT, /* Instant Passed */
257 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
258 MGMT_STATUS_FAILED, /* Transaction Collision */
259 MGMT_STATUS_FAILED, /* Reserved for future use */
260 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
261 MGMT_STATUS_REJECTED, /* QoS Rejected */
262 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
263 MGMT_STATUS_REJECTED, /* Insufficient Security */
264 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
265 MGMT_STATUS_FAILED, /* Reserved for future use */
266 MGMT_STATUS_BUSY, /* Role Switch Pending */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_FAILED, /* Slot Violation */
269 MGMT_STATUS_FAILED, /* Role Switch Failed */
270 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
271 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
272 MGMT_STATUS_BUSY, /* Host Busy Pairing */
273 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
274 MGMT_STATUS_BUSY, /* Controller Busy */
275 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
276 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
277 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
278 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
279 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
282 static u8 mgmt_status(u8 hci_status)
284 if (hci_status < ARRAY_SIZE(mgmt_status_table))
285 return mgmt_status_table[hci_status];
287 return MGMT_STATUS_FAILED;
290 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
293 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
297 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
298 u16 len, int flag, struct sock *skip_sk)
300 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
304 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
305 struct sock *skip_sk)
307 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
308 HCI_SOCK_TRUSTED, skip_sk);
311 static u8 le_addr_type(u8 mgmt_addr_type)
313 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
314 return ADDR_LE_DEV_PUBLIC;
316 return ADDR_LE_DEV_RANDOM;
319 void mgmt_fill_version_info(void *ver)
321 struct mgmt_rp_read_version *rp = ver;
323 rp->version = MGMT_VERSION;
324 rp->revision = cpu_to_le16(MGMT_REVISION);
327 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
330 struct mgmt_rp_read_version rp;
332 bt_dev_dbg(hdev, "sock %p", sk);
334 mgmt_fill_version_info(&rp);
336 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
340 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
343 struct mgmt_rp_read_commands *rp;
344 u16 num_commands, num_events;
348 bt_dev_dbg(hdev, "sock %p", sk);
350 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
351 num_commands = ARRAY_SIZE(mgmt_commands);
352 num_events = ARRAY_SIZE(mgmt_events);
354 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
355 num_events = ARRAY_SIZE(mgmt_untrusted_events);
358 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
360 rp = kmalloc(rp_size, GFP_KERNEL);
364 rp->num_commands = cpu_to_le16(num_commands);
365 rp->num_events = cpu_to_le16(num_events);
367 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
368 __le16 *opcode = rp->opcodes;
370 for (i = 0; i < num_commands; i++, opcode++)
371 put_unaligned_le16(mgmt_commands[i], opcode);
373 for (i = 0; i < num_events; i++, opcode++)
374 put_unaligned_le16(mgmt_events[i], opcode);
376 __le16 *opcode = rp->opcodes;
378 for (i = 0; i < num_commands; i++, opcode++)
379 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
381 for (i = 0; i < num_events; i++, opcode++)
382 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
385 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
392 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
395 struct mgmt_rp_read_index_list *rp;
401 bt_dev_dbg(hdev, "sock %p", sk);
403 read_lock(&hci_dev_list_lock);
406 list_for_each_entry(d, &hci_dev_list, list) {
407 if (d->dev_type == HCI_PRIMARY &&
408 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
412 rp_len = sizeof(*rp) + (2 * count);
413 rp = kmalloc(rp_len, GFP_ATOMIC);
415 read_unlock(&hci_dev_list_lock);
420 list_for_each_entry(d, &hci_dev_list, list) {
421 if (hci_dev_test_flag(d, HCI_SETUP) ||
422 hci_dev_test_flag(d, HCI_CONFIG) ||
423 hci_dev_test_flag(d, HCI_USER_CHANNEL))
426 /* Devices marked as raw-only are neither configured
427 * nor unconfigured controllers.
429 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
432 if (d->dev_type == HCI_PRIMARY &&
433 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
434 rp->index[count++] = cpu_to_le16(d->id);
435 bt_dev_dbg(hdev, "Added hci%u", d->id);
439 rp->num_controllers = cpu_to_le16(count);
440 rp_len = sizeof(*rp) + (2 * count);
442 read_unlock(&hci_dev_list_lock);
444 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
452 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
453 void *data, u16 data_len)
455 struct mgmt_rp_read_unconf_index_list *rp;
461 bt_dev_dbg(hdev, "sock %p", sk);
463 read_lock(&hci_dev_list_lock);
466 list_for_each_entry(d, &hci_dev_list, list) {
467 if (d->dev_type == HCI_PRIMARY &&
468 hci_dev_test_flag(d, HCI_UNCONFIGURED))
472 rp_len = sizeof(*rp) + (2 * count);
473 rp = kmalloc(rp_len, GFP_ATOMIC);
475 read_unlock(&hci_dev_list_lock);
480 list_for_each_entry(d, &hci_dev_list, list) {
481 if (hci_dev_test_flag(d, HCI_SETUP) ||
482 hci_dev_test_flag(d, HCI_CONFIG) ||
483 hci_dev_test_flag(d, HCI_USER_CHANNEL))
486 /* Devices marked as raw-only are neither configured
487 * nor unconfigured controllers.
489 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
492 if (d->dev_type == HCI_PRIMARY &&
493 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
494 rp->index[count++] = cpu_to_le16(d->id);
495 bt_dev_dbg(hdev, "Added hci%u", d->id);
499 rp->num_controllers = cpu_to_le16(count);
500 rp_len = sizeof(*rp) + (2 * count);
502 read_unlock(&hci_dev_list_lock);
504 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
505 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
512 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
513 void *data, u16 data_len)
515 struct mgmt_rp_read_ext_index_list *rp;
520 bt_dev_dbg(hdev, "sock %p", sk);
522 read_lock(&hci_dev_list_lock);
525 list_for_each_entry(d, &hci_dev_list, list) {
526 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
530 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
532 read_unlock(&hci_dev_list_lock);
537 list_for_each_entry(d, &hci_dev_list, list) {
538 if (hci_dev_test_flag(d, HCI_SETUP) ||
539 hci_dev_test_flag(d, HCI_CONFIG) ||
540 hci_dev_test_flag(d, HCI_USER_CHANNEL))
543 /* Devices marked as raw-only are neither configured
544 * nor unconfigured controllers.
546 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
549 if (d->dev_type == HCI_PRIMARY) {
550 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
551 rp->entry[count].type = 0x01;
553 rp->entry[count].type = 0x00;
554 } else if (d->dev_type == HCI_AMP) {
555 rp->entry[count].type = 0x02;
560 rp->entry[count].bus = d->bus;
561 rp->entry[count++].index = cpu_to_le16(d->id);
562 bt_dev_dbg(hdev, "Added hci%u", d->id);
565 rp->num_controllers = cpu_to_le16(count);
567 read_unlock(&hci_dev_list_lock);
569 /* If this command is called at least once, then all the
570 * default index and unconfigured index events are disabled
571 * and from now on only extended index events are used.
573 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
574 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
575 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
577 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
578 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
579 struct_size(rp, entry, count));
586 static bool is_configured(struct hci_dev *hdev)
588 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
589 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
592 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
593 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
594 !bacmp(&hdev->public_addr, BDADDR_ANY))
600 static __le32 get_missing_options(struct hci_dev *hdev)
604 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
605 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
606 options |= MGMT_OPTION_EXTERNAL_CONFIG;
608 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
609 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
610 !bacmp(&hdev->public_addr, BDADDR_ANY))
611 options |= MGMT_OPTION_PUBLIC_ADDRESS;
613 return cpu_to_le32(options);
616 static int new_options(struct hci_dev *hdev, struct sock *skip)
618 __le32 options = get_missing_options(hdev);
620 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
621 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
624 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
626 __le32 options = get_missing_options(hdev);
628 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
632 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
633 void *data, u16 data_len)
635 struct mgmt_rp_read_config_info rp;
638 bt_dev_dbg(hdev, "sock %p", sk);
642 memset(&rp, 0, sizeof(rp));
643 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
645 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
646 options |= MGMT_OPTION_EXTERNAL_CONFIG;
648 if (hdev->set_bdaddr)
649 options |= MGMT_OPTION_PUBLIC_ADDRESS;
651 rp.supported_options = cpu_to_le32(options);
652 rp.missing_options = get_missing_options(hdev);
654 hci_dev_unlock(hdev);
656 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
660 static u32 get_supported_phys(struct hci_dev *hdev)
662 u32 supported_phys = 0;
664 if (lmp_bredr_capable(hdev)) {
665 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
667 if (hdev->features[0][0] & LMP_3SLOT)
668 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
670 if (hdev->features[0][0] & LMP_5SLOT)
671 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
673 if (lmp_edr_2m_capable(hdev)) {
674 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
676 if (lmp_edr_3slot_capable(hdev))
677 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
679 if (lmp_edr_5slot_capable(hdev))
680 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
682 if (lmp_edr_3m_capable(hdev)) {
683 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
685 if (lmp_edr_3slot_capable(hdev))
686 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
688 if (lmp_edr_5slot_capable(hdev))
689 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
694 if (lmp_le_capable(hdev)) {
695 supported_phys |= MGMT_PHY_LE_1M_TX;
696 supported_phys |= MGMT_PHY_LE_1M_RX;
698 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
699 supported_phys |= MGMT_PHY_LE_2M_TX;
700 supported_phys |= MGMT_PHY_LE_2M_RX;
703 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
704 supported_phys |= MGMT_PHY_LE_CODED_TX;
705 supported_phys |= MGMT_PHY_LE_CODED_RX;
709 return supported_phys;
712 static u32 get_selected_phys(struct hci_dev *hdev)
714 u32 selected_phys = 0;
716 if (lmp_bredr_capable(hdev)) {
717 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
719 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
720 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
722 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
723 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
725 if (lmp_edr_2m_capable(hdev)) {
726 if (!(hdev->pkt_type & HCI_2DH1))
727 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
729 if (lmp_edr_3slot_capable(hdev) &&
730 !(hdev->pkt_type & HCI_2DH3))
731 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
733 if (lmp_edr_5slot_capable(hdev) &&
734 !(hdev->pkt_type & HCI_2DH5))
735 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
737 if (lmp_edr_3m_capable(hdev)) {
738 if (!(hdev->pkt_type & HCI_3DH1))
739 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
741 if (lmp_edr_3slot_capable(hdev) &&
742 !(hdev->pkt_type & HCI_3DH3))
743 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
745 if (lmp_edr_5slot_capable(hdev) &&
746 !(hdev->pkt_type & HCI_3DH5))
747 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
752 if (lmp_le_capable(hdev)) {
753 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
754 selected_phys |= MGMT_PHY_LE_1M_TX;
756 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
757 selected_phys |= MGMT_PHY_LE_1M_RX;
759 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
760 selected_phys |= MGMT_PHY_LE_2M_TX;
762 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
763 selected_phys |= MGMT_PHY_LE_2M_RX;
765 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
766 selected_phys |= MGMT_PHY_LE_CODED_TX;
768 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
769 selected_phys |= MGMT_PHY_LE_CODED_RX;
772 return selected_phys;
775 static u32 get_configurable_phys(struct hci_dev *hdev)
777 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
778 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
781 static u32 get_supported_settings(struct hci_dev *hdev)
785 settings |= MGMT_SETTING_POWERED;
786 settings |= MGMT_SETTING_BONDABLE;
787 settings |= MGMT_SETTING_DEBUG_KEYS;
788 settings |= MGMT_SETTING_CONNECTABLE;
789 settings |= MGMT_SETTING_DISCOVERABLE;
791 if (lmp_bredr_capable(hdev)) {
792 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
793 settings |= MGMT_SETTING_FAST_CONNECTABLE;
794 settings |= MGMT_SETTING_BREDR;
795 settings |= MGMT_SETTING_LINK_SECURITY;
797 if (lmp_ssp_capable(hdev)) {
798 settings |= MGMT_SETTING_SSP;
799 if (IS_ENABLED(CONFIG_BT_HS))
800 settings |= MGMT_SETTING_HS;
803 if (lmp_sc_capable(hdev))
804 settings |= MGMT_SETTING_SECURE_CONN;
806 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
808 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
811 if (lmp_le_capable(hdev)) {
812 settings |= MGMT_SETTING_LE;
813 settings |= MGMT_SETTING_SECURE_CONN;
814 settings |= MGMT_SETTING_PRIVACY;
815 settings |= MGMT_SETTING_STATIC_ADDRESS;
817 /* When the experimental feature for LL Privacy support is
818 * enabled, then advertising is no longer supported.
820 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
821 settings |= MGMT_SETTING_ADVERTISING;
824 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
826 settings |= MGMT_SETTING_CONFIGURATION;
828 settings |= MGMT_SETTING_PHY_CONFIGURATION;
833 static u32 get_current_settings(struct hci_dev *hdev)
837 if (hdev_is_powered(hdev))
838 settings |= MGMT_SETTING_POWERED;
840 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
841 settings |= MGMT_SETTING_CONNECTABLE;
843 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
844 settings |= MGMT_SETTING_FAST_CONNECTABLE;
846 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
847 settings |= MGMT_SETTING_DISCOVERABLE;
849 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
850 settings |= MGMT_SETTING_BONDABLE;
852 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
853 settings |= MGMT_SETTING_BREDR;
855 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
856 settings |= MGMT_SETTING_LE;
858 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
859 settings |= MGMT_SETTING_LINK_SECURITY;
861 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
862 settings |= MGMT_SETTING_SSP;
864 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
865 settings |= MGMT_SETTING_HS;
867 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
868 settings |= MGMT_SETTING_ADVERTISING;
870 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
871 settings |= MGMT_SETTING_SECURE_CONN;
873 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
874 settings |= MGMT_SETTING_DEBUG_KEYS;
876 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
877 settings |= MGMT_SETTING_PRIVACY;
879 /* The current setting for static address has two purposes. The
880 * first is to indicate if the static address will be used and
881 * the second is to indicate if it is actually set.
883 * This means if the static address is not configured, this flag
884 * will never be set. If the address is configured, then if the
885 * address is actually used decides if the flag is set or not.
887 * For single mode LE only controllers and dual-mode controllers
888 * with BR/EDR disabled, the existence of the static address will
891 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
892 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
893 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
894 if (bacmp(&hdev->static_addr, BDADDR_ANY))
895 settings |= MGMT_SETTING_STATIC_ADDRESS;
898 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
899 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
904 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
906 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
909 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
910 struct hci_dev *hdev,
913 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
916 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
918 struct mgmt_pending_cmd *cmd;
920 /* If there's a pending mgmt command the flags will not yet have
921 * their final values, so check for this first.
923 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
925 struct mgmt_mode *cp = cmd->param;
927 return LE_AD_GENERAL;
928 else if (cp->val == 0x02)
929 return LE_AD_LIMITED;
931 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
932 return LE_AD_LIMITED;
933 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
934 return LE_AD_GENERAL;
940 bool mgmt_get_connectable(struct hci_dev *hdev)
942 struct mgmt_pending_cmd *cmd;
944 /* If there's a pending mgmt command the flag will not yet have
945 * it's final value, so check for this first.
947 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
949 struct mgmt_mode *cp = cmd->param;
954 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
957 static void service_cache_off(struct work_struct *work)
959 struct hci_dev *hdev = container_of(work, struct hci_dev,
961 struct hci_request req;
963 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
966 hci_req_init(&req, hdev);
970 __hci_req_update_eir(&req);
971 __hci_req_update_class(&req);
973 hci_dev_unlock(hdev);
975 hci_req_run(&req, NULL);
978 static void rpa_expired(struct work_struct *work)
980 struct hci_dev *hdev = container_of(work, struct hci_dev,
982 struct hci_request req;
984 bt_dev_dbg(hdev, "");
986 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
988 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
991 /* The generation of a new RPA and programming it into the
992 * controller happens in the hci_req_enable_advertising()
995 hci_req_init(&req, hdev);
996 if (ext_adv_capable(hdev))
997 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
999 __hci_req_enable_advertising(&req);
1000 hci_req_run(&req, NULL);
1003 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1005 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1008 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1009 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1011 /* Non-mgmt controlled devices get this bit set
1012 * implicitly so that pairing works for them, however
1013 * for mgmt we require user-space to explicitly enable
1016 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1019 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1020 void *data, u16 data_len)
1022 struct mgmt_rp_read_info rp;
1024 bt_dev_dbg(hdev, "sock %p", sk);
1028 memset(&rp, 0, sizeof(rp));
1030 bacpy(&rp.bdaddr, &hdev->bdaddr);
1032 rp.version = hdev->hci_ver;
1033 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1035 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1036 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1038 memcpy(rp.dev_class, hdev->dev_class, 3);
1040 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1041 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1043 hci_dev_unlock(hdev);
1045 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1049 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1054 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1055 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1056 hdev->dev_class, 3);
1058 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1059 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1062 name_len = strlen(hdev->dev_name);
1063 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1064 hdev->dev_name, name_len);
1066 name_len = strlen(hdev->short_name);
1067 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1068 hdev->short_name, name_len);
1073 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1074 void *data, u16 data_len)
1077 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1080 bt_dev_dbg(hdev, "sock %p", sk);
1082 memset(&buf, 0, sizeof(buf));
1086 bacpy(&rp->bdaddr, &hdev->bdaddr);
1088 rp->version = hdev->hci_ver;
1089 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1091 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1092 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1095 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1096 rp->eir_len = cpu_to_le16(eir_len);
1098 hci_dev_unlock(hdev);
1100 /* If this command is called at least once, then the events
1101 * for class of device and local name changes are disabled
1102 * and only the new extended controller information event
1105 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1106 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1107 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1109 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1110 sizeof(*rp) + eir_len);
1113 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1116 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1119 memset(buf, 0, sizeof(buf));
1121 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1122 ev->eir_len = cpu_to_le16(eir_len);
1124 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1125 sizeof(*ev) + eir_len,
1126 HCI_MGMT_EXT_INFO_EVENTS, skip);
1129 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1131 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1133 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1137 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1139 bt_dev_dbg(hdev, "status 0x%02x", status);
1141 if (hci_conn_count(hdev) == 0) {
1142 cancel_delayed_work(&hdev->power_off);
1143 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1147 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1149 struct mgmt_ev_advertising_added ev;
1151 ev.instance = instance;
1153 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1156 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1159 struct mgmt_ev_advertising_removed ev;
1161 ev.instance = instance;
1163 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1166 static void cancel_adv_timeout(struct hci_dev *hdev)
1168 if (hdev->adv_instance_timeout) {
1169 hdev->adv_instance_timeout = 0;
1170 cancel_delayed_work(&hdev->adv_instance_expire);
1174 static int clean_up_hci_state(struct hci_dev *hdev)
1176 struct hci_request req;
1177 struct hci_conn *conn;
1178 bool discov_stopped;
1181 hci_req_init(&req, hdev);
1183 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1184 test_bit(HCI_PSCAN, &hdev->flags)) {
1186 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1189 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1191 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1192 __hci_req_disable_advertising(&req);
1194 discov_stopped = hci_req_stop_discovery(&req);
1196 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1197 /* 0x15 == Terminated due to Power Off */
1198 __hci_abort_conn(&req, conn, 0x15);
1201 err = hci_req_run(&req, clean_up_hci_complete);
1202 if (!err && discov_stopped)
1203 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1208 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1211 struct mgmt_mode *cp = data;
1212 struct mgmt_pending_cmd *cmd;
1215 bt_dev_dbg(hdev, "sock %p", sk);
1217 if (cp->val != 0x00 && cp->val != 0x01)
1218 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1219 MGMT_STATUS_INVALID_PARAMS);
1223 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1224 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1229 if (!!cp->val == hdev_is_powered(hdev)) {
1230 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1234 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1241 queue_work(hdev->req_workqueue, &hdev->power_on);
1244 /* Disconnect connections, stop scans, etc */
1245 err = clean_up_hci_state(hdev);
1247 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1248 HCI_POWER_OFF_TIMEOUT);
1250 /* ENODATA means there were no HCI commands queued */
1251 if (err == -ENODATA) {
1252 cancel_delayed_work(&hdev->power_off);
1253 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1259 hci_dev_unlock(hdev);
1263 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1265 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1267 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1268 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1271 int mgmt_new_settings(struct hci_dev *hdev)
1273 return new_settings(hdev, NULL);
1278 struct hci_dev *hdev;
1282 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1284 struct cmd_lookup *match = data;
1286 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1288 list_del(&cmd->list);
1290 if (match->sk == NULL) {
1291 match->sk = cmd->sk;
1292 sock_hold(match->sk);
1295 mgmt_pending_free(cmd);
1298 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1302 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1303 mgmt_pending_remove(cmd);
1306 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1308 if (cmd->cmd_complete) {
1311 cmd->cmd_complete(cmd, *status);
1312 mgmt_pending_remove(cmd);
1317 cmd_status_rsp(cmd, data);
1320 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1322 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1323 cmd->param, cmd->param_len);
1326 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1328 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1329 cmd->param, sizeof(struct mgmt_addr_info));
1332 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1334 if (!lmp_bredr_capable(hdev))
1335 return MGMT_STATUS_NOT_SUPPORTED;
1336 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1337 return MGMT_STATUS_REJECTED;
1339 return MGMT_STATUS_SUCCESS;
1342 static u8 mgmt_le_support(struct hci_dev *hdev)
1344 if (!lmp_le_capable(hdev))
1345 return MGMT_STATUS_NOT_SUPPORTED;
1346 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1347 return MGMT_STATUS_REJECTED;
1349 return MGMT_STATUS_SUCCESS;
1352 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1354 struct mgmt_pending_cmd *cmd;
1356 bt_dev_dbg(hdev, "status 0x%02x", status);
1360 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1365 u8 mgmt_err = mgmt_status(status);
1366 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1367 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1371 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1372 hdev->discov_timeout > 0) {
1373 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1374 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1377 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1378 new_settings(hdev, cmd->sk);
1381 mgmt_pending_remove(cmd);
1384 hci_dev_unlock(hdev);
1387 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1390 struct mgmt_cp_set_discoverable *cp = data;
1391 struct mgmt_pending_cmd *cmd;
1395 bt_dev_dbg(hdev, "sock %p", sk);
1397 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1398 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1400 MGMT_STATUS_REJECTED);
1402 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1403 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1404 MGMT_STATUS_INVALID_PARAMS);
1406 timeout = __le16_to_cpu(cp->timeout);
1408 /* Disabling discoverable requires that no timeout is set,
1409 * and enabling limited discoverable requires a timeout.
1411 if ((cp->val == 0x00 && timeout > 0) ||
1412 (cp->val == 0x02 && timeout == 0))
1413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1414 MGMT_STATUS_INVALID_PARAMS);
1418 if (!hdev_is_powered(hdev) && timeout > 0) {
1419 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1420 MGMT_STATUS_NOT_POWERED);
1424 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1425 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1431 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1432 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1433 MGMT_STATUS_REJECTED);
1437 if (hdev->advertising_paused) {
1438 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1443 if (!hdev_is_powered(hdev)) {
1444 bool changed = false;
1446 /* Setting limited discoverable when powered off is
1447 * not a valid operation since it requires a timeout
1448 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1450 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1451 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1455 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1460 err = new_settings(hdev, sk);
1465 /* If the current mode is the same, then just update the timeout
1466 * value with the new value. And if only the timeout gets updated,
1467 * then no need for any HCI transactions.
1469 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1470 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1471 HCI_LIMITED_DISCOVERABLE)) {
1472 cancel_delayed_work(&hdev->discov_off);
1473 hdev->discov_timeout = timeout;
1475 if (cp->val && hdev->discov_timeout > 0) {
1476 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1477 queue_delayed_work(hdev->req_workqueue,
1478 &hdev->discov_off, to);
1481 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1485 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1491 /* Cancel any potential discoverable timeout that might be
1492 * still active and store new timeout value. The arming of
1493 * the timeout happens in the complete handler.
1495 cancel_delayed_work(&hdev->discov_off);
1496 hdev->discov_timeout = timeout;
1499 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1501 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1503 /* Limited discoverable mode */
1504 if (cp->val == 0x02)
1505 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1507 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1509 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1513 hci_dev_unlock(hdev);
1517 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1519 struct mgmt_pending_cmd *cmd;
1521 bt_dev_dbg(hdev, "status 0x%02x", status);
1525 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1530 u8 mgmt_err = mgmt_status(status);
1531 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1535 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1536 new_settings(hdev, cmd->sk);
1539 mgmt_pending_remove(cmd);
1542 hci_dev_unlock(hdev);
1545 static int set_connectable_update_settings(struct hci_dev *hdev,
1546 struct sock *sk, u8 val)
1548 bool changed = false;
1551 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1555 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1557 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1558 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1561 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1566 hci_req_update_scan(hdev);
1567 hci_update_background_scan(hdev);
1568 return new_settings(hdev, sk);
1574 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1577 struct mgmt_mode *cp = data;
1578 struct mgmt_pending_cmd *cmd;
1581 bt_dev_dbg(hdev, "sock %p", sk);
1583 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1584 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1586 MGMT_STATUS_REJECTED);
1588 if (cp->val != 0x00 && cp->val != 0x01)
1589 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1590 MGMT_STATUS_INVALID_PARAMS);
1594 if (!hdev_is_powered(hdev)) {
1595 err = set_connectable_update_settings(hdev, sk, cp->val);
1599 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1600 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1601 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1606 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1613 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1615 if (hdev->discov_timeout > 0)
1616 cancel_delayed_work(&hdev->discov_off);
1618 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1619 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1620 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1623 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1627 hci_dev_unlock(hdev);
1631 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1634 struct mgmt_mode *cp = data;
1638 bt_dev_dbg(hdev, "sock %p", sk);
1640 if (cp->val != 0x00 && cp->val != 0x01)
1641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1642 MGMT_STATUS_INVALID_PARAMS);
1647 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1649 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1651 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1656 /* In limited privacy mode the change of bondable mode
1657 * may affect the local advertising address.
1659 if (hdev_is_powered(hdev) &&
1660 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1661 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1662 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1663 queue_work(hdev->req_workqueue,
1664 &hdev->discoverable_update);
1666 err = new_settings(hdev, sk);
1670 hci_dev_unlock(hdev);
1674 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1677 struct mgmt_mode *cp = data;
1678 struct mgmt_pending_cmd *cmd;
1682 bt_dev_dbg(hdev, "sock %p", sk);
1684 status = mgmt_bredr_support(hdev);
1686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1689 if (cp->val != 0x00 && cp->val != 0x01)
1690 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1691 MGMT_STATUS_INVALID_PARAMS);
1695 if (!hdev_is_powered(hdev)) {
1696 bool changed = false;
1698 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1699 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1703 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1708 err = new_settings(hdev, sk);
1713 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1714 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1721 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1722 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1726 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1732 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1734 mgmt_pending_remove(cmd);
1739 hci_dev_unlock(hdev);
1743 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1745 struct mgmt_mode *cp = data;
1746 struct mgmt_pending_cmd *cmd;
1750 bt_dev_dbg(hdev, "sock %p", sk);
1752 status = mgmt_bredr_support(hdev);
1754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1756 if (!lmp_ssp_capable(hdev))
1757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1758 MGMT_STATUS_NOT_SUPPORTED);
1760 if (cp->val != 0x00 && cp->val != 0x01)
1761 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1762 MGMT_STATUS_INVALID_PARAMS);
1766 if (!hdev_is_powered(hdev)) {
1770 changed = !hci_dev_test_and_set_flag(hdev,
1773 changed = hci_dev_test_and_clear_flag(hdev,
1776 changed = hci_dev_test_and_clear_flag(hdev,
1779 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1782 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1787 err = new_settings(hdev, sk);
1792 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1793 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1798 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1799 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1803 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1809 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1810 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1811 sizeof(cp->val), &cp->val);
1813 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1815 mgmt_pending_remove(cmd);
1820 hci_dev_unlock(hdev);
1824 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1826 struct mgmt_mode *cp = data;
1831 bt_dev_dbg(hdev, "sock %p", sk);
1833 if (!IS_ENABLED(CONFIG_BT_HS))
1834 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1835 MGMT_STATUS_NOT_SUPPORTED);
1837 status = mgmt_bredr_support(hdev);
1839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1841 if (!lmp_ssp_capable(hdev))
1842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1843 MGMT_STATUS_NOT_SUPPORTED);
1845 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1847 MGMT_STATUS_REJECTED);
1849 if (cp->val != 0x00 && cp->val != 0x01)
1850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1851 MGMT_STATUS_INVALID_PARAMS);
1855 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1862 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1864 if (hdev_is_powered(hdev)) {
1865 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1866 MGMT_STATUS_REJECTED);
1870 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1873 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1878 err = new_settings(hdev, sk);
1881 hci_dev_unlock(hdev);
1885 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1887 struct cmd_lookup match = { NULL, hdev };
1892 u8 mgmt_err = mgmt_status(status);
1894 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1899 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1901 new_settings(hdev, match.sk);
1906 /* Make sure the controller has a good default for
1907 * advertising data. Restrict the update to when LE
1908 * has actually been enabled. During power on, the
1909 * update in powered_update_hci will take care of it.
1911 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1912 struct hci_request req;
1913 hci_req_init(&req, hdev);
1914 if (ext_adv_capable(hdev)) {
1917 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1919 __hci_req_update_scan_rsp_data(&req, 0x00);
1921 __hci_req_update_adv_data(&req, 0x00);
1922 __hci_req_update_scan_rsp_data(&req, 0x00);
1924 hci_req_run(&req, NULL);
1925 hci_update_background_scan(hdev);
1929 hci_dev_unlock(hdev);
1932 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1934 struct mgmt_mode *cp = data;
1935 struct hci_cp_write_le_host_supported hci_cp;
1936 struct mgmt_pending_cmd *cmd;
1937 struct hci_request req;
1941 bt_dev_dbg(hdev, "sock %p", sk);
1943 if (!lmp_le_capable(hdev))
1944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1945 MGMT_STATUS_NOT_SUPPORTED);
1947 if (cp->val != 0x00 && cp->val != 0x01)
1948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1949 MGMT_STATUS_INVALID_PARAMS);
1951 /* Bluetooth single mode LE only controllers or dual-mode
1952 * controllers configured as LE only devices, do not allow
1953 * switching LE off. These have either LE enabled explicitly
1954 * or BR/EDR has been previously switched off.
1956 * When trying to enable an already enabled LE, then gracefully
1957 * send a positive response. Trying to disable it however will
1958 * result into rejection.
1960 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1961 if (cp->val == 0x01)
1962 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1964 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1965 MGMT_STATUS_REJECTED);
1971 enabled = lmp_host_le_capable(hdev);
1974 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1976 if (!hdev_is_powered(hdev) || val == enabled) {
1977 bool changed = false;
1979 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1980 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1984 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1985 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1989 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1994 err = new_settings(hdev, sk);
1999 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2000 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2006 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2012 hci_req_init(&req, hdev);
2014 memset(&hci_cp, 0, sizeof(hci_cp));
2018 hci_cp.simul = 0x00;
2020 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2021 __hci_req_disable_advertising(&req);
2023 if (ext_adv_capable(hdev))
2024 __hci_req_clear_ext_adv_sets(&req);
2027 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2030 err = hci_req_run(&req, le_enable_complete);
2032 mgmt_pending_remove(cmd);
2035 hci_dev_unlock(hdev);
2039 /* This is a helper function to test for pending mgmt commands that can
2040 * cause CoD or EIR HCI commands. We can only allow one such pending
2041 * mgmt command at a time since otherwise we cannot easily track what
2042 * the current values are, will be, and based on that calculate if a new
2043 * HCI command needs to be sent and if yes with what value.
2045 static bool pending_eir_or_class(struct hci_dev *hdev)
2047 struct mgmt_pending_cmd *cmd;
2049 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2050 switch (cmd->opcode) {
2051 case MGMT_OP_ADD_UUID:
2052 case MGMT_OP_REMOVE_UUID:
2053 case MGMT_OP_SET_DEV_CLASS:
2054 case MGMT_OP_SET_POWERED:
2062 static const u8 bluetooth_base_uuid[] = {
2063 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2064 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2067 static u8 get_uuid_size(const u8 *uuid)
2071 if (memcmp(uuid, bluetooth_base_uuid, 12))
2074 val = get_unaligned_le32(&uuid[12]);
2081 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2083 struct mgmt_pending_cmd *cmd;
2087 cmd = pending_find(mgmt_op, hdev);
2091 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2092 mgmt_status(status), hdev->dev_class, 3);
2094 mgmt_pending_remove(cmd);
2097 hci_dev_unlock(hdev);
2100 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2102 bt_dev_dbg(hdev, "status 0x%02x", status);
2104 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2107 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2109 struct mgmt_cp_add_uuid *cp = data;
2110 struct mgmt_pending_cmd *cmd;
2111 struct hci_request req;
2112 struct bt_uuid *uuid;
2115 bt_dev_dbg(hdev, "sock %p", sk);
2119 if (pending_eir_or_class(hdev)) {
2120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2125 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2131 memcpy(uuid->uuid, cp->uuid, 16);
2132 uuid->svc_hint = cp->svc_hint;
2133 uuid->size = get_uuid_size(cp->uuid);
2135 list_add_tail(&uuid->list, &hdev->uuids);
2137 hci_req_init(&req, hdev);
2139 __hci_req_update_class(&req);
2140 __hci_req_update_eir(&req);
2142 err = hci_req_run(&req, add_uuid_complete);
2144 if (err != -ENODATA)
2147 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2148 hdev->dev_class, 3);
2152 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2161 hci_dev_unlock(hdev);
2165 static bool enable_service_cache(struct hci_dev *hdev)
2167 if (!hdev_is_powered(hdev))
2170 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2171 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2179 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2181 bt_dev_dbg(hdev, "status 0x%02x", status);
2183 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2186 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2189 struct mgmt_cp_remove_uuid *cp = data;
2190 struct mgmt_pending_cmd *cmd;
2191 struct bt_uuid *match, *tmp;
2192 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2193 struct hci_request req;
2196 bt_dev_dbg(hdev, "sock %p", sk);
2200 if (pending_eir_or_class(hdev)) {
2201 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2206 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2207 hci_uuids_clear(hdev);
2209 if (enable_service_cache(hdev)) {
2210 err = mgmt_cmd_complete(sk, hdev->id,
2211 MGMT_OP_REMOVE_UUID,
2212 0, hdev->dev_class, 3);
2221 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2222 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2225 list_del(&match->list);
2231 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2232 MGMT_STATUS_INVALID_PARAMS);
2237 hci_req_init(&req, hdev);
2239 __hci_req_update_class(&req);
2240 __hci_req_update_eir(&req);
2242 err = hci_req_run(&req, remove_uuid_complete);
2244 if (err != -ENODATA)
2247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2248 hdev->dev_class, 3);
2252 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2261 hci_dev_unlock(hdev);
2265 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2267 bt_dev_dbg(hdev, "status 0x%02x", status);
2269 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2272 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2275 struct mgmt_cp_set_dev_class *cp = data;
2276 struct mgmt_pending_cmd *cmd;
2277 struct hci_request req;
2280 bt_dev_dbg(hdev, "sock %p", sk);
2282 if (!lmp_bredr_capable(hdev))
2283 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2284 MGMT_STATUS_NOT_SUPPORTED);
2288 if (pending_eir_or_class(hdev)) {
2289 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2294 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2295 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2296 MGMT_STATUS_INVALID_PARAMS);
2300 hdev->major_class = cp->major;
2301 hdev->minor_class = cp->minor;
2303 if (!hdev_is_powered(hdev)) {
2304 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2305 hdev->dev_class, 3);
2309 hci_req_init(&req, hdev);
2311 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2312 hci_dev_unlock(hdev);
2313 cancel_delayed_work_sync(&hdev->service_cache);
2315 __hci_req_update_eir(&req);
2318 __hci_req_update_class(&req);
2320 err = hci_req_run(&req, set_class_complete);
2322 if (err != -ENODATA)
2325 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2326 hdev->dev_class, 3);
2330 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2339 hci_dev_unlock(hdev);
2343 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2346 struct mgmt_cp_load_link_keys *cp = data;
2347 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2348 sizeof(struct mgmt_link_key_info));
2349 u16 key_count, expected_len;
2353 bt_dev_dbg(hdev, "sock %p", sk);
2355 if (!lmp_bredr_capable(hdev))
2356 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2357 MGMT_STATUS_NOT_SUPPORTED);
2359 key_count = __le16_to_cpu(cp->key_count);
2360 if (key_count > max_key_count) {
2361 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2364 MGMT_STATUS_INVALID_PARAMS);
2367 expected_len = struct_size(cp, keys, key_count);
2368 if (expected_len != len) {
2369 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2372 MGMT_STATUS_INVALID_PARAMS);
2375 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2376 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2377 MGMT_STATUS_INVALID_PARAMS);
2379 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2382 for (i = 0; i < key_count; i++) {
2383 struct mgmt_link_key_info *key = &cp->keys[i];
2385 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2386 return mgmt_cmd_status(sk, hdev->id,
2387 MGMT_OP_LOAD_LINK_KEYS,
2388 MGMT_STATUS_INVALID_PARAMS);
2393 hci_link_keys_clear(hdev);
2396 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2398 changed = hci_dev_test_and_clear_flag(hdev,
2399 HCI_KEEP_DEBUG_KEYS);
2402 new_settings(hdev, NULL);
2404 for (i = 0; i < key_count; i++) {
2405 struct mgmt_link_key_info *key = &cp->keys[i];
2407 if (hci_is_blocked_key(hdev,
2408 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2410 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2415 /* Always ignore debug keys and require a new pairing if
2416 * the user wants to use them.
2418 if (key->type == HCI_LK_DEBUG_COMBINATION)
2421 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2422 key->type, key->pin_len, NULL);
2425 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2427 hci_dev_unlock(hdev);
2432 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2433 u8 addr_type, struct sock *skip_sk)
2435 struct mgmt_ev_device_unpaired ev;
2437 bacpy(&ev.addr.bdaddr, bdaddr);
2438 ev.addr.type = addr_type;
2440 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2444 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2447 struct mgmt_cp_unpair_device *cp = data;
2448 struct mgmt_rp_unpair_device rp;
2449 struct hci_conn_params *params;
2450 struct mgmt_pending_cmd *cmd;
2451 struct hci_conn *conn;
2455 memset(&rp, 0, sizeof(rp));
2456 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2457 rp.addr.type = cp->addr.type;
2459 if (!bdaddr_type_is_valid(cp->addr.type))
2460 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2461 MGMT_STATUS_INVALID_PARAMS,
2464 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2465 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2466 MGMT_STATUS_INVALID_PARAMS,
2471 if (!hdev_is_powered(hdev)) {
2472 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2473 MGMT_STATUS_NOT_POWERED, &rp,
2478 if (cp->addr.type == BDADDR_BREDR) {
2479 /* If disconnection is requested, then look up the
2480 * connection. If the remote device is connected, it
2481 * will be later used to terminate the link.
2483 * Setting it to NULL explicitly will cause no
2484 * termination of the link.
2487 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2492 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2494 err = mgmt_cmd_complete(sk, hdev->id,
2495 MGMT_OP_UNPAIR_DEVICE,
2496 MGMT_STATUS_NOT_PAIRED, &rp,
2504 /* LE address type */
2505 addr_type = le_addr_type(cp->addr.type);
2507 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2508 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2510 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2511 MGMT_STATUS_NOT_PAIRED, &rp,
2516 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2518 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2523 /* Defer clearing up the connection parameters until closing to
2524 * give a chance of keeping them if a repairing happens.
2526 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2528 /* Disable auto-connection parameters if present */
2529 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2531 if (params->explicit_connect)
2532 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2534 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2537 /* If disconnection is not requested, then clear the connection
2538 * variable so that the link is not terminated.
2540 if (!cp->disconnect)
2544 /* If the connection variable is set, then termination of the
2545 * link is requested.
2548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2550 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2554 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2561 cmd->cmd_complete = addr_cmd_complete;
2563 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2565 mgmt_pending_remove(cmd);
2568 hci_dev_unlock(hdev);
2572 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2575 struct mgmt_cp_disconnect *cp = data;
2576 struct mgmt_rp_disconnect rp;
2577 struct mgmt_pending_cmd *cmd;
2578 struct hci_conn *conn;
2581 bt_dev_dbg(hdev, "sock %p", sk);
2583 memset(&rp, 0, sizeof(rp));
2584 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2585 rp.addr.type = cp->addr.type;
2587 if (!bdaddr_type_is_valid(cp->addr.type))
2588 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2589 MGMT_STATUS_INVALID_PARAMS,
2594 if (!test_bit(HCI_UP, &hdev->flags)) {
2595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2596 MGMT_STATUS_NOT_POWERED, &rp,
2601 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2602 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2603 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2607 if (cp->addr.type == BDADDR_BREDR)
2608 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2611 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2612 le_addr_type(cp->addr.type));
2614 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2615 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2616 MGMT_STATUS_NOT_CONNECTED, &rp,
2621 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2627 cmd->cmd_complete = generic_cmd_complete;
2629 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2631 mgmt_pending_remove(cmd);
2634 hci_dev_unlock(hdev);
2638 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2640 switch (link_type) {
2642 switch (addr_type) {
2643 case ADDR_LE_DEV_PUBLIC:
2644 return BDADDR_LE_PUBLIC;
2647 /* Fallback to LE Random address type */
2648 return BDADDR_LE_RANDOM;
2652 /* Fallback to BR/EDR type */
2653 return BDADDR_BREDR;
2657 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2660 struct mgmt_rp_get_connections *rp;
2665 bt_dev_dbg(hdev, "sock %p", sk);
2669 if (!hdev_is_powered(hdev)) {
2670 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2671 MGMT_STATUS_NOT_POWERED);
2676 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2677 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2681 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2688 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2689 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2691 bacpy(&rp->addr[i].bdaddr, &c->dst);
2692 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2693 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2698 rp->conn_count = cpu_to_le16(i);
2700 /* Recalculate length in case of filtered SCO connections, etc */
2701 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2702 struct_size(rp, addr, i));
2707 hci_dev_unlock(hdev);
2711 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2712 struct mgmt_cp_pin_code_neg_reply *cp)
2714 struct mgmt_pending_cmd *cmd;
2717 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2722 cmd->cmd_complete = addr_cmd_complete;
2724 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2725 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2727 mgmt_pending_remove(cmd);
2732 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2735 struct hci_conn *conn;
2736 struct mgmt_cp_pin_code_reply *cp = data;
2737 struct hci_cp_pin_code_reply reply;
2738 struct mgmt_pending_cmd *cmd;
2741 bt_dev_dbg(hdev, "sock %p", sk);
2745 if (!hdev_is_powered(hdev)) {
2746 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2747 MGMT_STATUS_NOT_POWERED);
2751 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2754 MGMT_STATUS_NOT_CONNECTED);
2758 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2759 struct mgmt_cp_pin_code_neg_reply ncp;
2761 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2763 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2765 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2767 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2768 MGMT_STATUS_INVALID_PARAMS);
2773 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2779 cmd->cmd_complete = addr_cmd_complete;
2781 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2782 reply.pin_len = cp->pin_len;
2783 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2785 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2787 mgmt_pending_remove(cmd);
2790 hci_dev_unlock(hdev);
2794 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2797 struct mgmt_cp_set_io_capability *cp = data;
2799 bt_dev_dbg(hdev, "sock %p", sk);
2801 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2803 MGMT_STATUS_INVALID_PARAMS);
2807 hdev->io_capability = cp->io_capability;
2809 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2811 hci_dev_unlock(hdev);
2813 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2817 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2819 struct hci_dev *hdev = conn->hdev;
2820 struct mgmt_pending_cmd *cmd;
2822 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2823 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2826 if (cmd->user_data != conn)
2835 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2837 struct mgmt_rp_pair_device rp;
2838 struct hci_conn *conn = cmd->user_data;
2841 bacpy(&rp.addr.bdaddr, &conn->dst);
2842 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2844 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2845 status, &rp, sizeof(rp));
2847 /* So we don't get further callbacks for this connection */
2848 conn->connect_cfm_cb = NULL;
2849 conn->security_cfm_cb = NULL;
2850 conn->disconn_cfm_cb = NULL;
2852 hci_conn_drop(conn);
2854 /* The device is paired so there is no need to remove
2855 * its connection parameters anymore.
2857 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2864 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2866 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2867 struct mgmt_pending_cmd *cmd;
2869 cmd = find_pairing(conn);
2871 cmd->cmd_complete(cmd, status);
2872 mgmt_pending_remove(cmd);
2876 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2878 struct mgmt_pending_cmd *cmd;
2880 BT_DBG("status %u", status);
2882 cmd = find_pairing(conn);
2884 BT_DBG("Unable to find a pending command");
2888 cmd->cmd_complete(cmd, mgmt_status(status));
2889 mgmt_pending_remove(cmd);
2892 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2894 struct mgmt_pending_cmd *cmd;
2896 BT_DBG("status %u", status);
2901 cmd = find_pairing(conn);
2903 BT_DBG("Unable to find a pending command");
2907 cmd->cmd_complete(cmd, mgmt_status(status));
2908 mgmt_pending_remove(cmd);
2911 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2914 struct mgmt_cp_pair_device *cp = data;
2915 struct mgmt_rp_pair_device rp;
2916 struct mgmt_pending_cmd *cmd;
2917 u8 sec_level, auth_type;
2918 struct hci_conn *conn;
2921 bt_dev_dbg(hdev, "sock %p", sk);
2923 memset(&rp, 0, sizeof(rp));
2924 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2925 rp.addr.type = cp->addr.type;
2927 if (!bdaddr_type_is_valid(cp->addr.type))
2928 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2929 MGMT_STATUS_INVALID_PARAMS,
2932 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2933 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2934 MGMT_STATUS_INVALID_PARAMS,
2939 if (!hdev_is_powered(hdev)) {
2940 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2941 MGMT_STATUS_NOT_POWERED, &rp,
2946 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2947 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2948 MGMT_STATUS_ALREADY_PAIRED, &rp,
2953 sec_level = BT_SECURITY_MEDIUM;
2954 auth_type = HCI_AT_DEDICATED_BONDING;
2956 if (cp->addr.type == BDADDR_BREDR) {
2957 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2958 auth_type, CONN_REASON_PAIR_DEVICE);
2960 u8 addr_type = le_addr_type(cp->addr.type);
2961 struct hci_conn_params *p;
2963 /* When pairing a new device, it is expected to remember
2964 * this device for future connections. Adding the connection
2965 * parameter information ahead of time allows tracking
2966 * of the peripheral preferred values and will speed up any
2967 * further connection establishment.
2969 * If connection parameters already exist, then they
2970 * will be kept and this function does nothing.
2972 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2974 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2975 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2977 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2978 sec_level, HCI_LE_CONN_TIMEOUT,
2979 CONN_REASON_PAIR_DEVICE);
2985 if (PTR_ERR(conn) == -EBUSY)
2986 status = MGMT_STATUS_BUSY;
2987 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2988 status = MGMT_STATUS_NOT_SUPPORTED;
2989 else if (PTR_ERR(conn) == -ECONNREFUSED)
2990 status = MGMT_STATUS_REJECTED;
2992 status = MGMT_STATUS_CONNECT_FAILED;
2994 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2995 status, &rp, sizeof(rp));
2999 if (conn->connect_cfm_cb) {
3000 hci_conn_drop(conn);
3001 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3002 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3006 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3009 hci_conn_drop(conn);
3013 cmd->cmd_complete = pairing_complete;
3015 /* For LE, just connecting isn't a proof that the pairing finished */
3016 if (cp->addr.type == BDADDR_BREDR) {
3017 conn->connect_cfm_cb = pairing_complete_cb;
3018 conn->security_cfm_cb = pairing_complete_cb;
3019 conn->disconn_cfm_cb = pairing_complete_cb;
3021 conn->connect_cfm_cb = le_pairing_complete_cb;
3022 conn->security_cfm_cb = le_pairing_complete_cb;
3023 conn->disconn_cfm_cb = le_pairing_complete_cb;
3026 conn->io_capability = cp->io_cap;
3027 cmd->user_data = hci_conn_get(conn);
3029 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3030 hci_conn_security(conn, sec_level, auth_type, true)) {
3031 cmd->cmd_complete(cmd, 0);
3032 mgmt_pending_remove(cmd);
3038 hci_dev_unlock(hdev);
3042 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3045 struct mgmt_addr_info *addr = data;
3046 struct mgmt_pending_cmd *cmd;
3047 struct hci_conn *conn;
3050 bt_dev_dbg(hdev, "sock %p", sk);
3054 if (!hdev_is_powered(hdev)) {
3055 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3056 MGMT_STATUS_NOT_POWERED);
3060 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3062 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3063 MGMT_STATUS_INVALID_PARAMS);
3067 conn = cmd->user_data;
3069 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3070 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3071 MGMT_STATUS_INVALID_PARAMS);
3075 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3076 mgmt_pending_remove(cmd);
3078 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3079 addr, sizeof(*addr));
3081 /* Since user doesn't want to proceed with the connection, abort any
3082 * ongoing pairing and then terminate the link if it was created
3083 * because of the pair device action.
3085 if (addr->type == BDADDR_BREDR)
3086 hci_remove_link_key(hdev, &addr->bdaddr);
3088 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3089 le_addr_type(addr->type));
3091 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3092 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3095 hci_dev_unlock(hdev);
3099 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3100 struct mgmt_addr_info *addr, u16 mgmt_op,
3101 u16 hci_op, __le32 passkey)
3103 struct mgmt_pending_cmd *cmd;
3104 struct hci_conn *conn;
3109 if (!hdev_is_powered(hdev)) {
3110 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3111 MGMT_STATUS_NOT_POWERED, addr,
3116 if (addr->type == BDADDR_BREDR)
3117 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3119 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3120 le_addr_type(addr->type));
3123 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3124 MGMT_STATUS_NOT_CONNECTED, addr,
3129 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3130 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3132 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3133 MGMT_STATUS_SUCCESS, addr,
3136 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3137 MGMT_STATUS_FAILED, addr,
3143 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3149 cmd->cmd_complete = addr_cmd_complete;
3151 /* Continue with pairing via HCI */
3152 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3153 struct hci_cp_user_passkey_reply cp;
3155 bacpy(&cp.bdaddr, &addr->bdaddr);
3156 cp.passkey = passkey;
3157 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3159 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3163 mgmt_pending_remove(cmd);
3166 hci_dev_unlock(hdev);
3170 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3171 void *data, u16 len)
3173 struct mgmt_cp_pin_code_neg_reply *cp = data;
3175 bt_dev_dbg(hdev, "sock %p", sk);
3177 return user_pairing_resp(sk, hdev, &cp->addr,
3178 MGMT_OP_PIN_CODE_NEG_REPLY,
3179 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3182 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3185 struct mgmt_cp_user_confirm_reply *cp = data;
3187 bt_dev_dbg(hdev, "sock %p", sk);
3189 if (len != sizeof(*cp))
3190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3191 MGMT_STATUS_INVALID_PARAMS);
3193 return user_pairing_resp(sk, hdev, &cp->addr,
3194 MGMT_OP_USER_CONFIRM_REPLY,
3195 HCI_OP_USER_CONFIRM_REPLY, 0);
3198 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3199 void *data, u16 len)
3201 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3203 bt_dev_dbg(hdev, "sock %p", sk);
3205 return user_pairing_resp(sk, hdev, &cp->addr,
3206 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3207 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3210 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3213 struct mgmt_cp_user_passkey_reply *cp = data;
3215 bt_dev_dbg(hdev, "sock %p", sk);
3217 return user_pairing_resp(sk, hdev, &cp->addr,
3218 MGMT_OP_USER_PASSKEY_REPLY,
3219 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3222 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3223 void *data, u16 len)
3225 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3227 bt_dev_dbg(hdev, "sock %p", sk);
3229 return user_pairing_resp(sk, hdev, &cp->addr,
3230 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3231 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3234 static void adv_expire(struct hci_dev *hdev, u32 flags)
3236 struct adv_info *adv_instance;
3237 struct hci_request req;
3240 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3244 /* stop if current instance doesn't need to be changed */
3245 if (!(adv_instance->flags & flags))
3248 cancel_adv_timeout(hdev);
3250 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3254 hci_req_init(&req, hdev);
3255 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3260 hci_req_run(&req, NULL);
3263 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3265 struct mgmt_cp_set_local_name *cp;
3266 struct mgmt_pending_cmd *cmd;
3268 bt_dev_dbg(hdev, "status 0x%02x", status);
3272 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3279 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3280 mgmt_status(status));
3282 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3285 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3286 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3289 mgmt_pending_remove(cmd);
3292 hci_dev_unlock(hdev);
3295 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3298 struct mgmt_cp_set_local_name *cp = data;
3299 struct mgmt_pending_cmd *cmd;
3300 struct hci_request req;
3303 bt_dev_dbg(hdev, "sock %p", sk);
3307 /* If the old values are the same as the new ones just return a
3308 * direct command complete event.
3310 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3311 !memcmp(hdev->short_name, cp->short_name,
3312 sizeof(hdev->short_name))) {
3313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3318 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3320 if (!hdev_is_powered(hdev)) {
3321 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3323 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3328 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3329 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3330 ext_info_changed(hdev, sk);
3335 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3341 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3343 hci_req_init(&req, hdev);
3345 if (lmp_bredr_capable(hdev)) {
3346 __hci_req_update_name(&req);
3347 __hci_req_update_eir(&req);
3350 /* The name is stored in the scan response data and so
3351 * no need to update the advertising data here.
3353 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3354 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3356 err = hci_req_run(&req, set_name_complete);
3358 mgmt_pending_remove(cmd);
3361 hci_dev_unlock(hdev);
3365 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3368 struct mgmt_cp_set_appearance *cp = data;
3372 bt_dev_dbg(hdev, "sock %p", sk);
3374 if (!lmp_le_capable(hdev))
3375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3376 MGMT_STATUS_NOT_SUPPORTED);
3378 appearance = le16_to_cpu(cp->appearance);
3382 if (hdev->appearance != appearance) {
3383 hdev->appearance = appearance;
3385 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3386 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3388 ext_info_changed(hdev, sk);
3391 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3394 hci_dev_unlock(hdev);
3399 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3400 void *data, u16 len)
3402 struct mgmt_rp_get_phy_configuration rp;
3404 bt_dev_dbg(hdev, "sock %p", sk);
3408 memset(&rp, 0, sizeof(rp));
3410 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3411 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3412 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3414 hci_dev_unlock(hdev);
3416 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3420 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3422 struct mgmt_ev_phy_configuration_changed ev;
3424 memset(&ev, 0, sizeof(ev));
3426 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3428 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3432 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3433 u16 opcode, struct sk_buff *skb)
3435 struct mgmt_pending_cmd *cmd;
3437 bt_dev_dbg(hdev, "status 0x%02x", status);
3441 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3446 mgmt_cmd_status(cmd->sk, hdev->id,
3447 MGMT_OP_SET_PHY_CONFIGURATION,
3448 mgmt_status(status));
3450 mgmt_cmd_complete(cmd->sk, hdev->id,
3451 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3454 mgmt_phy_configuration_changed(hdev, cmd->sk);
3457 mgmt_pending_remove(cmd);
3460 hci_dev_unlock(hdev);
3463 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3464 void *data, u16 len)
3466 struct mgmt_cp_set_phy_configuration *cp = data;
3467 struct hci_cp_le_set_default_phy cp_phy;
3468 struct mgmt_pending_cmd *cmd;
3469 struct hci_request req;
3470 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3471 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3472 bool changed = false;
3475 bt_dev_dbg(hdev, "sock %p", sk);
3477 configurable_phys = get_configurable_phys(hdev);
3478 supported_phys = get_supported_phys(hdev);
3479 selected_phys = __le32_to_cpu(cp->selected_phys);
3481 if (selected_phys & ~supported_phys)
3482 return mgmt_cmd_status(sk, hdev->id,
3483 MGMT_OP_SET_PHY_CONFIGURATION,
3484 MGMT_STATUS_INVALID_PARAMS);
3486 unconfigure_phys = supported_phys & ~configurable_phys;
3488 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3489 return mgmt_cmd_status(sk, hdev->id,
3490 MGMT_OP_SET_PHY_CONFIGURATION,
3491 MGMT_STATUS_INVALID_PARAMS);
3493 if (selected_phys == get_selected_phys(hdev))
3494 return mgmt_cmd_complete(sk, hdev->id,
3495 MGMT_OP_SET_PHY_CONFIGURATION,
3500 if (!hdev_is_powered(hdev)) {
3501 err = mgmt_cmd_status(sk, hdev->id,
3502 MGMT_OP_SET_PHY_CONFIGURATION,
3503 MGMT_STATUS_REJECTED);
3507 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3508 err = mgmt_cmd_status(sk, hdev->id,
3509 MGMT_OP_SET_PHY_CONFIGURATION,
3514 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3515 pkt_type |= (HCI_DH3 | HCI_DM3);
3517 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3519 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3520 pkt_type |= (HCI_DH5 | HCI_DM5);
3522 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3524 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3525 pkt_type &= ~HCI_2DH1;
3527 pkt_type |= HCI_2DH1;
3529 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3530 pkt_type &= ~HCI_2DH3;
3532 pkt_type |= HCI_2DH3;
3534 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3535 pkt_type &= ~HCI_2DH5;
3537 pkt_type |= HCI_2DH5;
3539 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3540 pkt_type &= ~HCI_3DH1;
3542 pkt_type |= HCI_3DH1;
3544 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3545 pkt_type &= ~HCI_3DH3;
3547 pkt_type |= HCI_3DH3;
3549 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3550 pkt_type &= ~HCI_3DH5;
3552 pkt_type |= HCI_3DH5;
3554 if (pkt_type != hdev->pkt_type) {
3555 hdev->pkt_type = pkt_type;
3559 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3560 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3562 mgmt_phy_configuration_changed(hdev, sk);
3564 err = mgmt_cmd_complete(sk, hdev->id,
3565 MGMT_OP_SET_PHY_CONFIGURATION,
3571 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3578 hci_req_init(&req, hdev);
3580 memset(&cp_phy, 0, sizeof(cp_phy));
3582 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3583 cp_phy.all_phys |= 0x01;
3585 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3586 cp_phy.all_phys |= 0x02;
3588 if (selected_phys & MGMT_PHY_LE_1M_TX)
3589 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3591 if (selected_phys & MGMT_PHY_LE_2M_TX)
3592 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3594 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3595 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3597 if (selected_phys & MGMT_PHY_LE_1M_RX)
3598 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3600 if (selected_phys & MGMT_PHY_LE_2M_RX)
3601 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3603 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3604 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3606 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3608 err = hci_req_run_skb(&req, set_default_phy_complete);
3610 mgmt_pending_remove(cmd);
3613 hci_dev_unlock(hdev);
3618 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3621 int err = MGMT_STATUS_SUCCESS;
3622 struct mgmt_cp_set_blocked_keys *keys = data;
3623 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3624 sizeof(struct mgmt_blocked_key_info));
3625 u16 key_count, expected_len;
3628 bt_dev_dbg(hdev, "sock %p", sk);
3630 key_count = __le16_to_cpu(keys->key_count);
3631 if (key_count > max_key_count) {
3632 bt_dev_err(hdev, "too big key_count value %u", key_count);
3633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3634 MGMT_STATUS_INVALID_PARAMS);
3637 expected_len = struct_size(keys, keys, key_count);
3638 if (expected_len != len) {
3639 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3642 MGMT_STATUS_INVALID_PARAMS);
3647 hci_blocked_keys_clear(hdev);
3649 for (i = 0; i < keys->key_count; ++i) {
3650 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3653 err = MGMT_STATUS_NO_RESOURCES;
3657 b->type = keys->keys[i].type;
3658 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3659 list_add_rcu(&b->list, &hdev->blocked_keys);
3661 hci_dev_unlock(hdev);
3663 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3667 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3668 void *data, u16 len)
3670 struct mgmt_mode *cp = data;
3672 bool changed = false;
3674 bt_dev_dbg(hdev, "sock %p", sk);
3676 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3677 return mgmt_cmd_status(sk, hdev->id,
3678 MGMT_OP_SET_WIDEBAND_SPEECH,
3679 MGMT_STATUS_NOT_SUPPORTED);
3681 if (cp->val != 0x00 && cp->val != 0x01)
3682 return mgmt_cmd_status(sk, hdev->id,
3683 MGMT_OP_SET_WIDEBAND_SPEECH,
3684 MGMT_STATUS_INVALID_PARAMS);
3688 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3689 err = mgmt_cmd_status(sk, hdev->id,
3690 MGMT_OP_SET_WIDEBAND_SPEECH,
3695 if (hdev_is_powered(hdev) &&
3696 !!cp->val != hci_dev_test_flag(hdev,
3697 HCI_WIDEBAND_SPEECH_ENABLED)) {
3698 err = mgmt_cmd_status(sk, hdev->id,
3699 MGMT_OP_SET_WIDEBAND_SPEECH,
3700 MGMT_STATUS_REJECTED);
3705 changed = !hci_dev_test_and_set_flag(hdev,
3706 HCI_WIDEBAND_SPEECH_ENABLED);
3708 changed = hci_dev_test_and_clear_flag(hdev,
3709 HCI_WIDEBAND_SPEECH_ENABLED);
3711 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3716 err = new_settings(hdev, sk);
3719 hci_dev_unlock(hdev);
3723 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3724 void *data, u16 data_len)
3727 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3730 u8 tx_power_range[2];
3732 bt_dev_dbg(hdev, "sock %p", sk);
3734 memset(&buf, 0, sizeof(buf));
3738 /* When the Read Simple Pairing Options command is supported, then
3739 * the remote public key validation is supported.
3741 * Alternatively, when Microsoft extensions are available, they can
3742 * indicate support for public key validation as well.
3744 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3745 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3747 flags |= 0x02; /* Remote public key validation (LE) */
3749 /* When the Read Encryption Key Size command is supported, then the
3750 * encryption key size is enforced.
3752 if (hdev->commands[20] & 0x10)
3753 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3755 flags |= 0x08; /* Encryption key size enforcement (LE) */
3757 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3760 /* When the Read Simple Pairing Options command is supported, then
3761 * also max encryption key size information is provided.
3763 if (hdev->commands[41] & 0x08)
3764 cap_len = eir_append_le16(rp->cap, cap_len,
3765 MGMT_CAP_MAX_ENC_KEY_SIZE,
3766 hdev->max_enc_key_size);
3768 cap_len = eir_append_le16(rp->cap, cap_len,
3769 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3770 SMP_MAX_ENC_KEY_SIZE);
3772 /* Append the min/max LE tx power parameters if we were able to fetch
3773 * it from the controller
3775 if (hdev->commands[38] & 0x80) {
3776 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3777 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3778 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3782 rp->cap_len = cpu_to_le16(cap_len);
3784 hci_dev_unlock(hdev);
3786 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3787 rp, sizeof(*rp) + cap_len);
3790 #ifdef CONFIG_BT_FEATURE_DEBUG
3791 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3792 static const u8 debug_uuid[16] = {
3793 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3794 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3798 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3799 static const u8 simult_central_periph_uuid[16] = {
3800 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3801 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3804 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3805 static const u8 rpa_resolution_uuid[16] = {
3806 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3807 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3810 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3811 void *data, u16 data_len)
3813 char buf[62]; /* Enough space for 3 features */
3814 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3818 bt_dev_dbg(hdev, "sock %p", sk);
3820 memset(&buf, 0, sizeof(buf));
3822 #ifdef CONFIG_BT_FEATURE_DEBUG
3824 flags = bt_dbg_get() ? BIT(0) : 0;
3826 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3827 rp->features[idx].flags = cpu_to_le32(flags);
3833 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3834 (hdev->le_states[4] & 0x08) && /* Central */
3835 (hdev->le_states[4] & 0x40) && /* Peripheral */
3836 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3841 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3842 rp->features[idx].flags = cpu_to_le32(flags);
3846 if (hdev && use_ll_privacy(hdev)) {
3847 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3848 flags = BIT(0) | BIT(1);
3852 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3853 rp->features[idx].flags = cpu_to_le32(flags);
3857 rp->feature_count = cpu_to_le16(idx);
3859 /* After reading the experimental features information, enable
3860 * the events to update client on any future change.
3862 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3864 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3865 MGMT_OP_READ_EXP_FEATURES_INFO,
3866 0, rp, sizeof(*rp) + (20 * idx));
3869 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3872 struct mgmt_ev_exp_feature_changed ev;
3874 memset(&ev, 0, sizeof(ev));
3875 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3876 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3878 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3880 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3884 #ifdef CONFIG_BT_FEATURE_DEBUG
3885 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3887 struct mgmt_ev_exp_feature_changed ev;
3889 memset(&ev, 0, sizeof(ev));
3890 memcpy(ev.uuid, debug_uuid, 16);
3891 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3893 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3895 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3899 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3900 void *data, u16 data_len)
3902 struct mgmt_cp_set_exp_feature *cp = data;
3903 struct mgmt_rp_set_exp_feature rp;
3905 bt_dev_dbg(hdev, "sock %p", sk);
3907 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3908 memset(rp.uuid, 0, 16);
3909 rp.flags = cpu_to_le32(0);
3911 #ifdef CONFIG_BT_FEATURE_DEBUG
3913 bool changed = bt_dbg_get();
3918 exp_debug_feature_changed(false, sk);
3922 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3923 bool changed = hci_dev_test_flag(hdev,
3924 HCI_ENABLE_LL_PRIVACY);
3926 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3929 exp_ll_privacy_feature_changed(false, hdev, sk);
3932 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3934 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3935 MGMT_OP_SET_EXP_FEATURE, 0,
3939 #ifdef CONFIG_BT_FEATURE_DEBUG
3940 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3944 /* Command requires to use the non-controller index */
3946 return mgmt_cmd_status(sk, hdev->id,
3947 MGMT_OP_SET_EXP_FEATURE,
3948 MGMT_STATUS_INVALID_INDEX);
3950 /* Parameters are limited to a single octet */
3951 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3952 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3953 MGMT_OP_SET_EXP_FEATURE,
3954 MGMT_STATUS_INVALID_PARAMS);
3956 /* Only boolean on/off is supported */
3957 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3958 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3959 MGMT_OP_SET_EXP_FEATURE,
3960 MGMT_STATUS_INVALID_PARAMS);
3962 val = !!cp->param[0];
3963 changed = val ? !bt_dbg_get() : bt_dbg_get();
3966 memcpy(rp.uuid, debug_uuid, 16);
3967 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3969 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3971 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3972 MGMT_OP_SET_EXP_FEATURE, 0,
3976 exp_debug_feature_changed(val, sk);
3982 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3987 /* Command requires to use the controller index */
3989 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3990 MGMT_OP_SET_EXP_FEATURE,
3991 MGMT_STATUS_INVALID_INDEX);
3993 /* Changes can only be made when controller is powered down */
3994 if (hdev_is_powered(hdev))
3995 return mgmt_cmd_status(sk, hdev->id,
3996 MGMT_OP_SET_EXP_FEATURE,
3997 MGMT_STATUS_REJECTED);
3999 /* Parameters are limited to a single octet */
4000 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4001 return mgmt_cmd_status(sk, hdev->id,
4002 MGMT_OP_SET_EXP_FEATURE,
4003 MGMT_STATUS_INVALID_PARAMS);
4005 /* Only boolean on/off is supported */
4006 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4007 return mgmt_cmd_status(sk, hdev->id,
4008 MGMT_OP_SET_EXP_FEATURE,
4009 MGMT_STATUS_INVALID_PARAMS);
4011 val = !!cp->param[0];
4014 changed = !hci_dev_test_flag(hdev,
4015 HCI_ENABLE_LL_PRIVACY);
4016 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4017 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4019 /* Enable LL privacy + supported settings changed */
4020 flags = BIT(0) | BIT(1);
4022 changed = hci_dev_test_flag(hdev,
4023 HCI_ENABLE_LL_PRIVACY);
4024 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4026 /* Disable LL privacy + supported settings changed */
4030 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4031 rp.flags = cpu_to_le32(flags);
4033 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4035 err = mgmt_cmd_complete(sk, hdev->id,
4036 MGMT_OP_SET_EXP_FEATURE, 0,
4040 exp_ll_privacy_feature_changed(val, hdev, sk);
4045 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4046 MGMT_OP_SET_EXP_FEATURE,
4047 MGMT_STATUS_NOT_SUPPORTED);
4050 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4052 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4055 struct mgmt_cp_get_device_flags *cp = data;
4056 struct mgmt_rp_get_device_flags rp;
4057 struct bdaddr_list_with_flags *br_params;
4058 struct hci_conn_params *params;
4059 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4060 u32 current_flags = 0;
4061 u8 status = MGMT_STATUS_INVALID_PARAMS;
4063 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4064 &cp->addr.bdaddr, cp->addr.type);
4068 memset(&rp, 0, sizeof(rp));
4070 if (cp->addr.type == BDADDR_BREDR) {
4071 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4077 current_flags = br_params->current_flags;
4079 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4080 le_addr_type(cp->addr.type));
4085 current_flags = params->current_flags;
4088 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4089 rp.addr.type = cp->addr.type;
4090 rp.supported_flags = cpu_to_le32(supported_flags);
4091 rp.current_flags = cpu_to_le32(current_flags);
4093 status = MGMT_STATUS_SUCCESS;
4096 hci_dev_unlock(hdev);
4098 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4102 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4103 bdaddr_t *bdaddr, u8 bdaddr_type,
4104 u32 supported_flags, u32 current_flags)
4106 struct mgmt_ev_device_flags_changed ev;
4108 bacpy(&ev.addr.bdaddr, bdaddr);
4109 ev.addr.type = bdaddr_type;
4110 ev.supported_flags = cpu_to_le32(supported_flags);
4111 ev.current_flags = cpu_to_le32(current_flags);
4113 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4116 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4119 struct mgmt_cp_set_device_flags *cp = data;
4120 struct bdaddr_list_with_flags *br_params;
4121 struct hci_conn_params *params;
4122 u8 status = MGMT_STATUS_INVALID_PARAMS;
4123 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4124 u32 current_flags = __le32_to_cpu(cp->current_flags);
4126 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4127 &cp->addr.bdaddr, cp->addr.type,
4128 __le32_to_cpu(current_flags));
4130 if ((supported_flags | current_flags) != supported_flags) {
4131 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4132 current_flags, supported_flags);
4138 if (cp->addr.type == BDADDR_BREDR) {
4139 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4144 br_params->current_flags = current_flags;
4145 status = MGMT_STATUS_SUCCESS;
4147 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4148 &cp->addr.bdaddr, cp->addr.type);
4151 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4152 le_addr_type(cp->addr.type));
4154 params->current_flags = current_flags;
4155 status = MGMT_STATUS_SUCCESS;
4157 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4159 le_addr_type(cp->addr.type));
4164 hci_dev_unlock(hdev);
4166 if (status == MGMT_STATUS_SUCCESS)
4167 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4168 supported_flags, current_flags);
4170 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4171 &cp->addr, sizeof(cp->addr));
4174 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4177 struct mgmt_ev_adv_monitor_added ev;
4179 ev.monitor_handle = cpu_to_le16(handle);
4181 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4184 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4186 struct mgmt_ev_adv_monitor_removed ev;
4187 struct mgmt_pending_cmd *cmd;
4188 struct sock *sk_skip = NULL;
4189 struct mgmt_cp_remove_adv_monitor *cp;
4191 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4195 if (cp->monitor_handle)
4199 ev.monitor_handle = cpu_to_le16(handle);
4201 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4204 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4205 void *data, u16 len)
4207 struct adv_monitor *monitor = NULL;
4208 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4211 __u32 supported = 0;
4213 __u16 num_handles = 0;
4214 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4216 BT_DBG("request for %s", hdev->name);
4220 if (msft_monitor_supported(hdev))
4221 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4223 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4224 handles[num_handles++] = monitor->handle;
4226 hci_dev_unlock(hdev);
4228 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4229 rp = kmalloc(rp_size, GFP_KERNEL);
4233 /* All supported features are currently enabled */
4234 enabled = supported;
4236 rp->supported_features = cpu_to_le32(supported);
4237 rp->enabled_features = cpu_to_le32(enabled);
4238 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4239 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4240 rp->num_handles = cpu_to_le16(num_handles);
4242 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4244 err = mgmt_cmd_complete(sk, hdev->id,
4245 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4246 MGMT_STATUS_SUCCESS, rp, rp_size);
4253 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4255 struct mgmt_rp_add_adv_patterns_monitor rp;
4256 struct mgmt_pending_cmd *cmd;
4257 struct adv_monitor *monitor;
4262 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4264 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4269 monitor = cmd->user_data;
4270 rp.monitor_handle = cpu_to_le16(monitor->handle);
4273 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4274 hdev->adv_monitors_cnt++;
4275 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4276 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4277 hci_update_background_scan(hdev);
4280 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4281 mgmt_status(status), &rp, sizeof(rp));
4282 mgmt_pending_remove(cmd);
4285 hci_dev_unlock(hdev);
4286 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4287 rp.monitor_handle, status);
4292 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4293 struct adv_monitor *m, u8 status,
4294 void *data, u16 len, u16 op)
4296 struct mgmt_rp_add_adv_patterns_monitor rp;
4297 struct mgmt_pending_cmd *cmd;
4306 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4307 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4308 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4309 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4310 status = MGMT_STATUS_BUSY;
4314 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4316 status = MGMT_STATUS_NO_RESOURCES;
4321 pending = hci_add_adv_monitor(hdev, m, &err);
4323 if (err == -ENOSPC || err == -ENOMEM)
4324 status = MGMT_STATUS_NO_RESOURCES;
4325 else if (err == -EINVAL)
4326 status = MGMT_STATUS_INVALID_PARAMS;
4328 status = MGMT_STATUS_FAILED;
4330 mgmt_pending_remove(cmd);
4335 mgmt_pending_remove(cmd);
4336 rp.monitor_handle = cpu_to_le16(m->handle);
4337 mgmt_adv_monitor_added(sk, hdev, m->handle);
4338 m->state = ADV_MONITOR_STATE_REGISTERED;
4339 hdev->adv_monitors_cnt++;
4341 hci_dev_unlock(hdev);
4342 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4346 hci_dev_unlock(hdev);
4351 hci_free_adv_monitor(hdev, m);
4352 hci_dev_unlock(hdev);
4353 return mgmt_cmd_status(sk, hdev->id, op, status);
4356 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4357 struct mgmt_adv_rssi_thresholds *rssi)
4360 m->rssi.low_threshold = rssi->low_threshold;
4361 m->rssi.low_threshold_timeout =
4362 __le16_to_cpu(rssi->low_threshold_timeout);
4363 m->rssi.high_threshold = rssi->high_threshold;
4364 m->rssi.high_threshold_timeout =
4365 __le16_to_cpu(rssi->high_threshold_timeout);
4366 m->rssi.sampling_period = rssi->sampling_period;
4368 /* Default values. These numbers are the least constricting
4369 * parameters for MSFT API to work, so it behaves as if there
4370 * are no rssi parameter to consider. May need to be changed
4371 * if other API are to be supported.
4373 m->rssi.low_threshold = -127;
4374 m->rssi.low_threshold_timeout = 60;
4375 m->rssi.high_threshold = -127;
4376 m->rssi.high_threshold_timeout = 0;
4377 m->rssi.sampling_period = 0;
4381 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4382 struct mgmt_adv_pattern *patterns)
4384 u8 offset = 0, length = 0;
4385 struct adv_pattern *p = NULL;
4388 for (i = 0; i < pattern_count; i++) {
4389 offset = patterns[i].offset;
4390 length = patterns[i].length;
4391 if (offset >= HCI_MAX_AD_LENGTH ||
4392 length > HCI_MAX_AD_LENGTH ||
4393 (offset + length) > HCI_MAX_AD_LENGTH)
4394 return MGMT_STATUS_INVALID_PARAMS;
4396 p = kmalloc(sizeof(*p), GFP_KERNEL);
4398 return MGMT_STATUS_NO_RESOURCES;
4400 p->ad_type = patterns[i].ad_type;
4401 p->offset = patterns[i].offset;
4402 p->length = patterns[i].length;
4403 memcpy(p->value, patterns[i].value, p->length);
4405 INIT_LIST_HEAD(&p->list);
4406 list_add(&p->list, &m->patterns);
4409 return MGMT_STATUS_SUCCESS;
4412 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4413 void *data, u16 len)
4415 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4416 struct adv_monitor *m = NULL;
4417 u8 status = MGMT_STATUS_SUCCESS;
4418 size_t expected_size = sizeof(*cp);
4420 BT_DBG("request for %s", hdev->name);
4422 if (len <= sizeof(*cp)) {
4423 status = MGMT_STATUS_INVALID_PARAMS;
4427 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4428 if (len != expected_size) {
4429 status = MGMT_STATUS_INVALID_PARAMS;
4433 m = kzalloc(sizeof(*m), GFP_KERNEL);
4435 status = MGMT_STATUS_NO_RESOURCES;
4439 INIT_LIST_HEAD(&m->patterns);
4441 parse_adv_monitor_rssi(m, NULL);
4442 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4445 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4446 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4449 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4450 void *data, u16 len)
4452 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4453 struct adv_monitor *m = NULL;
4454 u8 status = MGMT_STATUS_SUCCESS;
4455 size_t expected_size = sizeof(*cp);
4457 BT_DBG("request for %s", hdev->name);
4459 if (len <= sizeof(*cp)) {
4460 status = MGMT_STATUS_INVALID_PARAMS;
4464 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4465 if (len != expected_size) {
4466 status = MGMT_STATUS_INVALID_PARAMS;
4470 m = kzalloc(sizeof(*m), GFP_KERNEL);
4472 status = MGMT_STATUS_NO_RESOURCES;
4476 INIT_LIST_HEAD(&m->patterns);
4478 parse_adv_monitor_rssi(m, &cp->rssi);
4479 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4482 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4483 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4486 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4488 struct mgmt_rp_remove_adv_monitor rp;
4489 struct mgmt_cp_remove_adv_monitor *cp;
4490 struct mgmt_pending_cmd *cmd;
4495 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4500 rp.monitor_handle = cp->monitor_handle;
4503 hci_update_background_scan(hdev);
4505 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4506 mgmt_status(status), &rp, sizeof(rp));
4507 mgmt_pending_remove(cmd);
4510 hci_dev_unlock(hdev);
4511 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4512 rp.monitor_handle, status);
4517 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4518 void *data, u16 len)
4520 struct mgmt_cp_remove_adv_monitor *cp = data;
4521 struct mgmt_rp_remove_adv_monitor rp;
4522 struct mgmt_pending_cmd *cmd;
4523 u16 handle = __le16_to_cpu(cp->monitor_handle);
4527 BT_DBG("request for %s", hdev->name);
4528 rp.monitor_handle = cp->monitor_handle;
4532 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4533 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4534 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4535 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4536 status = MGMT_STATUS_BUSY;
4540 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4542 status = MGMT_STATUS_NO_RESOURCES;
4547 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4549 pending = hci_remove_all_adv_monitor(hdev, &err);
4552 mgmt_pending_remove(cmd);
4555 status = MGMT_STATUS_INVALID_INDEX;
4557 status = MGMT_STATUS_FAILED;
4562 /* monitor can be removed without forwarding request to controller */
4564 mgmt_pending_remove(cmd);
4565 hci_dev_unlock(hdev);
4567 return mgmt_cmd_complete(sk, hdev->id,
4568 MGMT_OP_REMOVE_ADV_MONITOR,
4569 MGMT_STATUS_SUCCESS,
4573 hci_dev_unlock(hdev);
4577 hci_dev_unlock(hdev);
4578 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4582 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4583 u16 opcode, struct sk_buff *skb)
4585 struct mgmt_rp_read_local_oob_data mgmt_rp;
4586 size_t rp_size = sizeof(mgmt_rp);
4587 struct mgmt_pending_cmd *cmd;
4589 bt_dev_dbg(hdev, "status %u", status);
4591 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4595 if (status || !skb) {
4596 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4597 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4601 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4603 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4604 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
4606 if (skb->len < sizeof(*rp)) {
4607 mgmt_cmd_status(cmd->sk, hdev->id,
4608 MGMT_OP_READ_LOCAL_OOB_DATA,
4609 MGMT_STATUS_FAILED);
4613 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4614 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
4616 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4618 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4620 if (skb->len < sizeof(*rp)) {
4621 mgmt_cmd_status(cmd->sk, hdev->id,
4622 MGMT_OP_READ_LOCAL_OOB_DATA,
4623 MGMT_STATUS_FAILED);
4627 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4628 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4630 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4631 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4634 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4635 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4638 mgmt_pending_remove(cmd);
4641 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4642 void *data, u16 data_len)
4644 struct mgmt_pending_cmd *cmd;
4645 struct hci_request req;
4648 bt_dev_dbg(hdev, "sock %p", sk);
4652 if (!hdev_is_powered(hdev)) {
4653 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4654 MGMT_STATUS_NOT_POWERED);
4658 if (!lmp_ssp_capable(hdev)) {
4659 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4660 MGMT_STATUS_NOT_SUPPORTED);
4664 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4665 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4670 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4676 hci_req_init(&req, hdev);
4678 if (bredr_sc_enabled(hdev))
4679 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4681 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4683 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4685 mgmt_pending_remove(cmd);
4688 hci_dev_unlock(hdev);
4692 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4693 void *data, u16 len)
4695 struct mgmt_addr_info *addr = data;
4698 bt_dev_dbg(hdev, "sock %p", sk);
4700 if (!bdaddr_type_is_valid(addr->type))
4701 return mgmt_cmd_complete(sk, hdev->id,
4702 MGMT_OP_ADD_REMOTE_OOB_DATA,
4703 MGMT_STATUS_INVALID_PARAMS,
4704 addr, sizeof(*addr));
4708 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4709 struct mgmt_cp_add_remote_oob_data *cp = data;
4712 if (cp->addr.type != BDADDR_BREDR) {
4713 err = mgmt_cmd_complete(sk, hdev->id,
4714 MGMT_OP_ADD_REMOTE_OOB_DATA,
4715 MGMT_STATUS_INVALID_PARAMS,
4716 &cp->addr, sizeof(cp->addr));
4720 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4721 cp->addr.type, cp->hash,
4722 cp->rand, NULL, NULL);
4724 status = MGMT_STATUS_FAILED;
4726 status = MGMT_STATUS_SUCCESS;
4728 err = mgmt_cmd_complete(sk, hdev->id,
4729 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4730 &cp->addr, sizeof(cp->addr));
4731 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4732 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4733 u8 *rand192, *hash192, *rand256, *hash256;
4736 if (bdaddr_type_is_le(cp->addr.type)) {
4737 /* Enforce zero-valued 192-bit parameters as
4738 * long as legacy SMP OOB isn't implemented.
4740 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4741 memcmp(cp->hash192, ZERO_KEY, 16)) {
4742 err = mgmt_cmd_complete(sk, hdev->id,
4743 MGMT_OP_ADD_REMOTE_OOB_DATA,
4744 MGMT_STATUS_INVALID_PARAMS,
4745 addr, sizeof(*addr));
4752 /* In case one of the P-192 values is set to zero,
4753 * then just disable OOB data for P-192.
4755 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4756 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4760 rand192 = cp->rand192;
4761 hash192 = cp->hash192;
4765 /* In case one of the P-256 values is set to zero, then just
4766 * disable OOB data for P-256.
4768 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4769 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4773 rand256 = cp->rand256;
4774 hash256 = cp->hash256;
4777 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4778 cp->addr.type, hash192, rand192,
4781 status = MGMT_STATUS_FAILED;
4783 status = MGMT_STATUS_SUCCESS;
4785 err = mgmt_cmd_complete(sk, hdev->id,
4786 MGMT_OP_ADD_REMOTE_OOB_DATA,
4787 status, &cp->addr, sizeof(cp->addr));
4789 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4791 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4792 MGMT_STATUS_INVALID_PARAMS);
4796 hci_dev_unlock(hdev);
4800 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4801 void *data, u16 len)
4803 struct mgmt_cp_remove_remote_oob_data *cp = data;
4807 bt_dev_dbg(hdev, "sock %p", sk);
4809 if (cp->addr.type != BDADDR_BREDR)
4810 return mgmt_cmd_complete(sk, hdev->id,
4811 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4812 MGMT_STATUS_INVALID_PARAMS,
4813 &cp->addr, sizeof(cp->addr));
4817 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4818 hci_remote_oob_data_clear(hdev);
4819 status = MGMT_STATUS_SUCCESS;
4823 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4825 status = MGMT_STATUS_INVALID_PARAMS;
4827 status = MGMT_STATUS_SUCCESS;
4830 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4831 status, &cp->addr, sizeof(cp->addr));
4833 hci_dev_unlock(hdev);
4837 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4839 struct mgmt_pending_cmd *cmd;
4841 bt_dev_dbg(hdev, "status %u", status);
4845 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4847 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4850 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4853 cmd->cmd_complete(cmd, mgmt_status(status));
4854 mgmt_pending_remove(cmd);
4857 hci_dev_unlock(hdev);
4859 /* Handle suspend notifier */
4860 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4861 hdev->suspend_tasks)) {
4862 bt_dev_dbg(hdev, "Unpaused discovery");
4863 wake_up(&hdev->suspend_wait_q);
4867 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4868 uint8_t *mgmt_status)
4871 case DISCOV_TYPE_LE:
4872 *mgmt_status = mgmt_le_support(hdev);
4876 case DISCOV_TYPE_INTERLEAVED:
4877 *mgmt_status = mgmt_le_support(hdev);
4881 case DISCOV_TYPE_BREDR:
4882 *mgmt_status = mgmt_bredr_support(hdev);
4887 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4894 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4895 u16 op, void *data, u16 len)
4897 struct mgmt_cp_start_discovery *cp = data;
4898 struct mgmt_pending_cmd *cmd;
4902 bt_dev_dbg(hdev, "sock %p", sk);
4906 if (!hdev_is_powered(hdev)) {
4907 err = mgmt_cmd_complete(sk, hdev->id, op,
4908 MGMT_STATUS_NOT_POWERED,
4909 &cp->type, sizeof(cp->type));
4913 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4914 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4915 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4916 &cp->type, sizeof(cp->type));
4920 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4921 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4922 &cp->type, sizeof(cp->type));
4926 /* Can't start discovery when it is paused */
4927 if (hdev->discovery_paused) {
4928 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4929 &cp->type, sizeof(cp->type));
4933 /* Clear the discovery filter first to free any previously
4934 * allocated memory for the UUID list.
4936 hci_discovery_filter_clear(hdev);
4938 hdev->discovery.type = cp->type;
4939 hdev->discovery.report_invalid_rssi = false;
4940 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4941 hdev->discovery.limited = true;
4943 hdev->discovery.limited = false;
4945 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4951 cmd->cmd_complete = generic_cmd_complete;
4953 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4954 queue_work(hdev->req_workqueue, &hdev->discov_update);
4958 hci_dev_unlock(hdev);
4962 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4963 void *data, u16 len)
4965 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
4969 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4970 void *data, u16 len)
4972 return start_discovery_internal(sk, hdev,
4973 MGMT_OP_START_LIMITED_DISCOVERY,
4977 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4980 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4984 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4985 void *data, u16 len)
4987 struct mgmt_cp_start_service_discovery *cp = data;
4988 struct mgmt_pending_cmd *cmd;
4989 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4990 u16 uuid_count, expected_len;
4994 bt_dev_dbg(hdev, "sock %p", sk);
4998 if (!hdev_is_powered(hdev)) {
4999 err = mgmt_cmd_complete(sk, hdev->id,
5000 MGMT_OP_START_SERVICE_DISCOVERY,
5001 MGMT_STATUS_NOT_POWERED,
5002 &cp->type, sizeof(cp->type));
5006 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5007 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5008 err = mgmt_cmd_complete(sk, hdev->id,
5009 MGMT_OP_START_SERVICE_DISCOVERY,
5010 MGMT_STATUS_BUSY, &cp->type,
5015 if (hdev->discovery_paused) {
5016 err = mgmt_cmd_complete(sk, hdev->id,
5017 MGMT_OP_START_SERVICE_DISCOVERY,
5018 MGMT_STATUS_BUSY, &cp->type,
5023 uuid_count = __le16_to_cpu(cp->uuid_count);
5024 if (uuid_count > max_uuid_count) {
5025 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5027 err = mgmt_cmd_complete(sk, hdev->id,
5028 MGMT_OP_START_SERVICE_DISCOVERY,
5029 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5034 expected_len = sizeof(*cp) + uuid_count * 16;
5035 if (expected_len != len) {
5036 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5038 err = mgmt_cmd_complete(sk, hdev->id,
5039 MGMT_OP_START_SERVICE_DISCOVERY,
5040 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5045 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5046 err = mgmt_cmd_complete(sk, hdev->id,
5047 MGMT_OP_START_SERVICE_DISCOVERY,
5048 status, &cp->type, sizeof(cp->type));
5052 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5059 cmd->cmd_complete = service_discovery_cmd_complete;
5061 /* Clear the discovery filter first to free any previously
5062 * allocated memory for the UUID list.
5064 hci_discovery_filter_clear(hdev);
5066 hdev->discovery.result_filtering = true;
5067 hdev->discovery.type = cp->type;
5068 hdev->discovery.rssi = cp->rssi;
5069 hdev->discovery.uuid_count = uuid_count;
5071 if (uuid_count > 0) {
5072 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5074 if (!hdev->discovery.uuids) {
5075 err = mgmt_cmd_complete(sk, hdev->id,
5076 MGMT_OP_START_SERVICE_DISCOVERY,
5078 &cp->type, sizeof(cp->type));
5079 mgmt_pending_remove(cmd);
5084 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5085 queue_work(hdev->req_workqueue, &hdev->discov_update);
5089 hci_dev_unlock(hdev);
5093 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5095 struct mgmt_pending_cmd *cmd;
5097 bt_dev_dbg(hdev, "status %u", status);
5101 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5103 cmd->cmd_complete(cmd, mgmt_status(status));
5104 mgmt_pending_remove(cmd);
5107 hci_dev_unlock(hdev);
5109 /* Handle suspend notifier */
5110 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5111 bt_dev_dbg(hdev, "Paused discovery");
5112 wake_up(&hdev->suspend_wait_q);
5116 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5119 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5120 struct mgmt_pending_cmd *cmd;
5123 bt_dev_dbg(hdev, "sock %p", sk);
5127 if (!hci_discovery_active(hdev)) {
5128 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5129 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5130 sizeof(mgmt_cp->type));
5134 if (hdev->discovery.type != mgmt_cp->type) {
5135 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5136 MGMT_STATUS_INVALID_PARAMS,
5137 &mgmt_cp->type, sizeof(mgmt_cp->type));
5141 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5147 cmd->cmd_complete = generic_cmd_complete;
5149 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5150 queue_work(hdev->req_workqueue, &hdev->discov_update);
5154 hci_dev_unlock(hdev);
5158 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5161 struct mgmt_cp_confirm_name *cp = data;
5162 struct inquiry_entry *e;
5165 bt_dev_dbg(hdev, "sock %p", sk);
5169 if (!hci_discovery_active(hdev)) {
5170 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5171 MGMT_STATUS_FAILED, &cp->addr,
5176 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5178 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5179 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5184 if (cp->name_known) {
5185 e->name_state = NAME_KNOWN;
5188 e->name_state = NAME_NEEDED;
5189 hci_inquiry_cache_update_resolve(hdev, e);
5192 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5193 &cp->addr, sizeof(cp->addr));
5196 hci_dev_unlock(hdev);
5200 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5203 struct mgmt_cp_block_device *cp = data;
5207 bt_dev_dbg(hdev, "sock %p", sk);
5209 if (!bdaddr_type_is_valid(cp->addr.type))
5210 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5211 MGMT_STATUS_INVALID_PARAMS,
5212 &cp->addr, sizeof(cp->addr));
5216 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5219 status = MGMT_STATUS_FAILED;
5223 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5225 status = MGMT_STATUS_SUCCESS;
5228 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5229 &cp->addr, sizeof(cp->addr));
5231 hci_dev_unlock(hdev);
5236 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5239 struct mgmt_cp_unblock_device *cp = data;
5243 bt_dev_dbg(hdev, "sock %p", sk);
5245 if (!bdaddr_type_is_valid(cp->addr.type))
5246 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5247 MGMT_STATUS_INVALID_PARAMS,
5248 &cp->addr, sizeof(cp->addr));
5252 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5255 status = MGMT_STATUS_INVALID_PARAMS;
5259 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5261 status = MGMT_STATUS_SUCCESS;
5264 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5265 &cp->addr, sizeof(cp->addr));
5267 hci_dev_unlock(hdev);
5272 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5275 struct mgmt_cp_set_device_id *cp = data;
5276 struct hci_request req;
5280 bt_dev_dbg(hdev, "sock %p", sk);
5282 source = __le16_to_cpu(cp->source);
5284 if (source > 0x0002)
5285 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5286 MGMT_STATUS_INVALID_PARAMS);
5290 hdev->devid_source = source;
5291 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5292 hdev->devid_product = __le16_to_cpu(cp->product);
5293 hdev->devid_version = __le16_to_cpu(cp->version);
5295 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5298 hci_req_init(&req, hdev);
5299 __hci_req_update_eir(&req);
5300 hci_req_run(&req, NULL);
5302 hci_dev_unlock(hdev);
5307 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5310 bt_dev_dbg(hdev, "status %u", status);
5313 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5316 struct cmd_lookup match = { NULL, hdev };
5317 struct hci_request req;
5319 struct adv_info *adv_instance;
5325 u8 mgmt_err = mgmt_status(status);
5327 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5328 cmd_status_rsp, &mgmt_err);
5332 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5333 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5335 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5337 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5340 new_settings(hdev, match.sk);
5345 /* Handle suspend notifier */
5346 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5347 hdev->suspend_tasks)) {
5348 bt_dev_dbg(hdev, "Paused advertising");
5349 wake_up(&hdev->suspend_wait_q);
5350 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5351 hdev->suspend_tasks)) {
5352 bt_dev_dbg(hdev, "Unpaused advertising");
5353 wake_up(&hdev->suspend_wait_q);
5356 /* If "Set Advertising" was just disabled and instance advertising was
5357 * set up earlier, then re-enable multi-instance advertising.
5359 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5360 list_empty(&hdev->adv_instances))
5363 instance = hdev->cur_adv_instance;
5365 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5366 struct adv_info, list);
5370 instance = adv_instance->instance;
5373 hci_req_init(&req, hdev);
5375 err = __hci_req_schedule_adv_instance(&req, instance, true);
5378 err = hci_req_run(&req, enable_advertising_instance);
5381 bt_dev_err(hdev, "failed to re-configure advertising");
5384 hci_dev_unlock(hdev);
5387 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5390 struct mgmt_mode *cp = data;
5391 struct mgmt_pending_cmd *cmd;
5392 struct hci_request req;
5396 bt_dev_dbg(hdev, "sock %p", sk);
5398 status = mgmt_le_support(hdev);
5400 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5403 /* Enabling the experimental LL Privay support disables support for
5406 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5408 MGMT_STATUS_NOT_SUPPORTED);
5410 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5412 MGMT_STATUS_INVALID_PARAMS);
5414 if (hdev->advertising_paused)
5415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5422 /* The following conditions are ones which mean that we should
5423 * not do any HCI communication but directly send a mgmt
5424 * response to user space (after toggling the flag if
5427 if (!hdev_is_powered(hdev) ||
5428 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5429 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5430 hci_conn_num(hdev, LE_LINK) > 0 ||
5431 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5432 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5436 hdev->cur_adv_instance = 0x00;
5437 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5438 if (cp->val == 0x02)
5439 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5441 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5443 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5444 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5447 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5452 err = new_settings(hdev, sk);
5457 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5458 pending_find(MGMT_OP_SET_LE, hdev)) {
5459 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5464 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5470 hci_req_init(&req, hdev);
5472 if (cp->val == 0x02)
5473 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5475 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5477 cancel_adv_timeout(hdev);
5480 /* Switch to instance "0" for the Set Advertising setting.
5481 * We cannot use update_[adv|scan_rsp]_data() here as the
5482 * HCI_ADVERTISING flag is not yet set.
5484 hdev->cur_adv_instance = 0x00;
5486 if (ext_adv_capable(hdev)) {
5487 __hci_req_start_ext_adv(&req, 0x00);
5489 __hci_req_update_adv_data(&req, 0x00);
5490 __hci_req_update_scan_rsp_data(&req, 0x00);
5491 __hci_req_enable_advertising(&req);
5494 __hci_req_disable_advertising(&req);
5497 err = hci_req_run(&req, set_advertising_complete);
5499 mgmt_pending_remove(cmd);
5502 hci_dev_unlock(hdev);
5506 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5507 void *data, u16 len)
5509 struct mgmt_cp_set_static_address *cp = data;
5512 bt_dev_dbg(hdev, "sock %p", sk);
5514 if (!lmp_le_capable(hdev))
5515 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5516 MGMT_STATUS_NOT_SUPPORTED);
5518 if (hdev_is_powered(hdev))
5519 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5520 MGMT_STATUS_REJECTED);
5522 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5523 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5524 return mgmt_cmd_status(sk, hdev->id,
5525 MGMT_OP_SET_STATIC_ADDRESS,
5526 MGMT_STATUS_INVALID_PARAMS);
5528 /* Two most significant bits shall be set */
5529 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5530 return mgmt_cmd_status(sk, hdev->id,
5531 MGMT_OP_SET_STATIC_ADDRESS,
5532 MGMT_STATUS_INVALID_PARAMS);
5537 bacpy(&hdev->static_addr, &cp->bdaddr);
5539 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5543 err = new_settings(hdev, sk);
5546 hci_dev_unlock(hdev);
5550 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5551 void *data, u16 len)
5553 struct mgmt_cp_set_scan_params *cp = data;
5554 __u16 interval, window;
5557 bt_dev_dbg(hdev, "sock %p", sk);
5559 if (!lmp_le_capable(hdev))
5560 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5561 MGMT_STATUS_NOT_SUPPORTED);
5563 interval = __le16_to_cpu(cp->interval);
5565 if (interval < 0x0004 || interval > 0x4000)
5566 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5567 MGMT_STATUS_INVALID_PARAMS);
5569 window = __le16_to_cpu(cp->window);
5571 if (window < 0x0004 || window > 0x4000)
5572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5573 MGMT_STATUS_INVALID_PARAMS);
5575 if (window > interval)
5576 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5577 MGMT_STATUS_INVALID_PARAMS);
5581 hdev->le_scan_interval = interval;
5582 hdev->le_scan_window = window;
5584 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5587 /* If background scan is running, restart it so new parameters are
5590 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5591 hdev->discovery.state == DISCOVERY_STOPPED) {
5592 struct hci_request req;
5594 hci_req_init(&req, hdev);
5596 hci_req_add_le_scan_disable(&req, false);
5597 hci_req_add_le_passive_scan(&req);
5599 hci_req_run(&req, NULL);
5602 hci_dev_unlock(hdev);
5607 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5610 struct mgmt_pending_cmd *cmd;
5612 bt_dev_dbg(hdev, "status 0x%02x", status);
5616 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5621 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5622 mgmt_status(status));
5624 struct mgmt_mode *cp = cmd->param;
5627 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5629 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5631 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5632 new_settings(hdev, cmd->sk);
5635 mgmt_pending_remove(cmd);
5638 hci_dev_unlock(hdev);
5641 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5642 void *data, u16 len)
5644 struct mgmt_mode *cp = data;
5645 struct mgmt_pending_cmd *cmd;
5646 struct hci_request req;
5649 bt_dev_dbg(hdev, "sock %p", sk);
5651 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5652 hdev->hci_ver < BLUETOOTH_VER_1_2)
5653 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5654 MGMT_STATUS_NOT_SUPPORTED);
5656 if (cp->val != 0x00 && cp->val != 0x01)
5657 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5658 MGMT_STATUS_INVALID_PARAMS);
5662 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5663 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5668 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5669 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5674 if (!hdev_is_powered(hdev)) {
5675 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5676 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5678 new_settings(hdev, sk);
5682 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5689 hci_req_init(&req, hdev);
5691 __hci_req_write_fast_connectable(&req, cp->val);
5693 err = hci_req_run(&req, fast_connectable_complete);
5695 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5696 MGMT_STATUS_FAILED);
5697 mgmt_pending_remove(cmd);
5701 hci_dev_unlock(hdev);
5706 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5708 struct mgmt_pending_cmd *cmd;
5710 bt_dev_dbg(hdev, "status 0x%02x", status);
5714 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5719 u8 mgmt_err = mgmt_status(status);
5721 /* We need to restore the flag if related HCI commands
5724 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5726 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5728 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5729 new_settings(hdev, cmd->sk);
5732 mgmt_pending_remove(cmd);
5735 hci_dev_unlock(hdev);
5738 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5740 struct mgmt_mode *cp = data;
5741 struct mgmt_pending_cmd *cmd;
5742 struct hci_request req;
5745 bt_dev_dbg(hdev, "sock %p", sk);
5747 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5749 MGMT_STATUS_NOT_SUPPORTED);
5751 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5752 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5753 MGMT_STATUS_REJECTED);
5755 if (cp->val != 0x00 && cp->val != 0x01)
5756 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5757 MGMT_STATUS_INVALID_PARAMS);
5761 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5762 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5766 if (!hdev_is_powered(hdev)) {
5768 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5769 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5770 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5771 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5772 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5775 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5777 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5781 err = new_settings(hdev, sk);
5785 /* Reject disabling when powered on */
5787 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5788 MGMT_STATUS_REJECTED);
5791 /* When configuring a dual-mode controller to operate
5792 * with LE only and using a static address, then switching
5793 * BR/EDR back on is not allowed.
5795 * Dual-mode controllers shall operate with the public
5796 * address as its identity address for BR/EDR and LE. So
5797 * reject the attempt to create an invalid configuration.
5799 * The same restrictions applies when secure connections
5800 * has been enabled. For BR/EDR this is a controller feature
5801 * while for LE it is a host stack feature. This means that
5802 * switching BR/EDR back on when secure connections has been
5803 * enabled is not a supported transaction.
5805 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5806 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5807 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5808 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5809 MGMT_STATUS_REJECTED);
5814 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5815 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5820 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5826 /* We need to flip the bit already here so that
5827 * hci_req_update_adv_data generates the correct flags.
5829 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5831 hci_req_init(&req, hdev);
5833 __hci_req_write_fast_connectable(&req, false);
5834 __hci_req_update_scan(&req);
5836 /* Since only the advertising data flags will change, there
5837 * is no need to update the scan response data.
5839 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5841 err = hci_req_run(&req, set_bredr_complete);
5843 mgmt_pending_remove(cmd);
5846 hci_dev_unlock(hdev);
5850 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5852 struct mgmt_pending_cmd *cmd;
5853 struct mgmt_mode *cp;
5855 bt_dev_dbg(hdev, "status %u", status);
5859 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5864 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5865 mgmt_status(status));
5873 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5874 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5877 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5878 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5881 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5882 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5886 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5887 new_settings(hdev, cmd->sk);
5890 mgmt_pending_remove(cmd);
5892 hci_dev_unlock(hdev);
5895 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5896 void *data, u16 len)
5898 struct mgmt_mode *cp = data;
5899 struct mgmt_pending_cmd *cmd;
5900 struct hci_request req;
5904 bt_dev_dbg(hdev, "sock %p", sk);
5906 if (!lmp_sc_capable(hdev) &&
5907 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5908 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5909 MGMT_STATUS_NOT_SUPPORTED);
5911 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5912 lmp_sc_capable(hdev) &&
5913 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5914 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5915 MGMT_STATUS_REJECTED);
5917 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5918 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5919 MGMT_STATUS_INVALID_PARAMS);
5923 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5924 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5928 changed = !hci_dev_test_and_set_flag(hdev,
5930 if (cp->val == 0x02)
5931 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5933 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5935 changed = hci_dev_test_and_clear_flag(hdev,
5937 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5940 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5945 err = new_settings(hdev, sk);
5950 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5951 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5958 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5959 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5960 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5964 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5970 hci_req_init(&req, hdev);
5971 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5972 err = hci_req_run(&req, sc_enable_complete);
5974 mgmt_pending_remove(cmd);
5979 hci_dev_unlock(hdev);
5983 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5984 void *data, u16 len)
5986 struct mgmt_mode *cp = data;
5987 bool changed, use_changed;
5990 bt_dev_dbg(hdev, "sock %p", sk);
5992 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5993 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5994 MGMT_STATUS_INVALID_PARAMS);
5999 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6001 changed = hci_dev_test_and_clear_flag(hdev,
6002 HCI_KEEP_DEBUG_KEYS);
6004 if (cp->val == 0x02)
6005 use_changed = !hci_dev_test_and_set_flag(hdev,
6006 HCI_USE_DEBUG_KEYS);
6008 use_changed = hci_dev_test_and_clear_flag(hdev,
6009 HCI_USE_DEBUG_KEYS);
6011 if (hdev_is_powered(hdev) && use_changed &&
6012 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6013 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6014 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6015 sizeof(mode), &mode);
6018 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6023 err = new_settings(hdev, sk);
6026 hci_dev_unlock(hdev);
6030 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6033 struct mgmt_cp_set_privacy *cp = cp_data;
6037 bt_dev_dbg(hdev, "sock %p", sk);
6039 if (!lmp_le_capable(hdev))
6040 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6041 MGMT_STATUS_NOT_SUPPORTED);
6043 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6044 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6045 MGMT_STATUS_INVALID_PARAMS);
6048 /* commenting out since set privacy command is always rejected
6049 * if this condition is enabled.
6051 if (hdev_is_powered(hdev))
6052 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6053 MGMT_STATUS_REJECTED);
6058 /* If user space supports this command it is also expected to
6059 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6061 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6064 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6065 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6066 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6067 hci_adv_instances_set_rpa_expired(hdev, true);
6068 if (cp->privacy == 0x02)
6069 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6071 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6073 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6074 memset(hdev->irk, 0, sizeof(hdev->irk));
6075 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6076 hci_adv_instances_set_rpa_expired(hdev, false);
6077 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6080 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6085 err = new_settings(hdev, sk);
6088 hci_dev_unlock(hdev);
6092 static bool irk_is_valid(struct mgmt_irk_info *irk)
6094 switch (irk->addr.type) {
6095 case BDADDR_LE_PUBLIC:
6098 case BDADDR_LE_RANDOM:
6099 /* Two most significant bits shall be set */
6100 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6108 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6111 struct mgmt_cp_load_irks *cp = cp_data;
6112 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6113 sizeof(struct mgmt_irk_info));
6114 u16 irk_count, expected_len;
6117 bt_dev_dbg(hdev, "sock %p", sk);
6119 if (!lmp_le_capable(hdev))
6120 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6121 MGMT_STATUS_NOT_SUPPORTED);
6123 irk_count = __le16_to_cpu(cp->irk_count);
6124 if (irk_count > max_irk_count) {
6125 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6127 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6128 MGMT_STATUS_INVALID_PARAMS);
6131 expected_len = struct_size(cp, irks, irk_count);
6132 if (expected_len != len) {
6133 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6135 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6136 MGMT_STATUS_INVALID_PARAMS);
6139 bt_dev_dbg(hdev, "irk_count %u", irk_count);
6141 for (i = 0; i < irk_count; i++) {
6142 struct mgmt_irk_info *key = &cp->irks[i];
6144 if (!irk_is_valid(key))
6145 return mgmt_cmd_status(sk, hdev->id,
6147 MGMT_STATUS_INVALID_PARAMS);
6152 hci_smp_irks_clear(hdev);
6154 for (i = 0; i < irk_count; i++) {
6155 struct mgmt_irk_info *irk = &cp->irks[i];
6157 if (hci_is_blocked_key(hdev,
6158 HCI_BLOCKED_KEY_TYPE_IRK,
6160 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6165 hci_add_irk(hdev, &irk->addr.bdaddr,
6166 le_addr_type(irk->addr.type), irk->val,
6170 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6172 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6174 hci_dev_unlock(hdev);
6180 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
6181 void *data, u16 len)
6183 struct mgmt_cp_set_advertising_params *cp = data;
6188 BT_DBG("%s", hdev->name);
6190 if (!lmp_le_capable(hdev))
6191 return mgmt_cmd_status(sk, hdev->id,
6192 MGMT_OP_SET_ADVERTISING_PARAMS,
6193 MGMT_STATUS_NOT_SUPPORTED);
6195 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6196 return mgmt_cmd_status(sk, hdev->id,
6197 MGMT_OP_SET_ADVERTISING_PARAMS,
6200 min_interval = __le16_to_cpu(cp->interval_min);
6201 max_interval = __le16_to_cpu(cp->interval_max);
6203 if (min_interval > max_interval ||
6204 min_interval < 0x0020 || max_interval > 0x4000)
6205 return mgmt_cmd_status(sk, hdev->id,
6206 MGMT_OP_SET_ADVERTISING_PARAMS,
6207 MGMT_STATUS_INVALID_PARAMS);
6211 hdev->le_adv_min_interval = min_interval;
6212 hdev->le_adv_max_interval = max_interval;
6213 hdev->adv_filter_policy = cp->filter_policy;
6214 hdev->adv_type = cp->type;
6216 err = mgmt_cmd_complete(sk, hdev->id,
6217 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
6219 hci_dev_unlock(hdev);
6224 static void set_advertising_data_complete(struct hci_dev *hdev,
6225 u8 status, u16 opcode)
6227 struct mgmt_cp_set_advertising_data *cp;
6228 struct mgmt_pending_cmd *cmd;
6230 BT_DBG("status 0x%02x", status);
6234 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
6241 mgmt_cmd_status(cmd->sk, hdev->id,
6242 MGMT_OP_SET_ADVERTISING_DATA,
6243 mgmt_status(status));
6245 mgmt_cmd_complete(cmd->sk, hdev->id,
6246 MGMT_OP_SET_ADVERTISING_DATA, 0,
6249 mgmt_pending_remove(cmd);
6252 hci_dev_unlock(hdev);
6255 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6256 void *data, u16 len)
6258 struct mgmt_pending_cmd *cmd;
6259 struct hci_request req;
6260 struct mgmt_cp_set_advertising_data *cp = data;
6261 struct hci_cp_le_set_adv_data adv;
6264 BT_DBG("%s", hdev->name);
6266 if (!lmp_le_capable(hdev)) {
6267 return mgmt_cmd_status(sk, hdev->id,
6268 MGMT_OP_SET_ADVERTISING_DATA,
6269 MGMT_STATUS_NOT_SUPPORTED);
6274 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6275 err = mgmt_cmd_status(sk, hdev->id,
6276 MGMT_OP_SET_ADVERTISING_DATA,
6281 if (len > HCI_MAX_AD_LENGTH) {
6282 err = mgmt_cmd_status(sk, hdev->id,
6283 MGMT_OP_SET_ADVERTISING_DATA,
6284 MGMT_STATUS_INVALID_PARAMS);
6288 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6295 hci_req_init(&req, hdev);
6297 memset(&adv, 0, sizeof(adv));
6298 memcpy(adv.data, cp->data, len);
6301 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6303 err = hci_req_run(&req, set_advertising_data_complete);
6305 mgmt_pending_remove(cmd);
6308 hci_dev_unlock(hdev);
6313 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6316 struct mgmt_cp_set_scan_rsp_data *cp;
6317 struct mgmt_pending_cmd *cmd;
6319 BT_DBG("status 0x%02x", status);
6323 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6330 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6331 mgmt_status(status));
6333 mgmt_cmd_complete(cmd->sk, hdev->id,
6334 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6337 mgmt_pending_remove(cmd);
6340 hci_dev_unlock(hdev);
6343 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6346 struct mgmt_pending_cmd *cmd;
6347 struct hci_request req;
6348 struct mgmt_cp_set_scan_rsp_data *cp = data;
6349 struct hci_cp_le_set_scan_rsp_data rsp;
6352 BT_DBG("%s", hdev->name);
6354 if (!lmp_le_capable(hdev))
6355 return mgmt_cmd_status(sk, hdev->id,
6356 MGMT_OP_SET_SCAN_RSP_DATA,
6357 MGMT_STATUS_NOT_SUPPORTED);
6361 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6362 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6367 if (len > HCI_MAX_AD_LENGTH) {
6368 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6369 MGMT_STATUS_INVALID_PARAMS);
6373 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6379 hci_req_init(&req, hdev);
6381 memset(&rsp, 0, sizeof(rsp));
6382 memcpy(rsp.data, cp->data, len);
6385 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6387 err = hci_req_run(&req, set_scan_rsp_data_complete);
6389 mgmt_pending_remove(cmd);
6392 hci_dev_unlock(hdev);
6397 /* Adv White List feature */
6398 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6400 struct mgmt_cp_add_dev_white_list *cp;
6401 struct mgmt_pending_cmd *cmd;
6403 BT_DBG("status 0x%02x", status);
6407 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
6414 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6415 mgmt_status(status));
6417 mgmt_cmd_complete(cmd->sk, hdev->id,
6418 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
6420 mgmt_pending_remove(cmd);
6423 hci_dev_unlock(hdev);
6426 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
6427 void *data, u16 len)
6429 struct mgmt_pending_cmd *cmd;
6430 struct mgmt_cp_add_dev_white_list *cp = data;
6431 struct hci_request req;
6434 BT_DBG("%s", hdev->name);
6436 if (!lmp_le_capable(hdev))
6437 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6438 MGMT_STATUS_NOT_SUPPORTED);
6440 if (!hdev_is_powered(hdev))
6441 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6442 MGMT_STATUS_REJECTED);
6446 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
6447 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6452 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
6458 hci_req_init(&req, hdev);
6460 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
6462 err = hci_req_run(&req, add_white_list_complete);
6464 mgmt_pending_remove(cmd);
6469 hci_dev_unlock(hdev);
6474 static void remove_from_white_list_complete(struct hci_dev *hdev,
6475 u8 status, u16 opcode)
6477 struct mgmt_cp_remove_dev_from_white_list *cp;
6478 struct mgmt_pending_cmd *cmd;
6480 BT_DBG("status 0x%02x", status);
6484 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
6491 mgmt_cmd_status(cmd->sk, hdev->id,
6492 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6493 mgmt_status(status));
6495 mgmt_cmd_complete(cmd->sk, hdev->id,
6496 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
6499 mgmt_pending_remove(cmd);
6502 hci_dev_unlock(hdev);
6505 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
6506 void *data, u16 len)
6508 struct mgmt_pending_cmd *cmd;
6509 struct mgmt_cp_remove_dev_from_white_list *cp = data;
6510 struct hci_request req;
6513 BT_DBG("%s", hdev->name);
6515 if (!lmp_le_capable(hdev))
6516 return mgmt_cmd_status(sk, hdev->id,
6517 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6518 MGMT_STATUS_NOT_SUPPORTED);
6520 if (!hdev_is_powered(hdev))
6521 return mgmt_cmd_status(sk, hdev->id,
6522 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6523 MGMT_STATUS_REJECTED);
6527 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
6528 err = mgmt_cmd_status(sk, hdev->id,
6529 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6534 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6541 hci_req_init(&req, hdev);
6543 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
6545 err = hci_req_run(&req, remove_from_white_list_complete);
6547 mgmt_pending_remove(cmd);
6552 hci_dev_unlock(hdev);
6557 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
6560 struct mgmt_pending_cmd *cmd;
6562 BT_DBG("status 0x%02x", status);
6566 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
6571 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6572 mgmt_status(status));
6574 mgmt_cmd_complete(cmd->sk, hdev->id,
6575 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6578 mgmt_pending_remove(cmd);
6581 hci_dev_unlock(hdev);
6584 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
6585 void *data, u16 len)
6587 struct mgmt_pending_cmd *cmd;
6588 struct hci_request req;
6591 BT_DBG("%s", hdev->name);
6593 if (!lmp_le_capable(hdev))
6594 return mgmt_cmd_status(sk, hdev->id,
6595 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6596 MGMT_STATUS_NOT_SUPPORTED);
6598 if (!hdev_is_powered(hdev))
6599 return mgmt_cmd_status(sk, hdev->id,
6600 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6601 MGMT_STATUS_REJECTED);
6605 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
6606 err = mgmt_cmd_status(sk, hdev->id,
6607 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6612 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6619 hci_req_init(&req, hdev);
6621 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
6623 err = hci_req_run(&req, clear_white_list_complete);
6625 mgmt_pending_remove(cmd);
6630 hci_dev_unlock(hdev);
6635 static void set_rssi_threshold_complete(struct hci_dev *hdev,
6636 u8 status, u16 opcode)
6638 struct mgmt_pending_cmd *cmd;
6640 BT_DBG("status 0x%02x", status);
6644 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6649 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6650 mgmt_status(status));
6652 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6655 mgmt_pending_remove(cmd);
6658 hci_dev_unlock(hdev);
6661 static void set_rssi_disable_complete(struct hci_dev *hdev,
6662 u8 status, u16 opcode)
6664 struct mgmt_pending_cmd *cmd;
6666 BT_DBG("status 0x%02x", status);
6670 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6675 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6676 mgmt_status(status));
6678 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6681 mgmt_pending_remove(cmd);
6684 hci_dev_unlock(hdev);
6687 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6688 void *data, u16 len)
6691 struct hci_cp_set_rssi_threshold th = { 0, };
6692 struct mgmt_cp_set_enable_rssi *cp = data;
6693 struct hci_conn *conn;
6694 struct mgmt_pending_cmd *cmd;
6695 struct hci_request req;
6700 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6702 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6703 MGMT_STATUS_FAILED);
6707 if (!lmp_le_capable(hdev)) {
6708 mgmt_pending_remove(cmd);
6709 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6710 MGMT_STATUS_NOT_SUPPORTED);
6714 if (!hdev_is_powered(hdev)) {
6715 BT_DBG("%s", hdev->name);
6716 mgmt_pending_remove(cmd);
6717 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6718 MGMT_STATUS_NOT_POWERED);
6722 if (cp->link_type == 0x01)
6723 dest_type = LE_LINK;
6725 dest_type = ACL_LINK;
6727 /* Get LE/ACL link handle info */
6728 conn = hci_conn_hash_lookup_ba(hdev,
6729 dest_type, &cp->bdaddr);
6732 err = mgmt_cmd_complete(sk, hdev->id,
6733 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
6734 mgmt_pending_remove(cmd);
6738 hci_req_init(&req, hdev);
6740 th.hci_le_ext_opcode = 0x0B;
6742 th.conn_handle = conn->handle;
6743 th.alert_mask = 0x07;
6744 th.low_th = cp->low_th;
6745 th.in_range_th = cp->in_range_th;
6746 th.high_th = cp->high_th;
6748 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6749 err = hci_req_run(&req, set_rssi_threshold_complete);
6752 mgmt_pending_remove(cmd);
6753 BT_ERR("Error in requesting hci_req_run");
6758 hci_dev_unlock(hdev);
6762 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6763 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6765 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6766 struct mgmt_cp_set_enable_rssi *cp = data;
6767 struct mgmt_pending_cmd *cmd;
6772 mgmt_rp.status = rp->status;
6773 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6774 mgmt_rp.bt_address = cp->bdaddr;
6775 mgmt_rp.link_type = cp->link_type;
6777 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6778 MGMT_STATUS_SUCCESS, &mgmt_rp,
6779 sizeof(struct mgmt_cc_rsp_enable_rssi));
6781 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6782 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6784 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6785 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6786 &mgmt_rp.bt_address, true);
6790 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6792 mgmt_pending_remove(cmd);
6794 hci_dev_unlock(hdev);
6797 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6798 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6800 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6801 struct mgmt_cp_disable_rssi *cp = data;
6802 struct mgmt_pending_cmd *cmd;
6807 mgmt_rp.status = rp->status;
6808 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6809 mgmt_rp.bt_address = cp->bdaddr;
6810 mgmt_rp.link_type = cp->link_type;
6812 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6813 MGMT_STATUS_SUCCESS, &mgmt_rp,
6814 sizeof(struct mgmt_cc_rsp_enable_rssi));
6816 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6817 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6819 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6820 &mgmt_rp.bt_address, false);
6824 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6826 mgmt_pending_remove(cmd);
6828 hci_dev_unlock(hdev);
6831 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6832 void *data, u16 len)
6834 struct mgmt_pending_cmd *cmd;
6835 struct hci_request req;
6836 struct hci_cp_set_enable_rssi cp_en = { 0, };
6839 BT_DBG("Set Disable RSSI.");
6841 cp_en.hci_le_ext_opcode = 0x01;
6842 cp_en.le_enable_cs_Features = 0x00;
6843 cp_en.data[0] = 0x00;
6844 cp_en.data[1] = 0x00;
6845 cp_en.data[2] = 0x00;
6849 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6851 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6852 MGMT_STATUS_FAILED);
6856 if (!lmp_le_capable(hdev)) {
6857 mgmt_pending_remove(cmd);
6858 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6859 MGMT_STATUS_NOT_SUPPORTED);
6863 if (!hdev_is_powered(hdev)) {
6864 BT_DBG("%s", hdev->name);
6865 mgmt_pending_remove(cmd);
6866 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6867 MGMT_STATUS_NOT_POWERED);
6871 hci_req_init(&req, hdev);
6873 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6874 sizeof(struct hci_cp_set_enable_rssi),
6875 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6876 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6878 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6879 err = hci_req_run(&req, set_rssi_disable_complete);
6882 mgmt_pending_remove(cmd);
6883 BT_ERR("Error in requesting hci_req_run");
6888 hci_dev_unlock(hdev);
6892 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6894 struct hci_cc_rsp_enable_rssi *rp = response;
6895 struct mgmt_pending_cmd *cmd_enable = NULL;
6896 struct mgmt_pending_cmd *cmd_disable = NULL;
6897 struct mgmt_cp_set_enable_rssi *cp_en;
6898 struct mgmt_cp_disable_rssi *cp_dis;
6901 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6902 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6903 hci_dev_unlock(hdev);
6906 BT_DBG("Enable Request");
6909 BT_DBG("Disable Request");
6912 cp_en = cmd_enable->param;
6917 switch (rp->le_ext_opcode) {
6919 BT_DBG("RSSI enabled.. Setting Threshold...");
6920 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6921 cp_en, sizeof(*cp_en));
6925 BT_DBG("Sending RSSI enable success");
6926 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6927 cp_en, rp, rp->status);
6931 } else if (cmd_disable) {
6932 cp_dis = cmd_disable->param;
6937 switch (rp->le_ext_opcode) {
6939 BT_DBG("Sending RSSI disable success");
6940 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6941 cp_dis, rp, rp->status);
6946 * Only unset RSSI Threshold values for the Link if
6947 * RSSI is monitored for other BREDR or LE Links
6949 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6950 BT_DBG("Unset Threshold. Other links being monitored");
6951 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6952 cp_dis, rp, rp->status);
6954 BT_DBG("Unset Threshold. Disabling...");
6955 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6956 cp_dis, sizeof(*cp_dis));
6963 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
6966 struct mgmt_pending_cmd *cmd;
6968 BT_DBG("status 0x%02x", status);
6972 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6977 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6978 mgmt_status(status));
6980 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6983 mgmt_pending_remove(cmd);
6986 hci_dev_unlock(hdev);
6989 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
6990 void *data, u16 len)
6992 struct mgmt_pending_cmd *cmd;
6993 struct hci_request req;
6994 struct mgmt_cp_set_enable_rssi *cp = data;
6995 struct hci_cp_set_enable_rssi cp_en = { 0, };
6998 BT_DBG("Set Enable RSSI.");
7000 cp_en.hci_le_ext_opcode = 0x01;
7001 cp_en.le_enable_cs_Features = 0x04;
7002 cp_en.data[0] = 0x00;
7003 cp_en.data[1] = 0x00;
7004 cp_en.data[2] = 0x00;
7008 if (!lmp_le_capable(hdev)) {
7009 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7010 MGMT_STATUS_NOT_SUPPORTED);
7014 if (!hdev_is_powered(hdev)) {
7015 BT_DBG("%s", hdev->name);
7016 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7017 MGMT_STATUS_NOT_POWERED);
7021 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
7022 BT_DBG("%s", hdev->name);
7023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7028 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
7031 BT_DBG("%s", hdev->name);
7036 /* If RSSI is already enabled directly set Threshold values */
7037 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
7038 hci_dev_unlock(hdev);
7039 BT_DBG("RSSI Enabled. Directly set Threshold");
7040 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
7044 hci_req_init(&req, hdev);
7046 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7047 sizeof(struct hci_cp_set_enable_rssi),
7048 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7049 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7051 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7052 err = hci_req_run(&req, set_rssi_enable_complete);
7055 mgmt_pending_remove(cmd);
7056 BT_ERR("Error in requesting hci_req_run");
7061 hci_dev_unlock(hdev);
7066 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7068 struct mgmt_pending_cmd *cmd;
7070 BT_DBG("status 0x%02x", status);
7074 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
7078 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7079 MGMT_STATUS_SUCCESS, &status, 1);
7081 mgmt_pending_remove(cmd);
7084 hci_dev_unlock(hdev);
7087 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
7090 struct mgmt_pending_cmd *cmd;
7091 struct hci_request req;
7092 struct mgmt_cp_get_raw_rssi *cp = data;
7093 struct hci_cp_get_raw_rssi hci_cp;
7095 struct hci_conn *conn;
7099 BT_DBG("Get Raw RSSI.");
7103 if (!lmp_le_capable(hdev)) {
7104 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7105 MGMT_STATUS_NOT_SUPPORTED);
7109 if (cp->link_type == 0x01)
7110 dest_type = LE_LINK;
7112 dest_type = ACL_LINK;
7114 /* Get LE/BREDR link handle info */
7115 conn = hci_conn_hash_lookup_ba(hdev,
7116 dest_type, &cp->bt_address);
7118 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7119 MGMT_STATUS_NOT_CONNECTED);
7122 hci_cp.conn_handle = conn->handle;
7124 if (!hdev_is_powered(hdev)) {
7125 BT_DBG("%s", hdev->name);
7126 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7127 MGMT_STATUS_NOT_POWERED);
7131 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
7132 BT_DBG("%s", hdev->name);
7133 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7138 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
7140 BT_DBG("%s", hdev->name);
7145 hci_req_init(&req, hdev);
7147 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
7148 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
7149 err = hci_req_run(&req, get_raw_rssi_complete);
7152 mgmt_pending_remove(cmd);
7153 BT_ERR("Error in requesting hci_req_run");
7157 hci_dev_unlock(hdev);
7162 void mgmt_raw_rssi_response(struct hci_dev *hdev,
7163 struct hci_cc_rp_get_raw_rssi *rp, int success)
7165 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
7166 struct hci_conn *conn;
7168 mgmt_rp.status = rp->status;
7169 mgmt_rp.rssi_dbm = rp->rssi_dbm;
7171 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
7175 bacpy(&mgmt_rp.bt_address, &conn->dst);
7176 if (conn->type == LE_LINK)
7177 mgmt_rp.link_type = 0x01;
7179 mgmt_rp.link_type = 0x00;
7181 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
7182 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
7185 static void set_disable_threshold_complete(struct hci_dev *hdev,
7186 u8 status, u16 opcode)
7188 struct mgmt_pending_cmd *cmd;
7190 BT_DBG("status 0x%02x", status);
7194 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7198 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7199 MGMT_STATUS_SUCCESS, &status, 1);
7201 mgmt_pending_remove(cmd);
7204 hci_dev_unlock(hdev);
7207 /** Removes monitoring for a link*/
7208 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
7209 void *data, u16 len)
7212 struct hci_cp_set_rssi_threshold th = { 0, };
7213 struct mgmt_cp_disable_rssi *cp = data;
7214 struct hci_conn *conn;
7215 struct mgmt_pending_cmd *cmd;
7216 struct hci_request req;
7219 BT_DBG("Set Disable RSSI.");
7223 if (!lmp_le_capable(hdev)) {
7224 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7225 MGMT_STATUS_NOT_SUPPORTED);
7229 /* Get LE/ACL link handle info*/
7230 if (cp->link_type == 0x01)
7231 dest_type = LE_LINK;
7233 dest_type = ACL_LINK;
7235 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
7237 err = mgmt_cmd_complete(sk, hdev->id,
7238 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
7242 th.hci_le_ext_opcode = 0x0B;
7244 th.conn_handle = conn->handle;
7245 th.alert_mask = 0x00;
7247 th.in_range_th = 0x00;
7250 if (!hdev_is_powered(hdev)) {
7251 BT_DBG("%s", hdev->name);
7252 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7257 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
7258 BT_DBG("%s", hdev->name);
7259 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7264 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
7267 BT_DBG("%s", hdev->name);
7272 hci_req_init(&req, hdev);
7274 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7275 err = hci_req_run(&req, set_disable_threshold_complete);
7277 mgmt_pending_remove(cmd);
7278 BT_ERR("Error in requesting hci_req_run");
7283 hci_dev_unlock(hdev);
7288 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
7289 s8 alert_type, s8 rssi_dbm)
7291 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
7292 struct hci_conn *conn;
7294 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
7295 conn_handle, alert_type, rssi_dbm);
7297 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
7300 BT_ERR("RSSI alert Error: Device not found for handle");
7303 bacpy(&mgmt_ev.bdaddr, &conn->dst);
7305 if (conn->type == LE_LINK)
7306 mgmt_ev.link_type = 0x01;
7308 mgmt_ev.link_type = 0x00;
7310 mgmt_ev.alert_type = alert_type;
7311 mgmt_ev.rssi_dbm = rssi_dbm;
7313 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
7314 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
7318 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
7320 struct mgmt_pending_cmd *cmd;
7324 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7326 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7330 type = hdev->le_discovery.type;
7332 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7333 mgmt_status(status), &type, sizeof(type));
7334 mgmt_pending_remove(cmd);
7339 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
7342 unsigned long timeout = 0;
7344 BT_DBG("status %d", status);
7348 mgmt_start_le_discovery_failed(hdev, status);
7349 hci_dev_unlock(hdev);
7354 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
7355 hci_dev_unlock(hdev);
7357 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
7358 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
7363 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
7366 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
7367 void *data, u16 len)
7369 struct mgmt_cp_start_le_discovery *cp = data;
7370 struct mgmt_pending_cmd *cmd;
7371 struct hci_cp_le_set_scan_param param_cp;
7372 struct hci_cp_le_set_scan_enable enable_cp;
7373 struct hci_request req;
7374 u8 status, own_addr_type;
7377 BT_DBG("%s", hdev->name);
7381 if (!hdev_is_powered(hdev)) {
7382 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7383 MGMT_STATUS_NOT_POWERED);
7387 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
7388 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7393 if (cp->type != DISCOV_TYPE_LE) {
7394 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7395 MGMT_STATUS_INVALID_PARAMS);
7399 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
7405 hdev->le_discovery.type = cp->type;
7407 hci_req_init(&req, hdev);
7409 status = mgmt_le_support(hdev);
7411 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7413 mgmt_pending_remove(cmd);
7417 /* If controller is scanning, it means the background scanning
7418 * is running. Thus, we should temporarily stop it in order to
7419 * set the discovery scanning parameters.
7421 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
7422 hci_req_add_le_scan_disable(&req, false);
7424 memset(¶m_cp, 0, sizeof(param_cp));
7426 /* All active scans will be done with either a resolvable
7427 * private address (when privacy feature has been enabled)
7428 * or unresolvable private address.
7430 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
7432 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7433 MGMT_STATUS_FAILED);
7434 mgmt_pending_remove(cmd);
7438 param_cp.type = hdev->le_scan_type;
7439 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
7440 param_cp.window = cpu_to_le16(hdev->le_scan_window);
7441 param_cp.own_address_type = own_addr_type;
7442 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
7445 memset(&enable_cp, 0, sizeof(enable_cp));
7446 enable_cp.enable = LE_SCAN_ENABLE;
7447 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
7449 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
7452 err = hci_req_run(&req, start_le_discovery_complete);
7454 mgmt_pending_remove(cmd);
7456 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
7459 hci_dev_unlock(hdev);
7463 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
7465 struct mgmt_pending_cmd *cmd;
7468 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7472 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7473 mgmt_status(status), &hdev->le_discovery.type,
7474 sizeof(hdev->le_discovery.type));
7475 mgmt_pending_remove(cmd);
7480 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
7483 BT_DBG("status %d", status);
7488 mgmt_stop_le_discovery_failed(hdev, status);
7492 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7495 hci_dev_unlock(hdev);
7498 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
7499 void *data, u16 len)
7501 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
7502 struct mgmt_pending_cmd *cmd;
7503 struct hci_request req;
7506 BT_DBG("%s", hdev->name);
7510 if (!hci_le_discovery_active(hdev)) {
7511 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7512 MGMT_STATUS_REJECTED, &mgmt_cp->type,
7513 sizeof(mgmt_cp->type));
7517 if (hdev->le_discovery.type != mgmt_cp->type) {
7518 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7519 MGMT_STATUS_INVALID_PARAMS,
7520 &mgmt_cp->type, sizeof(mgmt_cp->type));
7524 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
7530 hci_req_init(&req, hdev);
7532 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
7533 BT_DBG("unknown le discovery state %u",
7534 hdev->le_discovery.state);
7536 mgmt_pending_remove(cmd);
7537 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7538 MGMT_STATUS_FAILED, &mgmt_cp->type,
7539 sizeof(mgmt_cp->type));
7543 cancel_delayed_work(&hdev->le_scan_disable);
7544 hci_req_add_le_scan_disable(&req, false);
7546 err = hci_req_run(&req, stop_le_discovery_complete);
7548 mgmt_pending_remove(cmd);
7550 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
7553 hci_dev_unlock(hdev);
7557 /* Separate LE discovery */
7558 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
7560 struct mgmt_ev_discovering ev;
7561 struct mgmt_pending_cmd *cmd;
7563 BT_DBG("%s le discovering %u", hdev->name, discovering);
7566 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7568 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7571 u8 type = hdev->le_discovery.type;
7573 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
7575 mgmt_pending_remove(cmd);
7578 memset(&ev, 0, sizeof(ev));
7579 ev.type = hdev->le_discovery.type;
7580 ev.discovering = discovering;
7582 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7585 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
7586 void *data, u16 len)
7590 BT_DBG("%s", hdev->name);
7594 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
7596 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
7598 hci_dev_unlock(hdev);
7603 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
7608 if (min > max || min < 6 || max > 3200)
7611 if (to_multiplier < 10 || to_multiplier > 3200)
7614 if (max >= to_multiplier * 8)
7617 max_latency = (to_multiplier * 8 / max) - 1;
7619 if (latency > 499 || latency > max_latency)
7625 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
7628 struct mgmt_cp_le_conn_update *cp = data;
7630 struct hci_conn *conn;
7631 u16 min, max, latency, supervision_timeout;
7634 if (!hdev_is_powered(hdev))
7635 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7636 MGMT_STATUS_NOT_POWERED);
7638 min = __le16_to_cpu(cp->conn_interval_min);
7639 max = __le16_to_cpu(cp->conn_interval_max);
7640 latency = __le16_to_cpu(cp->conn_latency);
7641 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
7643 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
7644 min, max, latency, supervision_timeout);
7646 err = check_le_conn_update_param(min, max, latency,
7647 supervision_timeout);
7650 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7651 MGMT_STATUS_INVALID_PARAMS);
7655 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
7657 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7658 MGMT_STATUS_NOT_CONNECTED);
7659 hci_dev_unlock(hdev);
7663 hci_dev_unlock(hdev);
7665 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
7667 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
7671 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
7674 struct mgmt_cp_set_manufacturer_data *cp;
7675 struct mgmt_pending_cmd *cmd;
7677 BT_DBG("status 0x%02x", status);
7681 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
7688 mgmt_cmd_status(cmd->sk, hdev->id,
7689 MGMT_OP_SET_MANUFACTURER_DATA,
7690 mgmt_status(status));
7692 mgmt_cmd_complete(cmd->sk, hdev->id,
7693 MGMT_OP_SET_MANUFACTURER_DATA, 0,
7696 mgmt_pending_remove(cmd);
7699 hci_dev_unlock(hdev);
7702 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
7703 void *data, u16 len)
7705 struct mgmt_pending_cmd *cmd;
7706 struct hci_request req;
7707 struct mgmt_cp_set_manufacturer_data *cp = data;
7708 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
7712 BT_DBG("%s", hdev->name);
7714 if (!lmp_bredr_capable(hdev))
7715 return mgmt_cmd_status(sk, hdev->id,
7716 MGMT_OP_SET_MANUFACTURER_DATA,
7717 MGMT_STATUS_NOT_SUPPORTED);
7719 if (cp->data[0] == 0 ||
7720 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
7721 return mgmt_cmd_status(sk, hdev->id,
7722 MGMT_OP_SET_MANUFACTURER_DATA,
7723 MGMT_STATUS_INVALID_PARAMS);
7725 if (cp->data[1] != 0xFF)
7726 return mgmt_cmd_status(sk, hdev->id,
7727 MGMT_OP_SET_MANUFACTURER_DATA,
7728 MGMT_STATUS_NOT_SUPPORTED);
7732 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
7733 err = mgmt_cmd_status(sk, hdev->id,
7734 MGMT_OP_SET_MANUFACTURER_DATA,
7739 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
7746 hci_req_init(&req, hdev);
7748 /* if new data is same as previous data then return command
7751 if (hdev->manufacturer_len == cp->data[0] - 1 &&
7752 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
7753 mgmt_pending_remove(cmd);
7754 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
7755 0, cp, sizeof(*cp));
7760 old_len = hdev->manufacturer_len;
7762 memcpy(old_data, hdev->manufacturer_data, old_len);
7764 hdev->manufacturer_len = cp->data[0] - 1;
7765 if (hdev->manufacturer_len > 0)
7766 memcpy(hdev->manufacturer_data, cp->data + 2,
7767 hdev->manufacturer_len);
7769 __hci_req_update_eir(&req);
7771 err = hci_req_run(&req, set_manufacturer_data_complete);
7773 mgmt_pending_remove(cmd);
7778 hci_dev_unlock(hdev);
7783 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
7784 hdev->manufacturer_len = old_len;
7785 if (hdev->manufacturer_len > 0)
7786 memcpy(hdev->manufacturer_data, old_data,
7787 hdev->manufacturer_len);
7788 hci_dev_unlock(hdev);
7792 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
7793 void *data, u16 len)
7795 struct mgmt_cp_le_set_scan_params *cp = data;
7796 __u16 interval, window;
7799 BT_DBG("%s", hdev->name);
7801 if (!lmp_le_capable(hdev))
7802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7803 MGMT_STATUS_NOT_SUPPORTED);
7805 interval = __le16_to_cpu(cp->interval);
7807 if (interval < 0x0004 || interval > 0x4000)
7808 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7809 MGMT_STATUS_INVALID_PARAMS);
7811 window = __le16_to_cpu(cp->window);
7813 if (window < 0x0004 || window > 0x4000)
7814 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7815 MGMT_STATUS_INVALID_PARAMS);
7817 if (window > interval)
7818 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7819 MGMT_STATUS_INVALID_PARAMS);
7823 hdev->le_scan_type = cp->type;
7824 hdev->le_scan_interval = interval;
7825 hdev->le_scan_window = window;
7827 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
7830 /* If background scan is running, restart it so new parameters are
7833 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
7834 hdev->discovery.state == DISCOVERY_STOPPED) {
7835 struct hci_request req;
7837 hci_req_init(&req, hdev);
7839 hci_req_add_le_scan_disable(&req, false);
7840 hci_req_add_le_passive_scan(&req);
7842 hci_req_run(&req, NULL);
7845 hci_dev_unlock(hdev);
7850 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
7851 void *data, u16 len)
7853 struct mgmt_cp_set_voice_setting *cp = data;
7854 struct hci_conn *conn;
7855 struct hci_conn *sco_conn;
7859 BT_DBG("%s", hdev->name);
7861 if (!lmp_bredr_capable(hdev)) {
7862 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
7863 MGMT_STATUS_NOT_SUPPORTED);
7868 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
7870 err = mgmt_cmd_complete(sk, hdev->id,
7871 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
7875 conn->voice_setting = cp->voice_setting;
7876 conn->sco_role = cp->sco_role;
7878 sco_conn = hci_conn_hash_lookup_sco(hdev);
7879 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
7880 BT_ERR("There is other SCO connection.");
7884 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
7885 if (conn->voice_setting == 0x0063)
7886 sco_connect_set_wbc(hdev);
7888 sco_connect_set_nbc(hdev);
7890 if (conn->voice_setting == 0x0063)
7891 sco_connect_set_gw_wbc(hdev);
7893 sco_connect_set_gw_nbc(hdev);
7897 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
7901 hci_dev_unlock(hdev);
7905 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
7906 void *data, u16 len)
7908 struct mgmt_rp_get_adv_tx_power *rp;
7912 BT_DBG("%s", hdev->name);
7916 rp_len = sizeof(*rp);
7917 rp = kmalloc(rp_len, GFP_KERNEL);
7923 rp->adv_tx_power = hdev->adv_tx_power;
7925 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
7931 hci_dev_unlock(hdev);
7936 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
7938 struct mgmt_ev_hardware_error ev;
7940 ev.error_code = err_code;
7941 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
7944 void mgmt_tx_timeout_error(struct hci_dev *hdev)
7946 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
7949 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
7950 u8 state_change_reason, u16 connection_handle)
7952 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
7954 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
7955 adv_instance, state_change_reason, connection_handle);
7957 mgmt_ev.adv_instance = adv_instance;
7958 mgmt_ev.state_change_reason = state_change_reason;
7959 mgmt_ev.connection_handle = connection_handle;
7961 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
7962 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
7966 static int enable_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
7967 void *data, u16 len)
7970 struct mgmt_cp_enable_6lowpan *cp = data;
7972 BT_DBG("%s", hdev->name);
7976 if (!hdev_is_powered(hdev)) {
7977 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
7978 MGMT_STATUS_NOT_POWERED);
7982 if (!lmp_le_capable(hdev)) {
7983 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
7984 MGMT_STATUS_NOT_SUPPORTED);
7988 if (cp->enable_6lowpan)
7989 bt_6lowpan_enable();
7991 bt_6lowpan_disable();
7993 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
7994 MGMT_STATUS_SUCCESS, NULL, 0);
7996 hci_dev_unlock(hdev);
8000 static int connect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8001 void *data, u16 len)
8003 struct mgmt_cp_connect_6lowpan *cp = data;
8004 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
8007 BT_DBG("%s", hdev->name);
8011 if (!lmp_le_capable(hdev)) {
8012 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8013 MGMT_STATUS_NOT_SUPPORTED);
8017 if (!hdev_is_powered(hdev)) {
8018 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8019 MGMT_STATUS_REJECTED);
8023 if (bdaddr_type_is_le(cp->addr.type)) {
8024 if (cp->addr.type == BDADDR_LE_PUBLIC)
8025 addr_type = ADDR_LE_DEV_PUBLIC;
8027 addr_type = ADDR_LE_DEV_RANDOM;
8029 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8030 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8034 hci_dev_unlock(hdev);
8036 /* 6lowpan Connect */
8037 err = _bt_6lowpan_connect(&cp->addr.bdaddr, cp->addr.type);
8042 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8043 MGMT_STATUS_REJECTED, NULL, 0);
8048 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
8051 hci_dev_unlock(hdev);
8055 static int disconnect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8056 void *data, u16 len)
8058 struct mgmt_cp_disconnect_6lowpan *cp = data;
8059 struct hci_conn *conn = NULL;
8060 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
8063 BT_DBG("%s", hdev->name);
8067 if (!lmp_le_capable(hdev)) {
8068 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
8069 MGMT_STATUS_NOT_SUPPORTED);
8073 if (!hdev_is_powered(hdev)) {
8074 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
8075 MGMT_STATUS_REJECTED);
8079 if (bdaddr_type_is_le(cp->addr.type)) {
8080 if (cp->addr.type == BDADDR_LE_PUBLIC)
8081 addr_type = ADDR_LE_DEV_PUBLIC;
8083 addr_type = ADDR_LE_DEV_RANDOM;
8085 err = mgmt_cmd_complete(sk, hdev->id,
8086 MGMT_OP_DISCONNECT_6LOWPAN,
8087 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8091 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8093 err = mgmt_cmd_complete(sk, hdev->id,
8094 MGMT_OP_DISCONNECT_6LOWPAN,
8095 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
8099 if (conn->dst_type != addr_type) {
8100 err = mgmt_cmd_complete(sk, hdev->id,
8101 MGMT_OP_DISCONNECT_6LOWPAN,
8102 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8106 if (conn->state != BT_CONNECTED) {
8107 err = mgmt_cmd_complete(sk, hdev->id,
8108 MGMT_OP_DISCONNECT_6LOWPAN,
8109 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
8113 /* 6lowpan Disconnect */
8114 err = _bt_6lowpan_disconnect(conn->l2cap_data, cp->addr.type);
8116 err = mgmt_cmd_complete(sk, hdev->id,
8117 MGMT_OP_DISCONNECT_6LOWPAN,
8118 MGMT_STATUS_REJECTED, NULL, 0);
8122 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
8126 hci_dev_unlock(hdev);
8130 void mgmt_6lowpan_conn_changed(struct hci_dev *hdev, char if_name[16],
8131 bdaddr_t *bdaddr, u8 addr_type, bool connected)
8134 struct mgmt_ev_6lowpan_conn_state_changed *ev = (void *)buf;
8137 memset(buf, 0, sizeof(buf));
8138 bacpy(&ev->addr.bdaddr, bdaddr);
8139 ev->addr.type = addr_type;
8140 ev->connected = connected;
8141 memcpy(ev->ifname, (__u8 *)if_name, 16);
8143 ev_size = sizeof(*ev);
8145 mgmt_event(MGMT_EV_6LOWPAN_CONN_STATE_CHANGED, hdev, ev, ev_size, NULL);
8148 void mgmt_le_read_maximum_data_length_complete(struct hci_dev *hdev, u8 status)
8150 struct mgmt_pending_cmd *cmd;
8151 struct mgmt_rp_le_read_maximum_data_length rp;
8153 BT_DBG("%s status %u", hdev->name, status);
8155 cmd = pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev);
8160 mgmt_cmd_status(cmd->sk, hdev->id,
8161 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8162 mgmt_status(status));
8164 memset(&rp, 0, sizeof(rp));
8166 rp.max_tx_octets = cpu_to_le16(hdev->le_max_tx_len);
8167 rp.max_tx_time = cpu_to_le16(hdev->le_max_tx_time);
8168 rp.max_rx_octets = cpu_to_le16(hdev->le_max_rx_len);
8169 rp.max_rx_time = cpu_to_le16(hdev->le_max_rx_time);
8171 mgmt_cmd_complete(cmd->sk, hdev->id,
8172 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, 0,
8175 mgmt_pending_remove(cmd);
8178 static int read_maximum_le_data_length(struct sock *sk,
8179 struct hci_dev *hdev, void *data, u16 len)
8181 struct mgmt_pending_cmd *cmd;
8184 BT_DBG("read_maximum_le_data_length %s", hdev->name);
8188 if (!hdev_is_powered(hdev)) {
8189 err = mgmt_cmd_status(sk, hdev->id,
8190 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8191 MGMT_STATUS_NOT_POWERED);
8195 if (!lmp_le_capable(hdev)) {
8196 err = mgmt_cmd_status(sk, hdev->id,
8197 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8198 MGMT_STATUS_NOT_SUPPORTED);
8202 if (pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev)) {
8203 err = mgmt_cmd_status(sk, hdev->id,
8204 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8209 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8216 err = hci_send_cmd(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
8218 mgmt_pending_remove(cmd);
8221 hci_dev_unlock(hdev);
8224 #endif /* TIZEN_BT */
8226 static bool ltk_is_valid(struct mgmt_ltk_info *key)
8228 if (key->initiator != 0x00 && key->initiator != 0x01)
8231 switch (key->addr.type) {
8232 case BDADDR_LE_PUBLIC:
8235 case BDADDR_LE_RANDOM:
8236 /* Two most significant bits shall be set */
8237 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
8245 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
8246 void *cp_data, u16 len)
8248 struct mgmt_cp_load_long_term_keys *cp = cp_data;
8249 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
8250 sizeof(struct mgmt_ltk_info));
8251 u16 key_count, expected_len;
8254 bt_dev_dbg(hdev, "sock %p", sk);
8256 if (!lmp_le_capable(hdev))
8257 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8258 MGMT_STATUS_NOT_SUPPORTED);
8260 key_count = __le16_to_cpu(cp->key_count);
8261 if (key_count > max_key_count) {
8262 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
8264 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8265 MGMT_STATUS_INVALID_PARAMS);
8268 expected_len = struct_size(cp, keys, key_count);
8269 if (expected_len != len) {
8270 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
8272 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8273 MGMT_STATUS_INVALID_PARAMS);
8276 bt_dev_dbg(hdev, "key_count %u", key_count);
8278 for (i = 0; i < key_count; i++) {
8279 struct mgmt_ltk_info *key = &cp->keys[i];
8281 if (!ltk_is_valid(key))
8282 return mgmt_cmd_status(sk, hdev->id,
8283 MGMT_OP_LOAD_LONG_TERM_KEYS,
8284 MGMT_STATUS_INVALID_PARAMS);
8289 hci_smp_ltks_clear(hdev);
8291 for (i = 0; i < key_count; i++) {
8292 struct mgmt_ltk_info *key = &cp->keys[i];
8293 u8 type, authenticated;
8295 if (hci_is_blocked_key(hdev,
8296 HCI_BLOCKED_KEY_TYPE_LTK,
8298 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
8303 switch (key->type) {
8304 case MGMT_LTK_UNAUTHENTICATED:
8305 authenticated = 0x00;
8306 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8308 case MGMT_LTK_AUTHENTICATED:
8309 authenticated = 0x01;
8310 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8312 case MGMT_LTK_P256_UNAUTH:
8313 authenticated = 0x00;
8314 type = SMP_LTK_P256;
8316 case MGMT_LTK_P256_AUTH:
8317 authenticated = 0x01;
8318 type = SMP_LTK_P256;
8320 case MGMT_LTK_P256_DEBUG:
8321 authenticated = 0x00;
8322 type = SMP_LTK_P256_DEBUG;
8328 hci_add_ltk(hdev, &key->addr.bdaddr,
8329 le_addr_type(key->addr.type), type, authenticated,
8330 key->val, key->enc_size, key->ediv, key->rand);
8333 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
8336 hci_dev_unlock(hdev);
8341 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8343 struct hci_conn *conn = cmd->user_data;
8344 struct mgmt_rp_get_conn_info rp;
8347 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8349 if (status == MGMT_STATUS_SUCCESS) {
8350 rp.rssi = conn->rssi;
8351 rp.tx_power = conn->tx_power;
8352 rp.max_tx_power = conn->max_tx_power;
8354 rp.rssi = HCI_RSSI_INVALID;
8355 rp.tx_power = HCI_TX_POWER_INVALID;
8356 rp.max_tx_power = HCI_TX_POWER_INVALID;
8359 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
8360 status, &rp, sizeof(rp));
8362 hci_conn_drop(conn);
8368 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
8371 struct hci_cp_read_rssi *cp;
8372 struct mgmt_pending_cmd *cmd;
8373 struct hci_conn *conn;
8377 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
8381 /* Commands sent in request are either Read RSSI or Read Transmit Power
8382 * Level so we check which one was last sent to retrieve connection
8383 * handle. Both commands have handle as first parameter so it's safe to
8384 * cast data on the same command struct.
8386 * First command sent is always Read RSSI and we fail only if it fails.
8387 * In other case we simply override error to indicate success as we
8388 * already remembered if TX power value is actually valid.
8390 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
8392 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
8393 status = MGMT_STATUS_SUCCESS;
8395 status = mgmt_status(hci_status);
8399 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
8403 handle = __le16_to_cpu(cp->handle);
8404 conn = hci_conn_hash_lookup_handle(hdev, handle);
8406 bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
8411 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
8415 cmd->cmd_complete(cmd, status);
8416 mgmt_pending_remove(cmd);
8419 hci_dev_unlock(hdev);
8422 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
8425 struct mgmt_cp_get_conn_info *cp = data;
8426 struct mgmt_rp_get_conn_info rp;
8427 struct hci_conn *conn;
8428 unsigned long conn_info_age;
8431 bt_dev_dbg(hdev, "sock %p", sk);
8433 memset(&rp, 0, sizeof(rp));
8434 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8435 rp.addr.type = cp->addr.type;
8437 if (!bdaddr_type_is_valid(cp->addr.type))
8438 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8439 MGMT_STATUS_INVALID_PARAMS,
8444 if (!hdev_is_powered(hdev)) {
8445 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8446 MGMT_STATUS_NOT_POWERED, &rp,
8451 if (cp->addr.type == BDADDR_BREDR)
8452 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8455 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8457 if (!conn || conn->state != BT_CONNECTED) {
8458 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8459 MGMT_STATUS_NOT_CONNECTED, &rp,
8464 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
8465 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8466 MGMT_STATUS_BUSY, &rp, sizeof(rp));
8470 /* To avoid client trying to guess when to poll again for information we
8471 * calculate conn info age as random value between min/max set in hdev.
8473 conn_info_age = hdev->conn_info_min_age +
8474 prandom_u32_max(hdev->conn_info_max_age -
8475 hdev->conn_info_min_age);
8477 /* Query controller to refresh cached values if they are too old or were
8480 if (time_after(jiffies, conn->conn_info_timestamp +
8481 msecs_to_jiffies(conn_info_age)) ||
8482 !conn->conn_info_timestamp) {
8483 struct hci_request req;
8484 struct hci_cp_read_tx_power req_txp_cp;
8485 struct hci_cp_read_rssi req_rssi_cp;
8486 struct mgmt_pending_cmd *cmd;
8488 hci_req_init(&req, hdev);
8489 req_rssi_cp.handle = cpu_to_le16(conn->handle);
8490 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
8493 /* For LE links TX power does not change thus we don't need to
8494 * query for it once value is known.
8496 if (!bdaddr_type_is_le(cp->addr.type) ||
8497 conn->tx_power == HCI_TX_POWER_INVALID) {
8498 req_txp_cp.handle = cpu_to_le16(conn->handle);
8499 req_txp_cp.type = 0x00;
8500 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8501 sizeof(req_txp_cp), &req_txp_cp);
8504 /* Max TX power needs to be read only once per connection */
8505 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
8506 req_txp_cp.handle = cpu_to_le16(conn->handle);
8507 req_txp_cp.type = 0x01;
8508 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8509 sizeof(req_txp_cp), &req_txp_cp);
8512 err = hci_req_run(&req, conn_info_refresh_complete);
8516 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
8523 hci_conn_hold(conn);
8524 cmd->user_data = hci_conn_get(conn);
8525 cmd->cmd_complete = conn_info_cmd_complete;
8527 conn->conn_info_timestamp = jiffies;
8529 /* Cache is valid, just reply with values cached in hci_conn */
8530 rp.rssi = conn->rssi;
8531 rp.tx_power = conn->tx_power;
8532 rp.max_tx_power = conn->max_tx_power;
8534 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8535 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8539 hci_dev_unlock(hdev);
8543 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8545 struct hci_conn *conn = cmd->user_data;
8546 struct mgmt_rp_get_clock_info rp;
8547 struct hci_dev *hdev;
8550 memset(&rp, 0, sizeof(rp));
8551 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8556 hdev = hci_dev_get(cmd->index);
8558 rp.local_clock = cpu_to_le32(hdev->clock);
8563 rp.piconet_clock = cpu_to_le32(conn->clock);
8564 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
8568 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
8572 hci_conn_drop(conn);
8579 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8581 struct hci_cp_read_clock *hci_cp;
8582 struct mgmt_pending_cmd *cmd;
8583 struct hci_conn *conn;
8585 bt_dev_dbg(hdev, "status %u", status);
8589 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
8593 if (hci_cp->which) {
8594 u16 handle = __le16_to_cpu(hci_cp->handle);
8595 conn = hci_conn_hash_lookup_handle(hdev, handle);
8600 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
8604 cmd->cmd_complete(cmd, mgmt_status(status));
8605 mgmt_pending_remove(cmd);
8608 hci_dev_unlock(hdev);
8611 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
8614 struct mgmt_cp_get_clock_info *cp = data;
8615 struct mgmt_rp_get_clock_info rp;
8616 struct hci_cp_read_clock hci_cp;
8617 struct mgmt_pending_cmd *cmd;
8618 struct hci_request req;
8619 struct hci_conn *conn;
8622 bt_dev_dbg(hdev, "sock %p", sk);
8624 memset(&rp, 0, sizeof(rp));
8625 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8626 rp.addr.type = cp->addr.type;
8628 if (cp->addr.type != BDADDR_BREDR)
8629 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8630 MGMT_STATUS_INVALID_PARAMS,
8635 if (!hdev_is_powered(hdev)) {
8636 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8637 MGMT_STATUS_NOT_POWERED, &rp,
8642 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8643 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8645 if (!conn || conn->state != BT_CONNECTED) {
8646 err = mgmt_cmd_complete(sk, hdev->id,
8647 MGMT_OP_GET_CLOCK_INFO,
8648 MGMT_STATUS_NOT_CONNECTED,
8656 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
8662 cmd->cmd_complete = clock_info_cmd_complete;
8664 hci_req_init(&req, hdev);
8666 memset(&hci_cp, 0, sizeof(hci_cp));
8667 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8670 hci_conn_hold(conn);
8671 cmd->user_data = hci_conn_get(conn);
8673 hci_cp.handle = cpu_to_le16(conn->handle);
8674 hci_cp.which = 0x01; /* Piconet clock */
8675 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8678 err = hci_req_run(&req, get_clock_info_complete);
8680 mgmt_pending_remove(cmd);
8683 hci_dev_unlock(hdev);
8687 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8689 struct hci_conn *conn;
8691 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8695 if (conn->dst_type != type)
8698 if (conn->state != BT_CONNECTED)
8704 /* This function requires the caller holds hdev->lock */
8705 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8706 u8 addr_type, u8 auto_connect)
8708 struct hci_conn_params *params;
8710 params = hci_conn_params_add(hdev, addr, addr_type);
8714 if (params->auto_connect == auto_connect)
8717 list_del_init(¶ms->action);
8719 switch (auto_connect) {
8720 case HCI_AUTO_CONN_DISABLED:
8721 case HCI_AUTO_CONN_LINK_LOSS:
8722 /* If auto connect is being disabled when we're trying to
8723 * connect to device, keep connecting.
8725 if (params->explicit_connect)
8726 list_add(¶ms->action, &hdev->pend_le_conns);
8728 case HCI_AUTO_CONN_REPORT:
8729 if (params->explicit_connect)
8730 list_add(¶ms->action, &hdev->pend_le_conns);
8732 list_add(¶ms->action, &hdev->pend_le_reports);
8734 case HCI_AUTO_CONN_DIRECT:
8735 case HCI_AUTO_CONN_ALWAYS:
8736 if (!is_connected(hdev, addr, addr_type))
8737 list_add(¶ms->action, &hdev->pend_le_conns);
8741 params->auto_connect = auto_connect;
8743 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
8744 addr, addr_type, auto_connect);
8749 static void device_added(struct sock *sk, struct hci_dev *hdev,
8750 bdaddr_t *bdaddr, u8 type, u8 action)
8752 struct mgmt_ev_device_added ev;
8754 bacpy(&ev.addr.bdaddr, bdaddr);
8755 ev.addr.type = type;
8758 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
8761 static int add_device(struct sock *sk, struct hci_dev *hdev,
8762 void *data, u16 len)
8764 struct mgmt_cp_add_device *cp = data;
8765 u8 auto_conn, addr_type;
8766 struct hci_conn_params *params;
8768 u32 current_flags = 0;
8770 bt_dev_dbg(hdev, "sock %p", sk);
8772 if (!bdaddr_type_is_valid(cp->addr.type) ||
8773 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
8774 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8775 MGMT_STATUS_INVALID_PARAMS,
8776 &cp->addr, sizeof(cp->addr));
8778 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
8779 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8780 MGMT_STATUS_INVALID_PARAMS,
8781 &cp->addr, sizeof(cp->addr));
8785 if (cp->addr.type == BDADDR_BREDR) {
8786 /* Only incoming connections action is supported for now */
8787 if (cp->action != 0x01) {
8788 err = mgmt_cmd_complete(sk, hdev->id,
8790 MGMT_STATUS_INVALID_PARAMS,
8791 &cp->addr, sizeof(cp->addr));
8795 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
8801 hci_req_update_scan(hdev);
8806 addr_type = le_addr_type(cp->addr.type);
8808 if (cp->action == 0x02)
8809 auto_conn = HCI_AUTO_CONN_ALWAYS;
8810 else if (cp->action == 0x01)
8811 auto_conn = HCI_AUTO_CONN_DIRECT;
8813 auto_conn = HCI_AUTO_CONN_REPORT;
8815 /* Kernel internally uses conn_params with resolvable private
8816 * address, but Add Device allows only identity addresses.
8817 * Make sure it is enforced before calling
8818 * hci_conn_params_lookup.
8820 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8821 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8822 MGMT_STATUS_INVALID_PARAMS,
8823 &cp->addr, sizeof(cp->addr));
8827 /* If the connection parameters don't exist for this device,
8828 * they will be created and configured with defaults.
8830 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
8832 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8833 MGMT_STATUS_FAILED, &cp->addr,
8837 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8840 current_flags = params->current_flags;
8843 hci_update_background_scan(hdev);
8846 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
8847 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
8848 SUPPORTED_DEVICE_FLAGS(), current_flags);
8850 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8851 MGMT_STATUS_SUCCESS, &cp->addr,
8855 hci_dev_unlock(hdev);
8859 static void device_removed(struct sock *sk, struct hci_dev *hdev,
8860 bdaddr_t *bdaddr, u8 type)
8862 struct mgmt_ev_device_removed ev;
8864 bacpy(&ev.addr.bdaddr, bdaddr);
8865 ev.addr.type = type;
8867 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
8870 static int remove_device(struct sock *sk, struct hci_dev *hdev,
8871 void *data, u16 len)
8873 struct mgmt_cp_remove_device *cp = data;
8876 bt_dev_dbg(hdev, "sock %p", sk);
8880 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8881 struct hci_conn_params *params;
8884 if (!bdaddr_type_is_valid(cp->addr.type)) {
8885 err = mgmt_cmd_complete(sk, hdev->id,
8886 MGMT_OP_REMOVE_DEVICE,
8887 MGMT_STATUS_INVALID_PARAMS,
8888 &cp->addr, sizeof(cp->addr));
8892 if (cp->addr.type == BDADDR_BREDR) {
8893 err = hci_bdaddr_list_del(&hdev->accept_list,
8897 err = mgmt_cmd_complete(sk, hdev->id,
8898 MGMT_OP_REMOVE_DEVICE,
8899 MGMT_STATUS_INVALID_PARAMS,
8905 hci_req_update_scan(hdev);
8907 device_removed(sk, hdev, &cp->addr.bdaddr,
8912 addr_type = le_addr_type(cp->addr.type);
8914 /* Kernel internally uses conn_params with resolvable private
8915 * address, but Remove Device allows only identity addresses.
8916 * Make sure it is enforced before calling
8917 * hci_conn_params_lookup.
8919 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8920 err = mgmt_cmd_complete(sk, hdev->id,
8921 MGMT_OP_REMOVE_DEVICE,
8922 MGMT_STATUS_INVALID_PARAMS,
8923 &cp->addr, sizeof(cp->addr));
8927 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8930 err = mgmt_cmd_complete(sk, hdev->id,
8931 MGMT_OP_REMOVE_DEVICE,
8932 MGMT_STATUS_INVALID_PARAMS,
8933 &cp->addr, sizeof(cp->addr));
8937 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
8938 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
8939 err = mgmt_cmd_complete(sk, hdev->id,
8940 MGMT_OP_REMOVE_DEVICE,
8941 MGMT_STATUS_INVALID_PARAMS,
8942 &cp->addr, sizeof(cp->addr));
8946 list_del(¶ms->action);
8947 list_del(¶ms->list);
8949 hci_update_background_scan(hdev);
8951 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
8953 struct hci_conn_params *p, *tmp;
8954 struct bdaddr_list *b, *btmp;
8956 if (cp->addr.type) {
8957 err = mgmt_cmd_complete(sk, hdev->id,
8958 MGMT_OP_REMOVE_DEVICE,
8959 MGMT_STATUS_INVALID_PARAMS,
8960 &cp->addr, sizeof(cp->addr));
8964 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
8965 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
8970 hci_req_update_scan(hdev);
8972 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
8973 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
8975 device_removed(sk, hdev, &p->addr, p->addr_type);
8976 if (p->explicit_connect) {
8977 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
8980 list_del(&p->action);
8985 bt_dev_dbg(hdev, "All LE connection parameters were removed");
8987 hci_update_background_scan(hdev);
8991 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
8992 MGMT_STATUS_SUCCESS, &cp->addr,
8995 hci_dev_unlock(hdev);
8999 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
9002 struct mgmt_cp_load_conn_param *cp = data;
9003 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
9004 sizeof(struct mgmt_conn_param));
9005 u16 param_count, expected_len;
9008 if (!lmp_le_capable(hdev))
9009 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9010 MGMT_STATUS_NOT_SUPPORTED);
9012 param_count = __le16_to_cpu(cp->param_count);
9013 if (param_count > max_param_count) {
9014 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
9016 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9017 MGMT_STATUS_INVALID_PARAMS);
9020 expected_len = struct_size(cp, params, param_count);
9021 if (expected_len != len) {
9022 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
9024 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9025 MGMT_STATUS_INVALID_PARAMS);
9028 bt_dev_dbg(hdev, "param_count %u", param_count);
9032 hci_conn_params_clear_disabled(hdev);
9034 for (i = 0; i < param_count; i++) {
9035 struct mgmt_conn_param *param = &cp->params[i];
9036 struct hci_conn_params *hci_param;
9037 u16 min, max, latency, timeout;
9040 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
9043 if (param->addr.type == BDADDR_LE_PUBLIC) {
9044 addr_type = ADDR_LE_DEV_PUBLIC;
9045 } else if (param->addr.type == BDADDR_LE_RANDOM) {
9046 addr_type = ADDR_LE_DEV_RANDOM;
9048 bt_dev_err(hdev, "ignoring invalid connection parameters");
9052 min = le16_to_cpu(param->min_interval);
9053 max = le16_to_cpu(param->max_interval);
9054 latency = le16_to_cpu(param->latency);
9055 timeout = le16_to_cpu(param->timeout);
9057 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
9058 min, max, latency, timeout);
9060 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
9061 bt_dev_err(hdev, "ignoring invalid connection parameters");
9065 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
9068 bt_dev_err(hdev, "failed to add connection parameters");
9072 hci_param->conn_min_interval = min;
9073 hci_param->conn_max_interval = max;
9074 hci_param->conn_latency = latency;
9075 hci_param->supervision_timeout = timeout;
9078 hci_dev_unlock(hdev);
9080 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
9084 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
9085 void *data, u16 len)
9087 struct mgmt_cp_set_external_config *cp = data;
9091 bt_dev_dbg(hdev, "sock %p", sk);
9093 if (hdev_is_powered(hdev))
9094 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9095 MGMT_STATUS_REJECTED);
9097 if (cp->config != 0x00 && cp->config != 0x01)
9098 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9099 MGMT_STATUS_INVALID_PARAMS);
9101 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
9102 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9103 MGMT_STATUS_NOT_SUPPORTED);
9108 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
9110 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
9112 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
9119 err = new_options(hdev, sk);
9121 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
9122 mgmt_index_removed(hdev);
9124 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
9125 hci_dev_set_flag(hdev, HCI_CONFIG);
9126 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9128 queue_work(hdev->req_workqueue, &hdev->power_on);
9130 set_bit(HCI_RAW, &hdev->flags);
9131 mgmt_index_added(hdev);
9136 hci_dev_unlock(hdev);
9140 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
9141 void *data, u16 len)
9143 struct mgmt_cp_set_public_address *cp = data;
9147 bt_dev_dbg(hdev, "sock %p", sk);
9149 if (hdev_is_powered(hdev))
9150 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9151 MGMT_STATUS_REJECTED);
9153 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
9154 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9155 MGMT_STATUS_INVALID_PARAMS);
9157 if (!hdev->set_bdaddr)
9158 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9159 MGMT_STATUS_NOT_SUPPORTED);
9163 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
9164 bacpy(&hdev->public_addr, &cp->bdaddr);
9166 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
9173 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
9174 err = new_options(hdev, sk);
9176 if (is_configured(hdev)) {
9177 mgmt_index_removed(hdev);
9179 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
9181 hci_dev_set_flag(hdev, HCI_CONFIG);
9182 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9184 queue_work(hdev->req_workqueue, &hdev->power_on);
9188 hci_dev_unlock(hdev);
9193 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
9197 struct mgmt_ev_device_name_update *ev = (void *)buf;
9203 bacpy(&ev->addr.bdaddr, bdaddr);
9204 ev->addr.type = BDADDR_BREDR;
9206 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9209 ev->eir_len = cpu_to_le16(eir_len);
9211 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
9212 sizeof(*ev) + eir_len, NULL);
9215 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9216 u8 link_type, u8 addr_type, u8 status)
9218 struct mgmt_ev_conn_update_failed ev;
9220 bacpy(&ev.addr.bdaddr, bdaddr);
9221 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9224 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
9225 &ev, sizeof(ev), NULL);
9228 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
9229 u8 link_type, u8 addr_type, u16 conn_interval,
9230 u16 conn_latency, u16 supervision_timeout)
9232 struct mgmt_ev_conn_updated ev;
9234 bacpy(&ev.addr.bdaddr, bdaddr);
9235 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9236 ev.conn_interval = cpu_to_le16(conn_interval);
9237 ev.conn_latency = cpu_to_le16(conn_latency);
9238 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
9240 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
9241 &ev, sizeof(ev), NULL);
9244 /* le device found event - Pass adv type */
9245 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9246 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
9247 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
9250 struct mgmt_ev_le_device_found *ev = (void *)buf;
9253 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
9256 /* Make sure that the buffer is big enough. The 5 extra bytes
9257 * are for the potential CoD field.
9259 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9262 memset(buf, 0, sizeof(buf));
9264 bacpy(&ev->addr.bdaddr, bdaddr);
9265 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9267 ev->flags = cpu_to_le32(flags);
9268 ev->adv_type = adv_type;
9271 memcpy(ev->eir, eir, eir_len);
9273 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
9274 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9277 if (scan_rsp_len > 0)
9278 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9280 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9281 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9283 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9287 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
9288 u16 opcode, struct sk_buff *skb)
9290 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
9291 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
9292 u8 *h192, *r192, *h256, *r256;
9293 struct mgmt_pending_cmd *cmd;
9297 bt_dev_dbg(hdev, "status %u", status);
9299 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
9303 mgmt_cp = cmd->param;
9306 status = mgmt_status(status);
9313 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
9314 struct hci_rp_read_local_oob_data *rp;
9316 if (skb->len != sizeof(*rp)) {
9317 status = MGMT_STATUS_FAILED;
9320 status = MGMT_STATUS_SUCCESS;
9321 rp = (void *)skb->data;
9323 eir_len = 5 + 18 + 18;
9330 struct hci_rp_read_local_oob_ext_data *rp;
9332 if (skb->len != sizeof(*rp)) {
9333 status = MGMT_STATUS_FAILED;
9336 status = MGMT_STATUS_SUCCESS;
9337 rp = (void *)skb->data;
9339 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
9340 eir_len = 5 + 18 + 18;
9344 eir_len = 5 + 18 + 18 + 18 + 18;
9354 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
9361 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
9362 hdev->dev_class, 3);
9365 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9366 EIR_SSP_HASH_C192, h192, 16);
9367 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9368 EIR_SSP_RAND_R192, r192, 16);
9372 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9373 EIR_SSP_HASH_C256, h256, 16);
9374 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9375 EIR_SSP_RAND_R256, r256, 16);
9379 mgmt_rp->type = mgmt_cp->type;
9380 mgmt_rp->eir_len = cpu_to_le16(eir_len);
9382 err = mgmt_cmd_complete(cmd->sk, hdev->id,
9383 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
9384 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
9385 if (err < 0 || status)
9388 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
9390 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9391 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
9392 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
9395 mgmt_pending_remove(cmd);
9398 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
9399 struct mgmt_cp_read_local_oob_ext_data *cp)
9401 struct mgmt_pending_cmd *cmd;
9402 struct hci_request req;
9405 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
9410 hci_req_init(&req, hdev);
9412 if (bredr_sc_enabled(hdev))
9413 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
9415 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
9417 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
9419 mgmt_pending_remove(cmd);
9426 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
9427 void *data, u16 data_len)
9429 struct mgmt_cp_read_local_oob_ext_data *cp = data;
9430 struct mgmt_rp_read_local_oob_ext_data *rp;
9433 u8 status, flags, role, addr[7], hash[16], rand[16];
9436 bt_dev_dbg(hdev, "sock %p", sk);
9438 if (hdev_is_powered(hdev)) {
9440 case BIT(BDADDR_BREDR):
9441 status = mgmt_bredr_support(hdev);
9447 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9448 status = mgmt_le_support(hdev);
9452 eir_len = 9 + 3 + 18 + 18 + 3;
9455 status = MGMT_STATUS_INVALID_PARAMS;
9460 status = MGMT_STATUS_NOT_POWERED;
9464 rp_len = sizeof(*rp) + eir_len;
9465 rp = kmalloc(rp_len, GFP_ATOMIC);
9476 case BIT(BDADDR_BREDR):
9477 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9478 err = read_local_ssp_oob_req(hdev, sk, cp);
9479 hci_dev_unlock(hdev);
9483 status = MGMT_STATUS_FAILED;
9486 eir_len = eir_append_data(rp->eir, eir_len,
9488 hdev->dev_class, 3);
9491 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9492 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
9493 smp_generate_oob(hdev, hash, rand) < 0) {
9494 hci_dev_unlock(hdev);
9495 status = MGMT_STATUS_FAILED;
9499 /* This should return the active RPA, but since the RPA
9500 * is only programmed on demand, it is really hard to fill
9501 * this in at the moment. For now disallow retrieving
9502 * local out-of-band data when privacy is in use.
9504 * Returning the identity address will not help here since
9505 * pairing happens before the identity resolving key is
9506 * known and thus the connection establishment happens
9507 * based on the RPA and not the identity address.
9509 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
9510 hci_dev_unlock(hdev);
9511 status = MGMT_STATUS_REJECTED;
9515 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
9516 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
9517 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
9518 bacmp(&hdev->static_addr, BDADDR_ANY))) {
9519 memcpy(addr, &hdev->static_addr, 6);
9522 memcpy(addr, &hdev->bdaddr, 6);
9526 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
9527 addr, sizeof(addr));
9529 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
9534 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
9535 &role, sizeof(role));
9537 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
9538 eir_len = eir_append_data(rp->eir, eir_len,
9540 hash, sizeof(hash));
9542 eir_len = eir_append_data(rp->eir, eir_len,
9544 rand, sizeof(rand));
9547 flags = mgmt_get_adv_discov_flags(hdev);
9549 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
9550 flags |= LE_AD_NO_BREDR;
9552 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
9553 &flags, sizeof(flags));
9557 hci_dev_unlock(hdev);
9559 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
9561 status = MGMT_STATUS_SUCCESS;
9564 rp->type = cp->type;
9565 rp->eir_len = cpu_to_le16(eir_len);
9567 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
9568 status, rp, sizeof(*rp) + eir_len);
9569 if (err < 0 || status)
9572 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9573 rp, sizeof(*rp) + eir_len,
9574 HCI_MGMT_OOB_DATA_EVENTS, sk);
9582 static u32 get_supported_adv_flags(struct hci_dev *hdev)
9586 flags |= MGMT_ADV_FLAG_CONNECTABLE;
9587 flags |= MGMT_ADV_FLAG_DISCOV;
9588 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
9589 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
9590 flags |= MGMT_ADV_FLAG_APPEARANCE;
9591 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
9592 flags |= MGMT_ADV_PARAM_DURATION;
9593 flags |= MGMT_ADV_PARAM_TIMEOUT;
9594 flags |= MGMT_ADV_PARAM_INTERVALS;
9595 flags |= MGMT_ADV_PARAM_TX_POWER;
9596 flags |= MGMT_ADV_PARAM_SCAN_RSP;
9598 /* In extended adv TX_POWER returned from Set Adv Param
9599 * will be always valid.
9601 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
9602 ext_adv_capable(hdev))
9603 flags |= MGMT_ADV_FLAG_TX_POWER;
9605 if (ext_adv_capable(hdev)) {
9606 flags |= MGMT_ADV_FLAG_SEC_1M;
9607 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
9608 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
9610 if (hdev->le_features[1] & HCI_LE_PHY_2M)
9611 flags |= MGMT_ADV_FLAG_SEC_2M;
9613 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
9614 flags |= MGMT_ADV_FLAG_SEC_CODED;
9620 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
9621 void *data, u16 data_len)
9623 struct mgmt_rp_read_adv_features *rp;
9626 struct adv_info *adv_instance;
9627 u32 supported_flags;
9630 bt_dev_dbg(hdev, "sock %p", sk);
9632 if (!lmp_le_capable(hdev))
9633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9634 MGMT_STATUS_REJECTED);
9636 /* Enabling the experimental LL Privay support disables support for
9639 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9641 MGMT_STATUS_NOT_SUPPORTED);
9645 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
9646 rp = kmalloc(rp_len, GFP_ATOMIC);
9648 hci_dev_unlock(hdev);
9652 supported_flags = get_supported_adv_flags(hdev);
9654 rp->supported_flags = cpu_to_le32(supported_flags);
9655 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
9656 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
9657 rp->max_instances = hdev->le_num_of_adv_sets;
9658 rp->num_instances = hdev->adv_instance_cnt;
9660 instance = rp->instance;
9661 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
9662 *instance = adv_instance->instance;
9666 hci_dev_unlock(hdev);
9668 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9669 MGMT_STATUS_SUCCESS, rp, rp_len);
9676 static u8 calculate_name_len(struct hci_dev *hdev)
9678 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
9680 return append_local_name(hdev, buf, 0);
9683 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
9686 u8 max_len = HCI_MAX_AD_LENGTH;
9689 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
9690 MGMT_ADV_FLAG_LIMITED_DISCOV |
9691 MGMT_ADV_FLAG_MANAGED_FLAGS))
9694 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
9697 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
9698 max_len -= calculate_name_len(hdev);
9700 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
9707 static bool flags_managed(u32 adv_flags)
9709 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
9710 MGMT_ADV_FLAG_LIMITED_DISCOV |
9711 MGMT_ADV_FLAG_MANAGED_FLAGS);
9714 static bool tx_power_managed(u32 adv_flags)
9716 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
9719 static bool name_managed(u32 adv_flags)
9721 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
9724 static bool appearance_managed(u32 adv_flags)
9726 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
9729 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
9730 u8 len, bool is_adv_data)
9735 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
9740 /* Make sure that the data is correctly formatted. */
9741 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
9747 if (data[i + 1] == EIR_FLAGS &&
9748 (!is_adv_data || flags_managed(adv_flags)))
9751 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
9754 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
9757 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
9760 if (data[i + 1] == EIR_APPEARANCE &&
9761 appearance_managed(adv_flags))
9764 /* If the current field length would exceed the total data
9765 * length, then it's invalid.
9767 if (i + cur_len >= len)
9774 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
9776 u32 supported_flags, phy_flags;
9778 /* The current implementation only supports a subset of the specified
9779 * flags. Also need to check mutual exclusiveness of sec flags.
9781 supported_flags = get_supported_adv_flags(hdev);
9782 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
9783 if (adv_flags & ~supported_flags ||
9784 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
9790 static bool adv_busy(struct hci_dev *hdev)
9792 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9793 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9794 pending_find(MGMT_OP_SET_LE, hdev) ||
9795 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
9796 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
9799 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
9802 struct mgmt_pending_cmd *cmd;
9803 struct mgmt_cp_add_advertising *cp;
9804 struct mgmt_rp_add_advertising rp;
9805 struct adv_info *adv_instance, *n;
9808 bt_dev_dbg(hdev, "status %u", status);
9812 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
9814 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
9816 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
9817 if (!adv_instance->pending)
9821 adv_instance->pending = false;
9825 instance = adv_instance->instance;
9827 if (hdev->cur_adv_instance == instance)
9828 cancel_adv_timeout(hdev);
9830 hci_remove_adv_instance(hdev, instance);
9831 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
9838 rp.instance = cp->instance;
9841 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9842 mgmt_status(status));
9844 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9845 mgmt_status(status), &rp, sizeof(rp));
9847 mgmt_pending_remove(cmd);
9850 hci_dev_unlock(hdev);
9853 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
9854 void *data, u16 data_len)
9856 struct mgmt_cp_add_advertising *cp = data;
9857 struct mgmt_rp_add_advertising rp;
9860 u16 timeout, duration;
9861 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
9862 u8 schedule_instance = 0;
9863 struct adv_info *next_instance;
9865 struct mgmt_pending_cmd *cmd;
9866 struct hci_request req;
9868 bt_dev_dbg(hdev, "sock %p", sk);
9870 status = mgmt_le_support(hdev);
9872 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9875 /* Enabling the experimental LL Privay support disables support for
9878 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9879 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9880 MGMT_STATUS_NOT_SUPPORTED);
9882 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9883 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9884 MGMT_STATUS_INVALID_PARAMS);
9886 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
9887 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9888 MGMT_STATUS_INVALID_PARAMS);
9890 flags = __le32_to_cpu(cp->flags);
9891 timeout = __le16_to_cpu(cp->timeout);
9892 duration = __le16_to_cpu(cp->duration);
9894 if (!requested_adv_flags_are_valid(hdev, flags))
9895 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9896 MGMT_STATUS_INVALID_PARAMS);
9900 if (timeout && !hdev_is_powered(hdev)) {
9901 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9902 MGMT_STATUS_REJECTED);
9906 if (adv_busy(hdev)) {
9907 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9912 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
9913 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
9914 cp->scan_rsp_len, false)) {
9915 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9916 MGMT_STATUS_INVALID_PARAMS);
9920 err = hci_add_adv_instance(hdev, cp->instance, flags,
9921 cp->adv_data_len, cp->data,
9923 cp->data + cp->adv_data_len,
9925 HCI_ADV_TX_POWER_NO_PREFERENCE,
9926 hdev->le_adv_min_interval,
9927 hdev->le_adv_max_interval);
9929 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9930 MGMT_STATUS_FAILED);
9934 /* Only trigger an advertising added event if a new instance was
9937 if (hdev->adv_instance_cnt > prev_instance_cnt)
9938 mgmt_advertising_added(sk, hdev, cp->instance);
9940 if (hdev->cur_adv_instance == cp->instance) {
9941 /* If the currently advertised instance is being changed then
9942 * cancel the current advertising and schedule the next
9943 * instance. If there is only one instance then the overridden
9944 * advertising data will be visible right away.
9946 cancel_adv_timeout(hdev);
9948 next_instance = hci_get_next_instance(hdev, cp->instance);
9950 schedule_instance = next_instance->instance;
9951 } else if (!hdev->adv_instance_timeout) {
9952 /* Immediately advertise the new instance if no other
9953 * instance is currently being advertised.
9955 schedule_instance = cp->instance;
9958 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
9959 * there is no instance to be advertised then we have no HCI
9960 * communication to make. Simply return.
9962 if (!hdev_is_powered(hdev) ||
9963 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9964 !schedule_instance) {
9965 rp.instance = cp->instance;
9966 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9967 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9971 /* We're good to go, update advertising data, parameters, and start
9974 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
9981 hci_req_init(&req, hdev);
9983 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
9986 err = hci_req_run(&req, add_advertising_complete);
9989 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9990 MGMT_STATUS_FAILED);
9991 mgmt_pending_remove(cmd);
9995 hci_dev_unlock(hdev);
10000 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
10003 struct mgmt_pending_cmd *cmd;
10004 struct mgmt_cp_add_ext_adv_params *cp;
10005 struct mgmt_rp_add_ext_adv_params rp;
10006 struct adv_info *adv_instance;
10009 BT_DBG("%s", hdev->name);
10011 hci_dev_lock(hdev);
10013 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
10018 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10022 rp.instance = cp->instance;
10023 rp.tx_power = adv_instance->tx_power;
10025 /* While we're at it, inform userspace of the available space for this
10026 * advertisement, given the flags that will be used.
10028 flags = __le32_to_cpu(cp->flags);
10029 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10030 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10033 /* If this advertisement was previously advertising and we
10034 * failed to update it, we signal that it has been removed and
10035 * delete its structure
10037 if (!adv_instance->pending)
10038 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
10040 hci_remove_adv_instance(hdev, cp->instance);
10042 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10043 mgmt_status(status));
10046 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10047 mgmt_status(status), &rp, sizeof(rp));
10052 mgmt_pending_remove(cmd);
10054 hci_dev_unlock(hdev);
10057 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
10058 void *data, u16 data_len)
10060 struct mgmt_cp_add_ext_adv_params *cp = data;
10061 struct mgmt_rp_add_ext_adv_params rp;
10062 struct mgmt_pending_cmd *cmd = NULL;
10063 struct adv_info *adv_instance;
10064 struct hci_request req;
10065 u32 flags, min_interval, max_interval;
10066 u16 timeout, duration;
10071 BT_DBG("%s", hdev->name);
10073 status = mgmt_le_support(hdev);
10075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10078 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10079 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10080 MGMT_STATUS_INVALID_PARAMS);
10082 /* The purpose of breaking add_advertising into two separate MGMT calls
10083 * for params and data is to allow more parameters to be added to this
10084 * structure in the future. For this reason, we verify that we have the
10085 * bare minimum structure we know of when the interface was defined. Any
10086 * extra parameters we don't know about will be ignored in this request.
10088 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
10089 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10090 MGMT_STATUS_INVALID_PARAMS);
10092 flags = __le32_to_cpu(cp->flags);
10094 if (!requested_adv_flags_are_valid(hdev, flags))
10095 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10096 MGMT_STATUS_INVALID_PARAMS);
10098 hci_dev_lock(hdev);
10100 /* In new interface, we require that we are powered to register */
10101 if (!hdev_is_powered(hdev)) {
10102 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10103 MGMT_STATUS_REJECTED);
10107 if (adv_busy(hdev)) {
10108 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10113 /* Parse defined parameters from request, use defaults otherwise */
10114 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
10115 __le16_to_cpu(cp->timeout) : 0;
10117 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
10118 __le16_to_cpu(cp->duration) :
10119 hdev->def_multi_adv_rotation_duration;
10121 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10122 __le32_to_cpu(cp->min_interval) :
10123 hdev->le_adv_min_interval;
10125 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10126 __le32_to_cpu(cp->max_interval) :
10127 hdev->le_adv_max_interval;
10129 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
10131 HCI_ADV_TX_POWER_NO_PREFERENCE;
10133 /* Create advertising instance with no advertising or response data */
10134 err = hci_add_adv_instance(hdev, cp->instance, flags,
10135 0, NULL, 0, NULL, timeout, duration,
10136 tx_power, min_interval, max_interval);
10139 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10140 MGMT_STATUS_FAILED);
10144 /* Submit request for advertising params if ext adv available */
10145 if (ext_adv_capable(hdev)) {
10146 hci_req_init(&req, hdev);
10147 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10149 /* Updating parameters of an active instance will return a
10150 * Command Disallowed error, so we must first disable the
10151 * instance if it is active.
10153 if (!adv_instance->pending)
10154 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10156 __hci_req_setup_ext_adv_instance(&req, cp->instance);
10158 err = hci_req_run(&req, add_ext_adv_params_complete);
10161 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
10162 hdev, data, data_len);
10165 hci_remove_adv_instance(hdev, cp->instance);
10170 rp.instance = cp->instance;
10171 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
10172 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10173 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10174 err = mgmt_cmd_complete(sk, hdev->id,
10175 MGMT_OP_ADD_EXT_ADV_PARAMS,
10176 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10180 hci_dev_unlock(hdev);
10185 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
10188 struct mgmt_cp_add_ext_adv_data *cp = data;
10189 struct mgmt_rp_add_ext_adv_data rp;
10190 u8 schedule_instance = 0;
10191 struct adv_info *next_instance;
10192 struct adv_info *adv_instance;
10194 struct mgmt_pending_cmd *cmd;
10195 struct hci_request req;
10197 BT_DBG("%s", hdev->name);
10199 hci_dev_lock(hdev);
10201 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10203 if (!adv_instance) {
10204 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10205 MGMT_STATUS_INVALID_PARAMS);
10209 /* In new interface, we require that we are powered to register */
10210 if (!hdev_is_powered(hdev)) {
10211 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10212 MGMT_STATUS_REJECTED);
10213 goto clear_new_instance;
10216 if (adv_busy(hdev)) {
10217 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10219 goto clear_new_instance;
10222 /* Validate new data */
10223 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
10224 cp->adv_data_len, true) ||
10225 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
10226 cp->adv_data_len, cp->scan_rsp_len, false)) {
10227 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10228 MGMT_STATUS_INVALID_PARAMS);
10229 goto clear_new_instance;
10232 /* Set the data in the advertising instance */
10233 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
10234 cp->data, cp->scan_rsp_len,
10235 cp->data + cp->adv_data_len);
10237 /* We're good to go, update advertising data, parameters, and start
10241 hci_req_init(&req, hdev);
10243 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
10245 if (ext_adv_capable(hdev)) {
10246 __hci_req_update_adv_data(&req, cp->instance);
10247 __hci_req_update_scan_rsp_data(&req, cp->instance);
10248 __hci_req_enable_ext_advertising(&req, cp->instance);
10251 /* If using software rotation, determine next instance to use */
10253 if (hdev->cur_adv_instance == cp->instance) {
10254 /* If the currently advertised instance is being changed
10255 * then cancel the current advertising and schedule the
10256 * next instance. If there is only one instance then the
10257 * overridden advertising data will be visible right
10260 cancel_adv_timeout(hdev);
10262 next_instance = hci_get_next_instance(hdev,
10265 schedule_instance = next_instance->instance;
10266 } else if (!hdev->adv_instance_timeout) {
10267 /* Immediately advertise the new instance if no other
10268 * instance is currently being advertised.
10270 schedule_instance = cp->instance;
10273 /* If the HCI_ADVERTISING flag is set or there is no instance to
10274 * be advertised then we have no HCI communication to make.
10277 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10278 !schedule_instance) {
10279 if (adv_instance->pending) {
10280 mgmt_advertising_added(sk, hdev, cp->instance);
10281 adv_instance->pending = false;
10283 rp.instance = cp->instance;
10284 err = mgmt_cmd_complete(sk, hdev->id,
10285 MGMT_OP_ADD_EXT_ADV_DATA,
10286 MGMT_STATUS_SUCCESS, &rp,
10291 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
10295 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
10299 goto clear_new_instance;
10303 err = hci_req_run(&req, add_advertising_complete);
10306 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10307 MGMT_STATUS_FAILED);
10308 mgmt_pending_remove(cmd);
10309 goto clear_new_instance;
10312 /* We were successful in updating data, so trigger advertising_added
10313 * event if this is an instance that wasn't previously advertising. If
10314 * a failure occurs in the requests we initiated, we will remove the
10315 * instance again in add_advertising_complete
10317 if (adv_instance->pending)
10318 mgmt_advertising_added(sk, hdev, cp->instance);
10322 clear_new_instance:
10323 hci_remove_adv_instance(hdev, cp->instance);
10326 hci_dev_unlock(hdev);
10331 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
10334 struct mgmt_pending_cmd *cmd;
10335 struct mgmt_cp_remove_advertising *cp;
10336 struct mgmt_rp_remove_advertising rp;
10338 bt_dev_dbg(hdev, "status %u", status);
10340 hci_dev_lock(hdev);
10342 /* A failure status here only means that we failed to disable
10343 * advertising. Otherwise, the advertising instance has been removed,
10344 * so report success.
10346 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
10351 rp.instance = cp->instance;
10353 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
10355 mgmt_pending_remove(cmd);
10358 hci_dev_unlock(hdev);
10361 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
10362 void *data, u16 data_len)
10364 struct mgmt_cp_remove_advertising *cp = data;
10365 struct mgmt_rp_remove_advertising rp;
10366 struct mgmt_pending_cmd *cmd;
10367 struct hci_request req;
10370 bt_dev_dbg(hdev, "sock %p", sk);
10372 /* Enabling the experimental LL Privay support disables support for
10375 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
10376 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
10377 MGMT_STATUS_NOT_SUPPORTED);
10379 hci_dev_lock(hdev);
10381 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
10382 err = mgmt_cmd_status(sk, hdev->id,
10383 MGMT_OP_REMOVE_ADVERTISING,
10384 MGMT_STATUS_INVALID_PARAMS);
10388 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
10389 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
10390 pending_find(MGMT_OP_SET_LE, hdev)) {
10391 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10396 if (list_empty(&hdev->adv_instances)) {
10397 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10398 MGMT_STATUS_INVALID_PARAMS);
10402 hci_req_init(&req, hdev);
10404 /* If we use extended advertising, instance is disabled and removed */
10405 if (ext_adv_capable(hdev)) {
10406 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10407 __hci_req_remove_ext_adv_instance(&req, cp->instance);
10410 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
10412 if (list_empty(&hdev->adv_instances))
10413 __hci_req_disable_advertising(&req);
10415 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
10416 * flag is set or the device isn't powered then we have no HCI
10417 * communication to make. Simply return.
10419 if (skb_queue_empty(&req.cmd_q) ||
10420 !hdev_is_powered(hdev) ||
10421 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
10422 hci_req_purge(&req);
10423 rp.instance = cp->instance;
10424 err = mgmt_cmd_complete(sk, hdev->id,
10425 MGMT_OP_REMOVE_ADVERTISING,
10426 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10430 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
10437 err = hci_req_run(&req, remove_advertising_complete);
10439 mgmt_pending_remove(cmd);
10442 hci_dev_unlock(hdev);
10447 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
10448 void *data, u16 data_len)
10450 struct mgmt_cp_get_adv_size_info *cp = data;
10451 struct mgmt_rp_get_adv_size_info rp;
10452 u32 flags, supported_flags;
10455 bt_dev_dbg(hdev, "sock %p", sk);
10457 if (!lmp_le_capable(hdev))
10458 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10459 MGMT_STATUS_REJECTED);
10461 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10462 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10463 MGMT_STATUS_INVALID_PARAMS);
10465 flags = __le32_to_cpu(cp->flags);
10467 /* The current implementation only supports a subset of the specified
10470 supported_flags = get_supported_adv_flags(hdev);
10471 if (flags & ~supported_flags)
10472 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10473 MGMT_STATUS_INVALID_PARAMS);
10475 rp.instance = cp->instance;
10476 rp.flags = cp->flags;
10477 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10478 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10480 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10481 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10486 static const struct hci_mgmt_handler mgmt_handlers[] = {
10487 { NULL }, /* 0x0000 (no command) */
10488 { read_version, MGMT_READ_VERSION_SIZE,
10490 HCI_MGMT_UNTRUSTED },
10491 { read_commands, MGMT_READ_COMMANDS_SIZE,
10493 HCI_MGMT_UNTRUSTED },
10494 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
10496 HCI_MGMT_UNTRUSTED },
10497 { read_controller_info, MGMT_READ_INFO_SIZE,
10498 HCI_MGMT_UNTRUSTED },
10499 { set_powered, MGMT_SETTING_SIZE },
10500 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
10501 { set_connectable, MGMT_SETTING_SIZE },
10502 { set_fast_connectable, MGMT_SETTING_SIZE },
10503 { set_bondable, MGMT_SETTING_SIZE },
10504 { set_link_security, MGMT_SETTING_SIZE },
10505 { set_ssp, MGMT_SETTING_SIZE },
10506 { set_hs, MGMT_SETTING_SIZE },
10507 { set_le, MGMT_SETTING_SIZE },
10508 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
10509 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
10510 { add_uuid, MGMT_ADD_UUID_SIZE },
10511 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
10512 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
10513 HCI_MGMT_VAR_LEN },
10514 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
10515 HCI_MGMT_VAR_LEN },
10516 { disconnect, MGMT_DISCONNECT_SIZE },
10517 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
10518 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
10519 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
10520 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
10521 { pair_device, MGMT_PAIR_DEVICE_SIZE },
10522 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
10523 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
10524 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
10525 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
10526 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
10527 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
10528 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
10529 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
10530 HCI_MGMT_VAR_LEN },
10531 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
10532 { start_discovery, MGMT_START_DISCOVERY_SIZE },
10533 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
10534 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
10535 { block_device, MGMT_BLOCK_DEVICE_SIZE },
10536 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
10537 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
10538 { set_advertising, MGMT_SETTING_SIZE },
10539 { set_bredr, MGMT_SETTING_SIZE },
10540 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
10541 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
10542 { set_secure_conn, MGMT_SETTING_SIZE },
10543 { set_debug_keys, MGMT_SETTING_SIZE },
10544 { set_privacy, MGMT_SET_PRIVACY_SIZE },
10545 { load_irks, MGMT_LOAD_IRKS_SIZE,
10546 HCI_MGMT_VAR_LEN },
10547 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
10548 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
10549 { add_device, MGMT_ADD_DEVICE_SIZE },
10550 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
10551 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
10552 HCI_MGMT_VAR_LEN },
10553 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
10555 HCI_MGMT_UNTRUSTED },
10556 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
10557 HCI_MGMT_UNCONFIGURED |
10558 HCI_MGMT_UNTRUSTED },
10559 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
10560 HCI_MGMT_UNCONFIGURED },
10561 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
10562 HCI_MGMT_UNCONFIGURED },
10563 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
10564 HCI_MGMT_VAR_LEN },
10565 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
10566 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
10568 HCI_MGMT_UNTRUSTED },
10569 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
10570 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
10571 HCI_MGMT_VAR_LEN },
10572 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
10573 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
10574 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
10575 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
10576 HCI_MGMT_UNTRUSTED },
10577 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
10578 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
10579 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
10580 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
10581 HCI_MGMT_VAR_LEN },
10582 { set_wideband_speech, MGMT_SETTING_SIZE },
10583 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
10584 HCI_MGMT_UNTRUSTED },
10585 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
10586 HCI_MGMT_UNTRUSTED |
10587 HCI_MGMT_HDEV_OPTIONAL },
10588 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
10590 HCI_MGMT_HDEV_OPTIONAL },
10591 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
10592 HCI_MGMT_UNTRUSTED },
10593 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
10594 HCI_MGMT_VAR_LEN },
10595 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
10596 HCI_MGMT_UNTRUSTED },
10597 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
10598 HCI_MGMT_VAR_LEN },
10599 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
10600 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
10601 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
10602 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
10603 HCI_MGMT_VAR_LEN },
10604 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
10605 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
10606 HCI_MGMT_VAR_LEN },
10607 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
10608 HCI_MGMT_VAR_LEN },
10609 { add_adv_patterns_monitor_rssi,
10610 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
10611 HCI_MGMT_VAR_LEN },
10615 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
10616 { NULL }, /* 0x0000 (no command) */
10617 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
10618 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
10619 HCI_MGMT_VAR_LEN },
10620 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
10621 HCI_MGMT_VAR_LEN },
10622 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
10623 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
10624 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
10625 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
10626 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
10627 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
10628 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
10629 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
10630 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
10631 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
10632 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
10633 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
10634 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
10635 { get_adv_tx_power, MGMT_GET_ADV_TX_POWER_SIZE },
10636 { enable_bt_6lowpan, MGMT_ENABLE_BT_6LOWPAN_SIZE },
10637 { connect_bt_6lowpan, MGMT_CONNECT_6LOWPAN_SIZE },
10638 { disconnect_bt_6lowpan, MGMT_DISCONNECT_6LOWPAN_SIZE },
10639 { read_maximum_le_data_length,
10640 MGMT_LE_READ_MAXIMUM_DATA_LENGTH_SIZE },
10644 void mgmt_index_added(struct hci_dev *hdev)
10646 struct mgmt_ev_ext_index ev;
10648 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10651 switch (hdev->dev_type) {
10653 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10654 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
10655 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10658 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
10659 HCI_MGMT_INDEX_EVENTS);
10670 ev.bus = hdev->bus;
10672 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
10673 HCI_MGMT_EXT_INDEX_EVENTS);
10676 void mgmt_index_removed(struct hci_dev *hdev)
10678 struct mgmt_ev_ext_index ev;
10679 u8 status = MGMT_STATUS_INVALID_INDEX;
10681 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10684 switch (hdev->dev_type) {
10686 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10688 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10689 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
10690 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10693 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
10694 HCI_MGMT_INDEX_EVENTS);
10705 ev.bus = hdev->bus;
10707 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
10708 HCI_MGMT_EXT_INDEX_EVENTS);
10711 /* This function requires the caller holds hdev->lock */
10712 static void restart_le_actions(struct hci_dev *hdev)
10714 struct hci_conn_params *p;
10716 list_for_each_entry(p, &hdev->le_conn_params, list) {
10717 /* Needed for AUTO_OFF case where might not "really"
10718 * have been powered off.
10720 list_del_init(&p->action);
10722 switch (p->auto_connect) {
10723 case HCI_AUTO_CONN_DIRECT:
10724 case HCI_AUTO_CONN_ALWAYS:
10725 list_add(&p->action, &hdev->pend_le_conns);
10727 case HCI_AUTO_CONN_REPORT:
10728 list_add(&p->action, &hdev->pend_le_reports);
10736 void mgmt_power_on(struct hci_dev *hdev, int err)
10738 struct cmd_lookup match = { NULL, hdev };
10740 bt_dev_dbg(hdev, "err %d", err);
10742 hci_dev_lock(hdev);
10745 restart_le_actions(hdev);
10746 hci_update_background_scan(hdev);
10749 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10751 new_settings(hdev, match.sk);
10754 sock_put(match.sk);
10756 hci_dev_unlock(hdev);
10759 void __mgmt_power_off(struct hci_dev *hdev)
10761 struct cmd_lookup match = { NULL, hdev };
10762 u8 status, zero_cod[] = { 0, 0, 0 };
10764 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10766 /* If the power off is because of hdev unregistration let
10767 * use the appropriate INVALID_INDEX status. Otherwise use
10768 * NOT_POWERED. We cover both scenarios here since later in
10769 * mgmt_index_removed() any hci_conn callbacks will have already
10770 * been triggered, potentially causing misleading DISCONNECTED
10771 * status responses.
10773 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
10774 status = MGMT_STATUS_INVALID_INDEX;
10776 status = MGMT_STATUS_NOT_POWERED;
10778 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10780 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
10781 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
10782 zero_cod, sizeof(zero_cod),
10783 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10784 ext_info_changed(hdev, NULL);
10787 new_settings(hdev, match.sk);
10790 sock_put(match.sk);
10793 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
10795 struct mgmt_pending_cmd *cmd;
10798 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10802 if (err == -ERFKILL)
10803 status = MGMT_STATUS_RFKILLED;
10805 status = MGMT_STATUS_FAILED;
10807 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
10809 mgmt_pending_remove(cmd);
10812 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
10815 struct mgmt_ev_new_link_key ev;
10817 memset(&ev, 0, sizeof(ev));
10819 ev.store_hint = persistent;
10820 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10821 ev.key.addr.type = BDADDR_BREDR;
10822 ev.key.type = key->type;
10823 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
10824 ev.key.pin_len = key->pin_len;
10826 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
10829 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
10831 switch (ltk->type) {
10833 case SMP_LTK_RESPONDER:
10834 if (ltk->authenticated)
10835 return MGMT_LTK_AUTHENTICATED;
10836 return MGMT_LTK_UNAUTHENTICATED;
10838 if (ltk->authenticated)
10839 return MGMT_LTK_P256_AUTH;
10840 return MGMT_LTK_P256_UNAUTH;
10841 case SMP_LTK_P256_DEBUG:
10842 return MGMT_LTK_P256_DEBUG;
10845 return MGMT_LTK_UNAUTHENTICATED;
10848 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
10850 struct mgmt_ev_new_long_term_key ev;
10852 memset(&ev, 0, sizeof(ev));
10854 /* Devices using resolvable or non-resolvable random addresses
10855 * without providing an identity resolving key don't require
10856 * to store long term keys. Their addresses will change the
10857 * next time around.
10859 * Only when a remote device provides an identity address
10860 * make sure the long term key is stored. If the remote
10861 * identity is known, the long term keys are internally
10862 * mapped to the identity address. So allow static random
10863 * and public addresses here.
10865 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10866 (key->bdaddr.b[5] & 0xc0) != 0xc0)
10867 ev.store_hint = 0x00;
10869 ev.store_hint = persistent;
10871 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10872 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
10873 ev.key.type = mgmt_ltk_type(key);
10874 ev.key.enc_size = key->enc_size;
10875 ev.key.ediv = key->ediv;
10876 ev.key.rand = key->rand;
10878 if (key->type == SMP_LTK)
10879 ev.key.initiator = 1;
10881 /* Make sure we copy only the significant bytes based on the
10882 * encryption key size, and set the rest of the value to zeroes.
10884 memcpy(ev.key.val, key->val, key->enc_size);
10885 memset(ev.key.val + key->enc_size, 0,
10886 sizeof(ev.key.val) - key->enc_size);
10888 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
10891 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
10893 struct mgmt_ev_new_irk ev;
10895 memset(&ev, 0, sizeof(ev));
10897 ev.store_hint = persistent;
10899 bacpy(&ev.rpa, &irk->rpa);
10900 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
10901 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
10902 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
10904 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
10907 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
10910 struct mgmt_ev_new_csrk ev;
10912 memset(&ev, 0, sizeof(ev));
10914 /* Devices using resolvable or non-resolvable random addresses
10915 * without providing an identity resolving key don't require
10916 * to store signature resolving keys. Their addresses will change
10917 * the next time around.
10919 * Only when a remote device provides an identity address
10920 * make sure the signature resolving key is stored. So allow
10921 * static random and public addresses here.
10923 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10924 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
10925 ev.store_hint = 0x00;
10927 ev.store_hint = persistent;
10929 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
10930 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
10931 ev.key.type = csrk->type;
10932 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
10934 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
10937 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
10938 u8 bdaddr_type, u8 store_hint, u16 min_interval,
10939 u16 max_interval, u16 latency, u16 timeout)
10941 struct mgmt_ev_new_conn_param ev;
10943 if (!hci_is_identity_address(bdaddr, bdaddr_type))
10946 memset(&ev, 0, sizeof(ev));
10947 bacpy(&ev.addr.bdaddr, bdaddr);
10948 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
10949 ev.store_hint = store_hint;
10950 ev.min_interval = cpu_to_le16(min_interval);
10951 ev.max_interval = cpu_to_le16(max_interval);
10952 ev.latency = cpu_to_le16(latency);
10953 ev.timeout = cpu_to_le16(timeout);
10955 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
10958 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
10959 u8 *name, u8 name_len)
10962 struct mgmt_ev_device_connected *ev = (void *) buf;
10966 bacpy(&ev->addr.bdaddr, &conn->dst);
10967 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10970 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
10972 ev->flags = __cpu_to_le32(flags);
10974 /* We must ensure that the EIR Data fields are ordered and
10975 * unique. Keep it simple for now and avoid the problem by not
10976 * adding any BR/EDR data to the LE adv.
10978 if (conn->le_adv_data_len > 0) {
10979 memcpy(&ev->eir[eir_len],
10980 conn->le_adv_data, conn->le_adv_data_len);
10981 eir_len = conn->le_adv_data_len;
10984 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
10987 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
10988 eir_len = eir_append_data(ev->eir, eir_len,
10990 conn->dev_class, 3);
10993 ev->eir_len = cpu_to_le16(eir_len);
10995 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
10996 sizeof(*ev) + eir_len, NULL);
10999 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
11001 struct sock **sk = data;
11003 cmd->cmd_complete(cmd, 0);
11008 mgmt_pending_remove(cmd);
11011 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
11013 struct hci_dev *hdev = data;
11014 struct mgmt_cp_unpair_device *cp = cmd->param;
11016 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
11018 cmd->cmd_complete(cmd, 0);
11019 mgmt_pending_remove(cmd);
11022 bool mgmt_powering_down(struct hci_dev *hdev)
11024 struct mgmt_pending_cmd *cmd;
11025 struct mgmt_mode *cp;
11027 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11038 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
11039 u8 link_type, u8 addr_type, u8 reason,
11040 bool mgmt_connected)
11042 struct mgmt_ev_device_disconnected ev;
11043 struct sock *sk = NULL;
11045 /* The connection is still in hci_conn_hash so test for 1
11046 * instead of 0 to know if this is the last one.
11048 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11049 cancel_delayed_work(&hdev->power_off);
11050 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11053 if (!mgmt_connected)
11056 if (link_type != ACL_LINK && link_type != LE_LINK)
11059 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
11061 bacpy(&ev.addr.bdaddr, bdaddr);
11062 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11063 ev.reason = reason;
11065 /* Report disconnects due to suspend */
11066 if (hdev->suspended)
11067 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
11069 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
11074 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11078 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
11079 u8 link_type, u8 addr_type, u8 status)
11081 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
11082 struct mgmt_cp_disconnect *cp;
11083 struct mgmt_pending_cmd *cmd;
11085 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11088 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
11094 if (bacmp(bdaddr, &cp->addr.bdaddr))
11097 if (cp->addr.type != bdaddr_type)
11100 cmd->cmd_complete(cmd, mgmt_status(status));
11101 mgmt_pending_remove(cmd);
11104 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11105 u8 addr_type, u8 status)
11107 struct mgmt_ev_connect_failed ev;
11109 /* The connection is still in hci_conn_hash so test for 1
11110 * instead of 0 to know if this is the last one.
11112 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11113 cancel_delayed_work(&hdev->power_off);
11114 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11117 bacpy(&ev.addr.bdaddr, bdaddr);
11118 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11119 ev.status = mgmt_status(status);
11121 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
11124 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
11126 struct mgmt_ev_pin_code_request ev;
11128 bacpy(&ev.addr.bdaddr, bdaddr);
11129 ev.addr.type = BDADDR_BREDR;
11130 ev.secure = secure;
11132 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
11135 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11138 struct mgmt_pending_cmd *cmd;
11140 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
11144 cmd->cmd_complete(cmd, mgmt_status(status));
11145 mgmt_pending_remove(cmd);
11148 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11151 struct mgmt_pending_cmd *cmd;
11153 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
11157 cmd->cmd_complete(cmd, mgmt_status(status));
11158 mgmt_pending_remove(cmd);
11161 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11162 u8 link_type, u8 addr_type, u32 value,
11165 struct mgmt_ev_user_confirm_request ev;
11167 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11169 bacpy(&ev.addr.bdaddr, bdaddr);
11170 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11171 ev.confirm_hint = confirm_hint;
11172 ev.value = cpu_to_le32(value);
11174 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
11178 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11179 u8 link_type, u8 addr_type)
11181 struct mgmt_ev_user_passkey_request ev;
11183 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11185 bacpy(&ev.addr.bdaddr, bdaddr);
11186 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11188 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
11192 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11193 u8 link_type, u8 addr_type, u8 status,
11196 struct mgmt_pending_cmd *cmd;
11198 cmd = pending_find(opcode, hdev);
11202 cmd->cmd_complete(cmd, mgmt_status(status));
11203 mgmt_pending_remove(cmd);
11208 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11209 u8 link_type, u8 addr_type, u8 status)
11211 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11212 status, MGMT_OP_USER_CONFIRM_REPLY);
11215 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11216 u8 link_type, u8 addr_type, u8 status)
11218 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11220 MGMT_OP_USER_CONFIRM_NEG_REPLY);
11223 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11224 u8 link_type, u8 addr_type, u8 status)
11226 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11227 status, MGMT_OP_USER_PASSKEY_REPLY);
11230 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11231 u8 link_type, u8 addr_type, u8 status)
11233 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11235 MGMT_OP_USER_PASSKEY_NEG_REPLY);
11238 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
11239 u8 link_type, u8 addr_type, u32 passkey,
11242 struct mgmt_ev_passkey_notify ev;
11244 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11246 bacpy(&ev.addr.bdaddr, bdaddr);
11247 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11248 ev.passkey = __cpu_to_le32(passkey);
11249 ev.entered = entered;
11251 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
11254 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
11256 struct mgmt_ev_auth_failed ev;
11257 struct mgmt_pending_cmd *cmd;
11258 u8 status = mgmt_status(hci_status);
11260 bacpy(&ev.addr.bdaddr, &conn->dst);
11261 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11262 ev.status = status;
11264 cmd = find_pairing(conn);
11266 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
11267 cmd ? cmd->sk : NULL);
11270 cmd->cmd_complete(cmd, status);
11271 mgmt_pending_remove(cmd);
11275 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
11277 struct cmd_lookup match = { NULL, hdev };
11281 u8 mgmt_err = mgmt_status(status);
11282 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
11283 cmd_status_rsp, &mgmt_err);
11287 if (test_bit(HCI_AUTH, &hdev->flags))
11288 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
11290 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
11292 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
11296 new_settings(hdev, match.sk);
11299 sock_put(match.sk);
11302 static void clear_eir(struct hci_request *req)
11304 struct hci_dev *hdev = req->hdev;
11305 struct hci_cp_write_eir cp;
11307 if (!lmp_ext_inq_capable(hdev))
11310 memset(hdev->eir, 0, sizeof(hdev->eir));
11312 memset(&cp, 0, sizeof(cp));
11314 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
11317 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
11319 struct cmd_lookup match = { NULL, hdev };
11320 struct hci_request req;
11321 bool changed = false;
11324 u8 mgmt_err = mgmt_status(status);
11326 if (enable && hci_dev_test_and_clear_flag(hdev,
11327 HCI_SSP_ENABLED)) {
11328 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11329 new_settings(hdev, NULL);
11332 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
11338 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
11340 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
11342 changed = hci_dev_test_and_clear_flag(hdev,
11345 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11348 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
11351 new_settings(hdev, match.sk);
11354 sock_put(match.sk);
11356 hci_req_init(&req, hdev);
11358 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
11359 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
11360 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
11361 sizeof(enable), &enable);
11362 __hci_req_update_eir(&req);
11367 hci_req_run(&req, NULL);
11370 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11372 struct cmd_lookup *match = data;
11374 if (match->sk == NULL) {
11375 match->sk = cmd->sk;
11376 sock_hold(match->sk);
11380 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11383 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11385 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11386 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11387 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11390 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11391 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11392 ext_info_changed(hdev, NULL);
11396 sock_put(match.sk);
11399 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11401 struct mgmt_cp_set_local_name ev;
11402 struct mgmt_pending_cmd *cmd;
11407 memset(&ev, 0, sizeof(ev));
11408 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11409 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11411 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11413 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11415 /* If this is a HCI command related to powering on the
11416 * HCI dev don't send any mgmt signals.
11418 if (pending_find(MGMT_OP_SET_POWERED, hdev))
11422 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11423 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11424 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
11427 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11431 for (i = 0; i < uuid_count; i++) {
11432 if (!memcmp(uuid, uuids[i], 16))
11439 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
11443 while (parsed < eir_len) {
11444 u8 field_len = eir[0];
11448 if (field_len == 0)
11451 if (eir_len - parsed < field_len + 1)
11455 case EIR_UUID16_ALL:
11456 case EIR_UUID16_SOME:
11457 for (i = 0; i + 3 <= field_len; i += 2) {
11458 memcpy(uuid, bluetooth_base_uuid, 16);
11459 uuid[13] = eir[i + 3];
11460 uuid[12] = eir[i + 2];
11461 if (has_uuid(uuid, uuid_count, uuids))
11465 case EIR_UUID32_ALL:
11466 case EIR_UUID32_SOME:
11467 for (i = 0; i + 5 <= field_len; i += 4) {
11468 memcpy(uuid, bluetooth_base_uuid, 16);
11469 uuid[15] = eir[i + 5];
11470 uuid[14] = eir[i + 4];
11471 uuid[13] = eir[i + 3];
11472 uuid[12] = eir[i + 2];
11473 if (has_uuid(uuid, uuid_count, uuids))
11477 case EIR_UUID128_ALL:
11478 case EIR_UUID128_SOME:
11479 for (i = 0; i + 17 <= field_len; i += 16) {
11480 memcpy(uuid, eir + i + 2, 16);
11481 if (has_uuid(uuid, uuid_count, uuids))
11487 parsed += field_len + 1;
11488 eir += field_len + 1;
11494 static void restart_le_scan(struct hci_dev *hdev)
11496 /* If controller is not scanning we are done. */
11497 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
11500 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
11501 hdev->discovery.scan_start +
11502 hdev->discovery.scan_duration))
11505 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
11506 DISCOV_LE_RESTART_DELAY);
11509 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
11510 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11512 /* If a RSSI threshold has been specified, and
11513 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
11514 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
11515 * is set, let it through for further processing, as we might need to
11516 * restart the scan.
11518 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
11519 * the results are also dropped.
11521 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11522 (rssi == HCI_RSSI_INVALID ||
11523 (rssi < hdev->discovery.rssi &&
11524 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
11527 if (hdev->discovery.uuid_count != 0) {
11528 /* If a list of UUIDs is provided in filter, results with no
11529 * matching UUID should be dropped.
11531 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
11532 hdev->discovery.uuids) &&
11533 !eir_has_uuids(scan_rsp, scan_rsp_len,
11534 hdev->discovery.uuid_count,
11535 hdev->discovery.uuids))
11539 /* If duplicate filtering does not report RSSI changes, then restart
11540 * scanning to ensure updated result with updated RSSI values.
11542 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
11543 restart_le_scan(hdev);
11545 /* Validate RSSI value against the RSSI threshold once more. */
11546 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11547 rssi < hdev->discovery.rssi)
11554 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11555 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
11556 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11559 struct mgmt_ev_device_found *ev = (void *)buf;
11562 /* Don't send events for a non-kernel initiated discovery. With
11563 * LE one exception is if we have pend_le_reports > 0 in which
11564 * case we're doing passive scanning and want these events.
11566 if (!hci_discovery_active(hdev)) {
11567 if (link_type == ACL_LINK)
11569 if (link_type == LE_LINK &&
11570 list_empty(&hdev->pend_le_reports) &&
11571 !hci_is_adv_monitoring(hdev)) {
11576 if (hdev->discovery.result_filtering) {
11577 /* We are using service discovery */
11578 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
11583 if (hdev->discovery.limited) {
11584 /* Check for limited discoverable bit */
11586 if (!(dev_class[1] & 0x20))
11589 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
11590 if (!flags || !(flags[0] & LE_AD_LIMITED))
11595 /* Make sure that the buffer is big enough. The 5 extra bytes
11596 * are for the potential CoD field.
11598 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
11601 memset(buf, 0, sizeof(buf));
11603 /* In case of device discovery with BR/EDR devices (pre 1.2), the
11604 * RSSI value was reported as 0 when not available. This behavior
11605 * is kept when using device discovery. This is required for full
11606 * backwards compatibility with the API.
11608 * However when using service discovery, the value 127 will be
11609 * returned when the RSSI is not available.
11611 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
11612 link_type == ACL_LINK)
11615 bacpy(&ev->addr.bdaddr, bdaddr);
11616 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11618 ev->flags = cpu_to_le32(flags);
11621 /* Copy EIR or advertising data into event */
11622 memcpy(ev->eir, eir, eir_len);
11624 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11626 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11629 if (scan_rsp_len > 0)
11630 /* Append scan response data to event */
11631 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
11633 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
11634 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
11636 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
11639 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11640 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
11642 struct mgmt_ev_device_found *ev;
11643 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
11646 ev = (struct mgmt_ev_device_found *) buf;
11648 memset(buf, 0, sizeof(buf));
11650 bacpy(&ev->addr.bdaddr, bdaddr);
11651 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11654 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
11657 ev->eir_len = cpu_to_le16(eir_len);
11659 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
11662 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
11664 struct mgmt_ev_discovering ev;
11666 bt_dev_dbg(hdev, "discovering %u", discovering);
11668 memset(&ev, 0, sizeof(ev));
11669 ev.type = hdev->discovery.type;
11670 ev.discovering = discovering;
11672 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
11675 void mgmt_suspending(struct hci_dev *hdev, u8 state)
11677 struct mgmt_ev_controller_suspend ev;
11679 ev.suspend_state = state;
11680 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
11683 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
11686 struct mgmt_ev_controller_resume ev;
11688 ev.wake_reason = reason;
11690 bacpy(&ev.addr.bdaddr, bdaddr);
11691 ev.addr.type = addr_type;
11693 memset(&ev.addr, 0, sizeof(ev.addr));
11696 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
11699 static struct hci_mgmt_chan chan = {
11700 .channel = HCI_CHANNEL_CONTROL,
11701 .handler_count = ARRAY_SIZE(mgmt_handlers),
11702 .handlers = mgmt_handlers,
11704 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
11705 .tizen_handlers = tizen_mgmt_handlers,
11707 .hdev_init = mgmt_init_hdev,
11710 int mgmt_init(void)
11712 return hci_mgmt_chan_register(&chan);
11715 void mgmt_exit(void)
11717 hci_mgmt_chan_unregister(&chan);