2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
46 #define MGMT_VERSION 1
47 #define MGMT_REVISION 21
49 static const u16 mgmt_commands[] = {
50 MGMT_OP_READ_INDEX_LIST,
53 MGMT_OP_SET_DISCOVERABLE,
54 MGMT_OP_SET_CONNECTABLE,
55 MGMT_OP_SET_FAST_CONNECTABLE,
57 MGMT_OP_SET_LINK_SECURITY,
61 MGMT_OP_SET_DEV_CLASS,
62 MGMT_OP_SET_LOCAL_NAME,
65 MGMT_OP_LOAD_LINK_KEYS,
66 MGMT_OP_LOAD_LONG_TERM_KEYS,
68 MGMT_OP_GET_CONNECTIONS,
69 MGMT_OP_PIN_CODE_REPLY,
70 MGMT_OP_PIN_CODE_NEG_REPLY,
71 MGMT_OP_SET_IO_CAPABILITY,
73 MGMT_OP_CANCEL_PAIR_DEVICE,
74 MGMT_OP_UNPAIR_DEVICE,
75 MGMT_OP_USER_CONFIRM_REPLY,
76 MGMT_OP_USER_CONFIRM_NEG_REPLY,
77 MGMT_OP_USER_PASSKEY_REPLY,
78 MGMT_OP_USER_PASSKEY_NEG_REPLY,
79 MGMT_OP_READ_LOCAL_OOB_DATA,
80 MGMT_OP_ADD_REMOTE_OOB_DATA,
81 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
82 MGMT_OP_START_DISCOVERY,
83 MGMT_OP_STOP_DISCOVERY,
86 MGMT_OP_UNBLOCK_DEVICE,
87 MGMT_OP_SET_DEVICE_ID,
88 MGMT_OP_SET_ADVERTISING,
90 MGMT_OP_SET_STATIC_ADDRESS,
91 MGMT_OP_SET_SCAN_PARAMS,
92 MGMT_OP_SET_SECURE_CONN,
93 MGMT_OP_SET_DEBUG_KEYS,
96 MGMT_OP_GET_CONN_INFO,
97 MGMT_OP_GET_CLOCK_INFO,
99 MGMT_OP_REMOVE_DEVICE,
100 MGMT_OP_LOAD_CONN_PARAM,
101 MGMT_OP_READ_UNCONF_INDEX_LIST,
102 MGMT_OP_READ_CONFIG_INFO,
103 MGMT_OP_SET_EXTERNAL_CONFIG,
104 MGMT_OP_SET_PUBLIC_ADDRESS,
105 MGMT_OP_START_SERVICE_DISCOVERY,
106 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
107 MGMT_OP_READ_EXT_INDEX_LIST,
108 MGMT_OP_READ_ADV_FEATURES,
109 MGMT_OP_ADD_ADVERTISING,
110 MGMT_OP_REMOVE_ADVERTISING,
111 MGMT_OP_GET_ADV_SIZE_INFO,
112 MGMT_OP_START_LIMITED_DISCOVERY,
113 MGMT_OP_READ_EXT_INFO,
114 MGMT_OP_SET_APPEARANCE,
115 MGMT_OP_GET_PHY_CONFIGURATION,
116 MGMT_OP_SET_PHY_CONFIGURATION,
117 MGMT_OP_SET_BLOCKED_KEYS,
118 MGMT_OP_SET_WIDEBAND_SPEECH,
119 MGMT_OP_READ_CONTROLLER_CAP,
120 MGMT_OP_READ_EXP_FEATURES_INFO,
121 MGMT_OP_SET_EXP_FEATURE,
122 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
123 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
124 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
125 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
126 MGMT_OP_GET_DEVICE_FLAGS,
127 MGMT_OP_SET_DEVICE_FLAGS,
128 MGMT_OP_READ_ADV_MONITOR_FEATURES,
129 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
130 MGMT_OP_REMOVE_ADV_MONITOR,
131 MGMT_OP_ADD_EXT_ADV_PARAMS,
132 MGMT_OP_ADD_EXT_ADV_DATA,
133 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136 static const u16 mgmt_events[] = {
137 MGMT_EV_CONTROLLER_ERROR,
139 MGMT_EV_INDEX_REMOVED,
140 MGMT_EV_NEW_SETTINGS,
141 MGMT_EV_CLASS_OF_DEV_CHANGED,
142 MGMT_EV_LOCAL_NAME_CHANGED,
143 MGMT_EV_NEW_LINK_KEY,
144 MGMT_EV_NEW_LONG_TERM_KEY,
145 MGMT_EV_DEVICE_CONNECTED,
146 MGMT_EV_DEVICE_DISCONNECTED,
147 MGMT_EV_CONNECT_FAILED,
148 MGMT_EV_PIN_CODE_REQUEST,
149 MGMT_EV_USER_CONFIRM_REQUEST,
150 MGMT_EV_USER_PASSKEY_REQUEST,
152 MGMT_EV_DEVICE_FOUND,
154 MGMT_EV_DEVICE_BLOCKED,
155 MGMT_EV_DEVICE_UNBLOCKED,
156 MGMT_EV_DEVICE_UNPAIRED,
157 MGMT_EV_PASSKEY_NOTIFY,
160 MGMT_EV_DEVICE_ADDED,
161 MGMT_EV_DEVICE_REMOVED,
162 MGMT_EV_NEW_CONN_PARAM,
163 MGMT_EV_UNCONF_INDEX_ADDED,
164 MGMT_EV_UNCONF_INDEX_REMOVED,
165 MGMT_EV_NEW_CONFIG_OPTIONS,
166 MGMT_EV_EXT_INDEX_ADDED,
167 MGMT_EV_EXT_INDEX_REMOVED,
168 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
169 MGMT_EV_ADVERTISING_ADDED,
170 MGMT_EV_ADVERTISING_REMOVED,
171 MGMT_EV_EXT_INFO_CHANGED,
172 MGMT_EV_PHY_CONFIGURATION_CHANGED,
173 MGMT_EV_EXP_FEATURE_CHANGED,
174 MGMT_EV_DEVICE_FLAGS_CHANGED,
175 MGMT_EV_ADV_MONITOR_ADDED,
176 MGMT_EV_ADV_MONITOR_REMOVED,
177 MGMT_EV_CONTROLLER_SUSPEND,
178 MGMT_EV_CONTROLLER_RESUME,
181 static const u16 mgmt_untrusted_commands[] = {
182 MGMT_OP_READ_INDEX_LIST,
184 MGMT_OP_READ_UNCONF_INDEX_LIST,
185 MGMT_OP_READ_CONFIG_INFO,
186 MGMT_OP_READ_EXT_INDEX_LIST,
187 MGMT_OP_READ_EXT_INFO,
188 MGMT_OP_READ_CONTROLLER_CAP,
189 MGMT_OP_READ_EXP_FEATURES_INFO,
190 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
191 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
194 static const u16 mgmt_untrusted_events[] = {
196 MGMT_EV_INDEX_REMOVED,
197 MGMT_EV_NEW_SETTINGS,
198 MGMT_EV_CLASS_OF_DEV_CHANGED,
199 MGMT_EV_LOCAL_NAME_CHANGED,
200 MGMT_EV_UNCONF_INDEX_ADDED,
201 MGMT_EV_UNCONF_INDEX_REMOVED,
202 MGMT_EV_NEW_CONFIG_OPTIONS,
203 MGMT_EV_EXT_INDEX_ADDED,
204 MGMT_EV_EXT_INDEX_REMOVED,
205 MGMT_EV_EXT_INFO_CHANGED,
206 MGMT_EV_EXP_FEATURE_CHANGED,
209 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 "\x00\x00\x00\x00\x00\x00\x00\x00"
214 /* HCI to MGMT error code conversion table */
215 static const u8 mgmt_status_table[] = {
217 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
218 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
219 MGMT_STATUS_FAILED, /* Hardware Failure */
220 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
221 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
222 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
223 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
224 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
226 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
227 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
228 MGMT_STATUS_BUSY, /* Command Disallowed */
229 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
230 MGMT_STATUS_REJECTED, /* Rejected Security */
231 MGMT_STATUS_REJECTED, /* Rejected Personal */
232 MGMT_STATUS_TIMEOUT, /* Host Timeout */
233 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
234 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
235 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
236 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
237 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
238 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
239 MGMT_STATUS_BUSY, /* Repeated Attempts */
240 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
241 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
242 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
243 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
244 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
245 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
246 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
247 MGMT_STATUS_FAILED, /* Unspecified Error */
248 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
249 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
250 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
251 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
252 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
253 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
254 MGMT_STATUS_FAILED, /* Unit Link Key Used */
255 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
256 MGMT_STATUS_TIMEOUT, /* Instant Passed */
257 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
258 MGMT_STATUS_FAILED, /* Transaction Collision */
259 MGMT_STATUS_FAILED, /* Reserved for future use */
260 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
261 MGMT_STATUS_REJECTED, /* QoS Rejected */
262 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
263 MGMT_STATUS_REJECTED, /* Insufficient Security */
264 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
265 MGMT_STATUS_FAILED, /* Reserved for future use */
266 MGMT_STATUS_BUSY, /* Role Switch Pending */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_FAILED, /* Slot Violation */
269 MGMT_STATUS_FAILED, /* Role Switch Failed */
270 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
271 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
272 MGMT_STATUS_BUSY, /* Host Busy Pairing */
273 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
274 MGMT_STATUS_BUSY, /* Controller Busy */
275 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
276 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
277 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
278 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
279 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
282 static u8 mgmt_status(u8 hci_status)
284 if (hci_status < ARRAY_SIZE(mgmt_status_table))
285 return mgmt_status_table[hci_status];
287 return MGMT_STATUS_FAILED;
290 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
293 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
297 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
298 u16 len, int flag, struct sock *skip_sk)
300 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
304 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
305 struct sock *skip_sk)
307 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
308 HCI_SOCK_TRUSTED, skip_sk);
311 static u8 le_addr_type(u8 mgmt_addr_type)
313 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
314 return ADDR_LE_DEV_PUBLIC;
316 return ADDR_LE_DEV_RANDOM;
319 void mgmt_fill_version_info(void *ver)
321 struct mgmt_rp_read_version *rp = ver;
323 rp->version = MGMT_VERSION;
324 rp->revision = cpu_to_le16(MGMT_REVISION);
327 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
330 struct mgmt_rp_read_version rp;
332 bt_dev_dbg(hdev, "sock %p", sk);
334 mgmt_fill_version_info(&rp);
336 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
340 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
343 struct mgmt_rp_read_commands *rp;
344 u16 num_commands, num_events;
348 bt_dev_dbg(hdev, "sock %p", sk);
350 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
351 num_commands = ARRAY_SIZE(mgmt_commands);
352 num_events = ARRAY_SIZE(mgmt_events);
354 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
355 num_events = ARRAY_SIZE(mgmt_untrusted_events);
358 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
360 rp = kmalloc(rp_size, GFP_KERNEL);
364 rp->num_commands = cpu_to_le16(num_commands);
365 rp->num_events = cpu_to_le16(num_events);
367 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
368 __le16 *opcode = rp->opcodes;
370 for (i = 0; i < num_commands; i++, opcode++)
371 put_unaligned_le16(mgmt_commands[i], opcode);
373 for (i = 0; i < num_events; i++, opcode++)
374 put_unaligned_le16(mgmt_events[i], opcode);
376 __le16 *opcode = rp->opcodes;
378 for (i = 0; i < num_commands; i++, opcode++)
379 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
381 for (i = 0; i < num_events; i++, opcode++)
382 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
385 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
392 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
395 struct mgmt_rp_read_index_list *rp;
401 bt_dev_dbg(hdev, "sock %p", sk);
403 read_lock(&hci_dev_list_lock);
406 list_for_each_entry(d, &hci_dev_list, list) {
407 if (d->dev_type == HCI_PRIMARY &&
408 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
412 rp_len = sizeof(*rp) + (2 * count);
413 rp = kmalloc(rp_len, GFP_ATOMIC);
415 read_unlock(&hci_dev_list_lock);
420 list_for_each_entry(d, &hci_dev_list, list) {
421 if (hci_dev_test_flag(d, HCI_SETUP) ||
422 hci_dev_test_flag(d, HCI_CONFIG) ||
423 hci_dev_test_flag(d, HCI_USER_CHANNEL))
426 /* Devices marked as raw-only are neither configured
427 * nor unconfigured controllers.
429 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
432 if (d->dev_type == HCI_PRIMARY &&
433 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
434 rp->index[count++] = cpu_to_le16(d->id);
435 bt_dev_dbg(hdev, "Added hci%u", d->id);
439 rp->num_controllers = cpu_to_le16(count);
440 rp_len = sizeof(*rp) + (2 * count);
442 read_unlock(&hci_dev_list_lock);
444 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
452 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
453 void *data, u16 data_len)
455 struct mgmt_rp_read_unconf_index_list *rp;
461 bt_dev_dbg(hdev, "sock %p", sk);
463 read_lock(&hci_dev_list_lock);
466 list_for_each_entry(d, &hci_dev_list, list) {
467 if (d->dev_type == HCI_PRIMARY &&
468 hci_dev_test_flag(d, HCI_UNCONFIGURED))
472 rp_len = sizeof(*rp) + (2 * count);
473 rp = kmalloc(rp_len, GFP_ATOMIC);
475 read_unlock(&hci_dev_list_lock);
480 list_for_each_entry(d, &hci_dev_list, list) {
481 if (hci_dev_test_flag(d, HCI_SETUP) ||
482 hci_dev_test_flag(d, HCI_CONFIG) ||
483 hci_dev_test_flag(d, HCI_USER_CHANNEL))
486 /* Devices marked as raw-only are neither configured
487 * nor unconfigured controllers.
489 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
492 if (d->dev_type == HCI_PRIMARY &&
493 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
494 rp->index[count++] = cpu_to_le16(d->id);
495 bt_dev_dbg(hdev, "Added hci%u", d->id);
499 rp->num_controllers = cpu_to_le16(count);
500 rp_len = sizeof(*rp) + (2 * count);
502 read_unlock(&hci_dev_list_lock);
504 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
505 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
512 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
513 void *data, u16 data_len)
515 struct mgmt_rp_read_ext_index_list *rp;
520 bt_dev_dbg(hdev, "sock %p", sk);
522 read_lock(&hci_dev_list_lock);
525 list_for_each_entry(d, &hci_dev_list, list) {
526 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
530 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
532 read_unlock(&hci_dev_list_lock);
537 list_for_each_entry(d, &hci_dev_list, list) {
538 if (hci_dev_test_flag(d, HCI_SETUP) ||
539 hci_dev_test_flag(d, HCI_CONFIG) ||
540 hci_dev_test_flag(d, HCI_USER_CHANNEL))
543 /* Devices marked as raw-only are neither configured
544 * nor unconfigured controllers.
546 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
549 if (d->dev_type == HCI_PRIMARY) {
550 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
551 rp->entry[count].type = 0x01;
553 rp->entry[count].type = 0x00;
554 } else if (d->dev_type == HCI_AMP) {
555 rp->entry[count].type = 0x02;
560 rp->entry[count].bus = d->bus;
561 rp->entry[count++].index = cpu_to_le16(d->id);
562 bt_dev_dbg(hdev, "Added hci%u", d->id);
565 rp->num_controllers = cpu_to_le16(count);
567 read_unlock(&hci_dev_list_lock);
569 /* If this command is called at least once, then all the
570 * default index and unconfigured index events are disabled
571 * and from now on only extended index events are used.
573 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
574 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
575 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
577 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
578 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
579 struct_size(rp, entry, count));
586 static bool is_configured(struct hci_dev *hdev)
588 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
589 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
592 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
593 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
594 !bacmp(&hdev->public_addr, BDADDR_ANY))
600 static __le32 get_missing_options(struct hci_dev *hdev)
604 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
605 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
606 options |= MGMT_OPTION_EXTERNAL_CONFIG;
608 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
609 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
610 !bacmp(&hdev->public_addr, BDADDR_ANY))
611 options |= MGMT_OPTION_PUBLIC_ADDRESS;
613 return cpu_to_le32(options);
616 static int new_options(struct hci_dev *hdev, struct sock *skip)
618 __le32 options = get_missing_options(hdev);
620 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
621 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
624 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
626 __le32 options = get_missing_options(hdev);
628 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
632 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
633 void *data, u16 data_len)
635 struct mgmt_rp_read_config_info rp;
638 bt_dev_dbg(hdev, "sock %p", sk);
642 memset(&rp, 0, sizeof(rp));
643 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
645 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
646 options |= MGMT_OPTION_EXTERNAL_CONFIG;
648 if (hdev->set_bdaddr)
649 options |= MGMT_OPTION_PUBLIC_ADDRESS;
651 rp.supported_options = cpu_to_le32(options);
652 rp.missing_options = get_missing_options(hdev);
654 hci_dev_unlock(hdev);
656 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
660 static u32 get_supported_phys(struct hci_dev *hdev)
662 u32 supported_phys = 0;
664 if (lmp_bredr_capable(hdev)) {
665 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
667 if (hdev->features[0][0] & LMP_3SLOT)
668 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
670 if (hdev->features[0][0] & LMP_5SLOT)
671 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
673 if (lmp_edr_2m_capable(hdev)) {
674 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
676 if (lmp_edr_3slot_capable(hdev))
677 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
679 if (lmp_edr_5slot_capable(hdev))
680 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
682 if (lmp_edr_3m_capable(hdev)) {
683 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
685 if (lmp_edr_3slot_capable(hdev))
686 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
688 if (lmp_edr_5slot_capable(hdev))
689 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
694 if (lmp_le_capable(hdev)) {
695 supported_phys |= MGMT_PHY_LE_1M_TX;
696 supported_phys |= MGMT_PHY_LE_1M_RX;
698 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
699 supported_phys |= MGMT_PHY_LE_2M_TX;
700 supported_phys |= MGMT_PHY_LE_2M_RX;
703 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
704 supported_phys |= MGMT_PHY_LE_CODED_TX;
705 supported_phys |= MGMT_PHY_LE_CODED_RX;
709 return supported_phys;
712 static u32 get_selected_phys(struct hci_dev *hdev)
714 u32 selected_phys = 0;
716 if (lmp_bredr_capable(hdev)) {
717 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
719 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
720 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
722 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
723 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
725 if (lmp_edr_2m_capable(hdev)) {
726 if (!(hdev->pkt_type & HCI_2DH1))
727 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
729 if (lmp_edr_3slot_capable(hdev) &&
730 !(hdev->pkt_type & HCI_2DH3))
731 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
733 if (lmp_edr_5slot_capable(hdev) &&
734 !(hdev->pkt_type & HCI_2DH5))
735 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
737 if (lmp_edr_3m_capable(hdev)) {
738 if (!(hdev->pkt_type & HCI_3DH1))
739 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
741 if (lmp_edr_3slot_capable(hdev) &&
742 !(hdev->pkt_type & HCI_3DH3))
743 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
745 if (lmp_edr_5slot_capable(hdev) &&
746 !(hdev->pkt_type & HCI_3DH5))
747 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
752 if (lmp_le_capable(hdev)) {
753 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
754 selected_phys |= MGMT_PHY_LE_1M_TX;
756 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
757 selected_phys |= MGMT_PHY_LE_1M_RX;
759 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
760 selected_phys |= MGMT_PHY_LE_2M_TX;
762 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
763 selected_phys |= MGMT_PHY_LE_2M_RX;
765 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
766 selected_phys |= MGMT_PHY_LE_CODED_TX;
768 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
769 selected_phys |= MGMT_PHY_LE_CODED_RX;
772 return selected_phys;
775 static u32 get_configurable_phys(struct hci_dev *hdev)
777 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
778 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
781 static u32 get_supported_settings(struct hci_dev *hdev)
785 settings |= MGMT_SETTING_POWERED;
786 settings |= MGMT_SETTING_BONDABLE;
787 settings |= MGMT_SETTING_DEBUG_KEYS;
788 settings |= MGMT_SETTING_CONNECTABLE;
789 settings |= MGMT_SETTING_DISCOVERABLE;
791 if (lmp_bredr_capable(hdev)) {
792 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
793 settings |= MGMT_SETTING_FAST_CONNECTABLE;
794 settings |= MGMT_SETTING_BREDR;
795 settings |= MGMT_SETTING_LINK_SECURITY;
797 if (lmp_ssp_capable(hdev)) {
798 settings |= MGMT_SETTING_SSP;
799 if (IS_ENABLED(CONFIG_BT_HS))
800 settings |= MGMT_SETTING_HS;
803 if (lmp_sc_capable(hdev))
804 settings |= MGMT_SETTING_SECURE_CONN;
806 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
808 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
811 if (lmp_le_capable(hdev)) {
812 settings |= MGMT_SETTING_LE;
813 settings |= MGMT_SETTING_SECURE_CONN;
814 settings |= MGMT_SETTING_PRIVACY;
815 settings |= MGMT_SETTING_STATIC_ADDRESS;
817 /* When the experimental feature for LL Privacy support is
818 * enabled, then advertising is no longer supported.
820 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
821 settings |= MGMT_SETTING_ADVERTISING;
824 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
826 settings |= MGMT_SETTING_CONFIGURATION;
828 settings |= MGMT_SETTING_PHY_CONFIGURATION;
833 static u32 get_current_settings(struct hci_dev *hdev)
837 if (hdev_is_powered(hdev))
838 settings |= MGMT_SETTING_POWERED;
840 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
841 settings |= MGMT_SETTING_CONNECTABLE;
843 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
844 settings |= MGMT_SETTING_FAST_CONNECTABLE;
846 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
847 settings |= MGMT_SETTING_DISCOVERABLE;
849 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
850 settings |= MGMT_SETTING_BONDABLE;
852 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
853 settings |= MGMT_SETTING_BREDR;
855 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
856 settings |= MGMT_SETTING_LE;
858 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
859 settings |= MGMT_SETTING_LINK_SECURITY;
861 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
862 settings |= MGMT_SETTING_SSP;
864 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
865 settings |= MGMT_SETTING_HS;
867 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
868 settings |= MGMT_SETTING_ADVERTISING;
870 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
871 settings |= MGMT_SETTING_SECURE_CONN;
873 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
874 settings |= MGMT_SETTING_DEBUG_KEYS;
876 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
877 settings |= MGMT_SETTING_PRIVACY;
879 /* The current setting for static address has two purposes. The
880 * first is to indicate if the static address will be used and
881 * the second is to indicate if it is actually set.
883 * This means if the static address is not configured, this flag
884 * will never be set. If the address is configured, then if the
885 * address is actually used decides if the flag is set or not.
887 * For single mode LE only controllers and dual-mode controllers
888 * with BR/EDR disabled, the existence of the static address will
891 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
892 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
893 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
894 if (bacmp(&hdev->static_addr, BDADDR_ANY))
895 settings |= MGMT_SETTING_STATIC_ADDRESS;
898 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
899 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
904 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
906 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
909 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
910 struct hci_dev *hdev,
913 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
916 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
918 struct mgmt_pending_cmd *cmd;
920 /* If there's a pending mgmt command the flags will not yet have
921 * their final values, so check for this first.
923 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
925 struct mgmt_mode *cp = cmd->param;
927 return LE_AD_GENERAL;
928 else if (cp->val == 0x02)
929 return LE_AD_LIMITED;
931 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
932 return LE_AD_LIMITED;
933 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
934 return LE_AD_GENERAL;
940 bool mgmt_get_connectable(struct hci_dev *hdev)
942 struct mgmt_pending_cmd *cmd;
944 /* If there's a pending mgmt command the flag will not yet have
945 * it's final value, so check for this first.
947 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
949 struct mgmt_mode *cp = cmd->param;
954 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
957 static void service_cache_off(struct work_struct *work)
959 struct hci_dev *hdev = container_of(work, struct hci_dev,
961 struct hci_request req;
963 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
966 hci_req_init(&req, hdev);
970 __hci_req_update_eir(&req);
971 __hci_req_update_class(&req);
973 hci_dev_unlock(hdev);
975 hci_req_run(&req, NULL);
978 static void rpa_expired(struct work_struct *work)
980 struct hci_dev *hdev = container_of(work, struct hci_dev,
982 struct hci_request req;
984 bt_dev_dbg(hdev, "");
986 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
988 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
991 /* The generation of a new RPA and programming it into the
992 * controller happens in the hci_req_enable_advertising()
995 hci_req_init(&req, hdev);
996 if (ext_adv_capable(hdev))
997 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
999 __hci_req_enable_advertising(&req);
1000 hci_req_run(&req, NULL);
1003 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1005 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1008 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1009 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1011 /* Non-mgmt controlled devices get this bit set
1012 * implicitly so that pairing works for them, however
1013 * for mgmt we require user-space to explicitly enable
1016 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1019 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1020 void *data, u16 data_len)
1022 struct mgmt_rp_read_info rp;
1024 bt_dev_dbg(hdev, "sock %p", sk);
1028 memset(&rp, 0, sizeof(rp));
1030 bacpy(&rp.bdaddr, &hdev->bdaddr);
1032 rp.version = hdev->hci_ver;
1033 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1035 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1036 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1038 memcpy(rp.dev_class, hdev->dev_class, 3);
1040 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1041 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1043 hci_dev_unlock(hdev);
1045 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1049 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1054 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1055 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1056 hdev->dev_class, 3);
1058 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1059 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1062 name_len = strlen(hdev->dev_name);
1063 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1064 hdev->dev_name, name_len);
1066 name_len = strlen(hdev->short_name);
1067 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1068 hdev->short_name, name_len);
1073 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1074 void *data, u16 data_len)
1077 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1080 bt_dev_dbg(hdev, "sock %p", sk);
1082 memset(&buf, 0, sizeof(buf));
1086 bacpy(&rp->bdaddr, &hdev->bdaddr);
1088 rp->version = hdev->hci_ver;
1089 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1091 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1092 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1095 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1096 rp->eir_len = cpu_to_le16(eir_len);
1098 hci_dev_unlock(hdev);
1100 /* If this command is called at least once, then the events
1101 * for class of device and local name changes are disabled
1102 * and only the new extended controller information event
1105 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1106 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1107 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1109 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1110 sizeof(*rp) + eir_len);
1113 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1116 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1119 memset(buf, 0, sizeof(buf));
1121 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1122 ev->eir_len = cpu_to_le16(eir_len);
1124 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1125 sizeof(*ev) + eir_len,
1126 HCI_MGMT_EXT_INFO_EVENTS, skip);
1129 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1131 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1133 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1137 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1139 bt_dev_dbg(hdev, "status 0x%02x", status);
1141 if (hci_conn_count(hdev) == 0) {
1142 cancel_delayed_work(&hdev->power_off);
1143 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1147 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1149 struct mgmt_ev_advertising_added ev;
1151 ev.instance = instance;
1153 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1156 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1159 struct mgmt_ev_advertising_removed ev;
1161 ev.instance = instance;
1163 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1166 static void cancel_adv_timeout(struct hci_dev *hdev)
1168 if (hdev->adv_instance_timeout) {
1169 hdev->adv_instance_timeout = 0;
1170 cancel_delayed_work(&hdev->adv_instance_expire);
1174 static int clean_up_hci_state(struct hci_dev *hdev)
1176 struct hci_request req;
1177 struct hci_conn *conn;
1178 bool discov_stopped;
1181 hci_req_init(&req, hdev);
1183 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1184 test_bit(HCI_PSCAN, &hdev->flags)) {
1186 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1189 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1191 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1192 __hci_req_disable_advertising(&req);
1194 discov_stopped = hci_req_stop_discovery(&req);
1196 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1197 /* 0x15 == Terminated due to Power Off */
1198 __hci_abort_conn(&req, conn, 0x15);
1201 err = hci_req_run(&req, clean_up_hci_complete);
1202 if (!err && discov_stopped)
1203 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1208 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1211 struct mgmt_mode *cp = data;
1212 struct mgmt_pending_cmd *cmd;
1215 bt_dev_dbg(hdev, "sock %p", sk);
1217 if (cp->val != 0x00 && cp->val != 0x01)
1218 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1219 MGMT_STATUS_INVALID_PARAMS);
1223 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1224 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1229 if (!!cp->val == hdev_is_powered(hdev)) {
1230 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1234 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1241 queue_work(hdev->req_workqueue, &hdev->power_on);
1244 /* Disconnect connections, stop scans, etc */
1245 err = clean_up_hci_state(hdev);
1247 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1248 HCI_POWER_OFF_TIMEOUT);
1250 /* ENODATA means there were no HCI commands queued */
1251 if (err == -ENODATA) {
1252 cancel_delayed_work(&hdev->power_off);
1253 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1259 hci_dev_unlock(hdev);
1263 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1265 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1267 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1268 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1271 int mgmt_new_settings(struct hci_dev *hdev)
1273 return new_settings(hdev, NULL);
1278 struct hci_dev *hdev;
1282 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1284 struct cmd_lookup *match = data;
1286 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1288 list_del(&cmd->list);
1290 if (match->sk == NULL) {
1291 match->sk = cmd->sk;
1292 sock_hold(match->sk);
1295 mgmt_pending_free(cmd);
1298 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1302 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1303 mgmt_pending_remove(cmd);
1306 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1308 if (cmd->cmd_complete) {
1311 cmd->cmd_complete(cmd, *status);
1312 mgmt_pending_remove(cmd);
1317 cmd_status_rsp(cmd, data);
1320 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1322 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1323 cmd->param, cmd->param_len);
1326 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1328 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1329 cmd->param, sizeof(struct mgmt_addr_info));
1332 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1334 if (!lmp_bredr_capable(hdev))
1335 return MGMT_STATUS_NOT_SUPPORTED;
1336 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1337 return MGMT_STATUS_REJECTED;
1339 return MGMT_STATUS_SUCCESS;
1342 static u8 mgmt_le_support(struct hci_dev *hdev)
1344 if (!lmp_le_capable(hdev))
1345 return MGMT_STATUS_NOT_SUPPORTED;
1346 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1347 return MGMT_STATUS_REJECTED;
1349 return MGMT_STATUS_SUCCESS;
1352 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1354 struct mgmt_pending_cmd *cmd;
1356 bt_dev_dbg(hdev, "status 0x%02x", status);
1360 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1365 u8 mgmt_err = mgmt_status(status);
1366 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1367 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1371 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1372 hdev->discov_timeout > 0) {
1373 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1374 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1377 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1378 new_settings(hdev, cmd->sk);
1381 mgmt_pending_remove(cmd);
1384 hci_dev_unlock(hdev);
1387 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1390 struct mgmt_cp_set_discoverable *cp = data;
1391 struct mgmt_pending_cmd *cmd;
1395 bt_dev_dbg(hdev, "sock %p", sk);
1397 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1398 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1400 MGMT_STATUS_REJECTED);
1402 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1403 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1404 MGMT_STATUS_INVALID_PARAMS);
1406 timeout = __le16_to_cpu(cp->timeout);
1408 /* Disabling discoverable requires that no timeout is set,
1409 * and enabling limited discoverable requires a timeout.
1411 if ((cp->val == 0x00 && timeout > 0) ||
1412 (cp->val == 0x02 && timeout == 0))
1413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1414 MGMT_STATUS_INVALID_PARAMS);
1418 if (!hdev_is_powered(hdev) && timeout > 0) {
1419 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1420 MGMT_STATUS_NOT_POWERED);
1424 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1425 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1431 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1432 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1433 MGMT_STATUS_REJECTED);
1437 if (hdev->advertising_paused) {
1438 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1443 if (!hdev_is_powered(hdev)) {
1444 bool changed = false;
1446 /* Setting limited discoverable when powered off is
1447 * not a valid operation since it requires a timeout
1448 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1450 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1451 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1455 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1460 err = new_settings(hdev, sk);
1465 /* If the current mode is the same, then just update the timeout
1466 * value with the new value. And if only the timeout gets updated,
1467 * then no need for any HCI transactions.
1469 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1470 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1471 HCI_LIMITED_DISCOVERABLE)) {
1472 cancel_delayed_work(&hdev->discov_off);
1473 hdev->discov_timeout = timeout;
1475 if (cp->val && hdev->discov_timeout > 0) {
1476 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1477 queue_delayed_work(hdev->req_workqueue,
1478 &hdev->discov_off, to);
1481 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1485 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1491 /* Cancel any potential discoverable timeout that might be
1492 * still active and store new timeout value. The arming of
1493 * the timeout happens in the complete handler.
1495 cancel_delayed_work(&hdev->discov_off);
1496 hdev->discov_timeout = timeout;
1499 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1501 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1503 /* Limited discoverable mode */
1504 if (cp->val == 0x02)
1505 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1507 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1509 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1513 hci_dev_unlock(hdev);
1517 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1519 struct mgmt_pending_cmd *cmd;
1521 bt_dev_dbg(hdev, "status 0x%02x", status);
1525 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1530 u8 mgmt_err = mgmt_status(status);
1531 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1535 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1536 new_settings(hdev, cmd->sk);
1539 mgmt_pending_remove(cmd);
1542 hci_dev_unlock(hdev);
1545 static int set_connectable_update_settings(struct hci_dev *hdev,
1546 struct sock *sk, u8 val)
1548 bool changed = false;
1551 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1555 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1557 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1558 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1561 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1566 hci_req_update_scan(hdev);
1567 hci_update_background_scan(hdev);
1568 return new_settings(hdev, sk);
1574 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1577 struct mgmt_mode *cp = data;
1578 struct mgmt_pending_cmd *cmd;
1581 bt_dev_dbg(hdev, "sock %p", sk);
1583 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1584 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1586 MGMT_STATUS_REJECTED);
1588 if (cp->val != 0x00 && cp->val != 0x01)
1589 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1590 MGMT_STATUS_INVALID_PARAMS);
1594 if (!hdev_is_powered(hdev)) {
1595 err = set_connectable_update_settings(hdev, sk, cp->val);
1599 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1600 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1601 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1606 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1613 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1615 if (hdev->discov_timeout > 0)
1616 cancel_delayed_work(&hdev->discov_off);
1618 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1619 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1620 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1623 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1627 hci_dev_unlock(hdev);
1631 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1634 struct mgmt_mode *cp = data;
1638 bt_dev_dbg(hdev, "sock %p", sk);
1640 if (cp->val != 0x00 && cp->val != 0x01)
1641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1642 MGMT_STATUS_INVALID_PARAMS);
1647 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1649 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1651 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1656 /* In limited privacy mode the change of bondable mode
1657 * may affect the local advertising address.
1659 if (hdev_is_powered(hdev) &&
1660 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1661 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1662 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1663 queue_work(hdev->req_workqueue,
1664 &hdev->discoverable_update);
1666 err = new_settings(hdev, sk);
1670 hci_dev_unlock(hdev);
1674 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1677 struct mgmt_mode *cp = data;
1678 struct mgmt_pending_cmd *cmd;
1682 bt_dev_dbg(hdev, "sock %p", sk);
1684 status = mgmt_bredr_support(hdev);
1686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1689 if (cp->val != 0x00 && cp->val != 0x01)
1690 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1691 MGMT_STATUS_INVALID_PARAMS);
1695 if (!hdev_is_powered(hdev)) {
1696 bool changed = false;
1698 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1699 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1703 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1708 err = new_settings(hdev, sk);
1713 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1714 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1721 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1722 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1726 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1732 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1734 mgmt_pending_remove(cmd);
1739 hci_dev_unlock(hdev);
1743 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1745 struct mgmt_mode *cp = data;
1746 struct mgmt_pending_cmd *cmd;
1750 bt_dev_dbg(hdev, "sock %p", sk);
1752 status = mgmt_bredr_support(hdev);
1754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1756 if (!lmp_ssp_capable(hdev))
1757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1758 MGMT_STATUS_NOT_SUPPORTED);
1760 if (cp->val != 0x00 && cp->val != 0x01)
1761 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1762 MGMT_STATUS_INVALID_PARAMS);
1766 if (!hdev_is_powered(hdev)) {
1770 changed = !hci_dev_test_and_set_flag(hdev,
1773 changed = hci_dev_test_and_clear_flag(hdev,
1776 changed = hci_dev_test_and_clear_flag(hdev,
1779 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1782 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1787 err = new_settings(hdev, sk);
1792 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1793 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1798 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1799 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1803 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1809 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1810 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1811 sizeof(cp->val), &cp->val);
1813 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1815 mgmt_pending_remove(cmd);
1820 hci_dev_unlock(hdev);
1824 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1826 struct mgmt_mode *cp = data;
1831 bt_dev_dbg(hdev, "sock %p", sk);
1833 if (!IS_ENABLED(CONFIG_BT_HS))
1834 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1835 MGMT_STATUS_NOT_SUPPORTED);
1837 status = mgmt_bredr_support(hdev);
1839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1841 if (!lmp_ssp_capable(hdev))
1842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1843 MGMT_STATUS_NOT_SUPPORTED);
1845 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1847 MGMT_STATUS_REJECTED);
1849 if (cp->val != 0x00 && cp->val != 0x01)
1850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1851 MGMT_STATUS_INVALID_PARAMS);
1855 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1862 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1864 if (hdev_is_powered(hdev)) {
1865 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1866 MGMT_STATUS_REJECTED);
1870 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1873 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1878 err = new_settings(hdev, sk);
1881 hci_dev_unlock(hdev);
1885 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1887 struct cmd_lookup match = { NULL, hdev };
1892 u8 mgmt_err = mgmt_status(status);
1894 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1899 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1901 new_settings(hdev, match.sk);
1906 /* Make sure the controller has a good default for
1907 * advertising data. Restrict the update to when LE
1908 * has actually been enabled. During power on, the
1909 * update in powered_update_hci will take care of it.
1911 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1912 struct hci_request req;
1913 hci_req_init(&req, hdev);
1914 if (ext_adv_capable(hdev)) {
1917 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1919 __hci_req_update_scan_rsp_data(&req, 0x00);
1921 __hci_req_update_adv_data(&req, 0x00);
1922 __hci_req_update_scan_rsp_data(&req, 0x00);
1924 hci_req_run(&req, NULL);
1925 hci_update_background_scan(hdev);
1929 hci_dev_unlock(hdev);
1932 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1934 struct mgmt_mode *cp = data;
1935 struct hci_cp_write_le_host_supported hci_cp;
1936 struct mgmt_pending_cmd *cmd;
1937 struct hci_request req;
1941 bt_dev_dbg(hdev, "sock %p", sk);
1943 if (!lmp_le_capable(hdev))
1944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1945 MGMT_STATUS_NOT_SUPPORTED);
1947 if (cp->val != 0x00 && cp->val != 0x01)
1948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1949 MGMT_STATUS_INVALID_PARAMS);
1951 /* Bluetooth single mode LE only controllers or dual-mode
1952 * controllers configured as LE only devices, do not allow
1953 * switching LE off. These have either LE enabled explicitly
1954 * or BR/EDR has been previously switched off.
1956 * When trying to enable an already enabled LE, then gracefully
1957 * send a positive response. Trying to disable it however will
1958 * result into rejection.
1960 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1961 if (cp->val == 0x01)
1962 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1964 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1965 MGMT_STATUS_REJECTED);
1971 enabled = lmp_host_le_capable(hdev);
1974 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1976 if (!hdev_is_powered(hdev) || val == enabled) {
1977 bool changed = false;
1979 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1980 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1984 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1985 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1989 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1994 err = new_settings(hdev, sk);
1999 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2000 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2006 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2012 hci_req_init(&req, hdev);
2014 memset(&hci_cp, 0, sizeof(hci_cp));
2018 hci_cp.simul = 0x00;
2020 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2021 __hci_req_disable_advertising(&req);
2023 if (ext_adv_capable(hdev))
2024 __hci_req_clear_ext_adv_sets(&req);
2027 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2030 err = hci_req_run(&req, le_enable_complete);
2032 mgmt_pending_remove(cmd);
2035 hci_dev_unlock(hdev);
2039 /* This is a helper function to test for pending mgmt commands that can
2040 * cause CoD or EIR HCI commands. We can only allow one such pending
2041 * mgmt command at a time since otherwise we cannot easily track what
2042 * the current values are, will be, and based on that calculate if a new
2043 * HCI command needs to be sent and if yes with what value.
2045 static bool pending_eir_or_class(struct hci_dev *hdev)
2047 struct mgmt_pending_cmd *cmd;
2049 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2050 switch (cmd->opcode) {
2051 case MGMT_OP_ADD_UUID:
2052 case MGMT_OP_REMOVE_UUID:
2053 case MGMT_OP_SET_DEV_CLASS:
2054 case MGMT_OP_SET_POWERED:
2062 static const u8 bluetooth_base_uuid[] = {
2063 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2064 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2067 static u8 get_uuid_size(const u8 *uuid)
2071 if (memcmp(uuid, bluetooth_base_uuid, 12))
2074 val = get_unaligned_le32(&uuid[12]);
2081 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2083 struct mgmt_pending_cmd *cmd;
2087 cmd = pending_find(mgmt_op, hdev);
2091 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2092 mgmt_status(status), hdev->dev_class, 3);
2094 mgmt_pending_remove(cmd);
2097 hci_dev_unlock(hdev);
2100 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2102 bt_dev_dbg(hdev, "status 0x%02x", status);
2104 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2107 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2109 struct mgmt_cp_add_uuid *cp = data;
2110 struct mgmt_pending_cmd *cmd;
2111 struct hci_request req;
2112 struct bt_uuid *uuid;
2115 bt_dev_dbg(hdev, "sock %p", sk);
2119 if (pending_eir_or_class(hdev)) {
2120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2125 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2131 memcpy(uuid->uuid, cp->uuid, 16);
2132 uuid->svc_hint = cp->svc_hint;
2133 uuid->size = get_uuid_size(cp->uuid);
2135 list_add_tail(&uuid->list, &hdev->uuids);
2137 hci_req_init(&req, hdev);
2139 __hci_req_update_class(&req);
2140 __hci_req_update_eir(&req);
2142 err = hci_req_run(&req, add_uuid_complete);
2144 if (err != -ENODATA)
2147 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2148 hdev->dev_class, 3);
2152 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2161 hci_dev_unlock(hdev);
2165 static bool enable_service_cache(struct hci_dev *hdev)
2167 if (!hdev_is_powered(hdev))
2170 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2171 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2179 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2181 bt_dev_dbg(hdev, "status 0x%02x", status);
2183 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2186 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2189 struct mgmt_cp_remove_uuid *cp = data;
2190 struct mgmt_pending_cmd *cmd;
2191 struct bt_uuid *match, *tmp;
2192 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2193 struct hci_request req;
2196 bt_dev_dbg(hdev, "sock %p", sk);
2200 if (pending_eir_or_class(hdev)) {
2201 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2206 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2207 hci_uuids_clear(hdev);
2209 if (enable_service_cache(hdev)) {
2210 err = mgmt_cmd_complete(sk, hdev->id,
2211 MGMT_OP_REMOVE_UUID,
2212 0, hdev->dev_class, 3);
2221 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2222 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2225 list_del(&match->list);
2231 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2232 MGMT_STATUS_INVALID_PARAMS);
2237 hci_req_init(&req, hdev);
2239 __hci_req_update_class(&req);
2240 __hci_req_update_eir(&req);
2242 err = hci_req_run(&req, remove_uuid_complete);
2244 if (err != -ENODATA)
2247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2248 hdev->dev_class, 3);
2252 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2261 hci_dev_unlock(hdev);
2265 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2267 bt_dev_dbg(hdev, "status 0x%02x", status);
2269 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2272 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2275 struct mgmt_cp_set_dev_class *cp = data;
2276 struct mgmt_pending_cmd *cmd;
2277 struct hci_request req;
2280 bt_dev_dbg(hdev, "sock %p", sk);
2282 if (!lmp_bredr_capable(hdev))
2283 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2284 MGMT_STATUS_NOT_SUPPORTED);
2288 if (pending_eir_or_class(hdev)) {
2289 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2294 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2295 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2296 MGMT_STATUS_INVALID_PARAMS);
2300 hdev->major_class = cp->major;
2301 hdev->minor_class = cp->minor;
2303 if (!hdev_is_powered(hdev)) {
2304 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2305 hdev->dev_class, 3);
2309 hci_req_init(&req, hdev);
2311 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2312 hci_dev_unlock(hdev);
2313 cancel_delayed_work_sync(&hdev->service_cache);
2315 __hci_req_update_eir(&req);
2318 __hci_req_update_class(&req);
2320 err = hci_req_run(&req, set_class_complete);
2322 if (err != -ENODATA)
2325 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2326 hdev->dev_class, 3);
2330 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2339 hci_dev_unlock(hdev);
2343 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2346 struct mgmt_cp_load_link_keys *cp = data;
2347 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2348 sizeof(struct mgmt_link_key_info));
2349 u16 key_count, expected_len;
2353 bt_dev_dbg(hdev, "sock %p", sk);
2355 if (!lmp_bredr_capable(hdev))
2356 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2357 MGMT_STATUS_NOT_SUPPORTED);
2359 key_count = __le16_to_cpu(cp->key_count);
2360 if (key_count > max_key_count) {
2361 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2364 MGMT_STATUS_INVALID_PARAMS);
2367 expected_len = struct_size(cp, keys, key_count);
2368 if (expected_len != len) {
2369 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2372 MGMT_STATUS_INVALID_PARAMS);
2375 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2376 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2377 MGMT_STATUS_INVALID_PARAMS);
2379 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2382 for (i = 0; i < key_count; i++) {
2383 struct mgmt_link_key_info *key = &cp->keys[i];
2385 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2386 return mgmt_cmd_status(sk, hdev->id,
2387 MGMT_OP_LOAD_LINK_KEYS,
2388 MGMT_STATUS_INVALID_PARAMS);
2393 hci_link_keys_clear(hdev);
2396 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2398 changed = hci_dev_test_and_clear_flag(hdev,
2399 HCI_KEEP_DEBUG_KEYS);
2402 new_settings(hdev, NULL);
2404 for (i = 0; i < key_count; i++) {
2405 struct mgmt_link_key_info *key = &cp->keys[i];
2407 if (hci_is_blocked_key(hdev,
2408 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2410 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2415 /* Always ignore debug keys and require a new pairing if
2416 * the user wants to use them.
2418 if (key->type == HCI_LK_DEBUG_COMBINATION)
2421 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2422 key->type, key->pin_len, NULL);
2425 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2427 hci_dev_unlock(hdev);
2432 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2433 u8 addr_type, struct sock *skip_sk)
2435 struct mgmt_ev_device_unpaired ev;
2437 bacpy(&ev.addr.bdaddr, bdaddr);
2438 ev.addr.type = addr_type;
2440 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2444 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2447 struct mgmt_cp_unpair_device *cp = data;
2448 struct mgmt_rp_unpair_device rp;
2449 struct hci_conn_params *params;
2450 struct mgmt_pending_cmd *cmd;
2451 struct hci_conn *conn;
2455 memset(&rp, 0, sizeof(rp));
2456 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2457 rp.addr.type = cp->addr.type;
2459 if (!bdaddr_type_is_valid(cp->addr.type))
2460 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2461 MGMT_STATUS_INVALID_PARAMS,
2464 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2465 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2466 MGMT_STATUS_INVALID_PARAMS,
2471 if (!hdev_is_powered(hdev)) {
2472 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2473 MGMT_STATUS_NOT_POWERED, &rp,
2478 if (cp->addr.type == BDADDR_BREDR) {
2479 /* If disconnection is requested, then look up the
2480 * connection. If the remote device is connected, it
2481 * will be later used to terminate the link.
2483 * Setting it to NULL explicitly will cause no
2484 * termination of the link.
2487 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2492 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2494 err = mgmt_cmd_complete(sk, hdev->id,
2495 MGMT_OP_UNPAIR_DEVICE,
2496 MGMT_STATUS_NOT_PAIRED, &rp,
2504 /* LE address type */
2505 addr_type = le_addr_type(cp->addr.type);
2507 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2508 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2510 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2511 MGMT_STATUS_NOT_PAIRED, &rp,
2516 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2518 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2523 /* Defer clearing up the connection parameters until closing to
2524 * give a chance of keeping them if a repairing happens.
2526 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2528 /* Disable auto-connection parameters if present */
2529 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2531 if (params->explicit_connect)
2532 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2534 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2537 /* If disconnection is not requested, then clear the connection
2538 * variable so that the link is not terminated.
2540 if (!cp->disconnect)
2544 /* If the connection variable is set, then termination of the
2545 * link is requested.
2548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2550 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2554 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2561 cmd->cmd_complete = addr_cmd_complete;
2563 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2565 mgmt_pending_remove(cmd);
2568 hci_dev_unlock(hdev);
2572 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2575 struct mgmt_cp_disconnect *cp = data;
2576 struct mgmt_rp_disconnect rp;
2577 struct mgmt_pending_cmd *cmd;
2578 struct hci_conn *conn;
2581 bt_dev_dbg(hdev, "sock %p", sk);
2583 memset(&rp, 0, sizeof(rp));
2584 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2585 rp.addr.type = cp->addr.type;
2587 if (!bdaddr_type_is_valid(cp->addr.type))
2588 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2589 MGMT_STATUS_INVALID_PARAMS,
2594 if (!test_bit(HCI_UP, &hdev->flags)) {
2595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2596 MGMT_STATUS_NOT_POWERED, &rp,
2601 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2602 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2603 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2607 if (cp->addr.type == BDADDR_BREDR)
2608 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2611 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2612 le_addr_type(cp->addr.type));
2614 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2615 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2616 MGMT_STATUS_NOT_CONNECTED, &rp,
2621 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2627 cmd->cmd_complete = generic_cmd_complete;
2629 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2631 mgmt_pending_remove(cmd);
2634 hci_dev_unlock(hdev);
2638 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2640 switch (link_type) {
2642 switch (addr_type) {
2643 case ADDR_LE_DEV_PUBLIC:
2644 return BDADDR_LE_PUBLIC;
2647 /* Fallback to LE Random address type */
2648 return BDADDR_LE_RANDOM;
2652 /* Fallback to BR/EDR type */
2653 return BDADDR_BREDR;
2657 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2660 struct mgmt_rp_get_connections *rp;
2665 bt_dev_dbg(hdev, "sock %p", sk);
2669 if (!hdev_is_powered(hdev)) {
2670 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2671 MGMT_STATUS_NOT_POWERED);
2676 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2677 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2681 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2688 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2689 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2691 bacpy(&rp->addr[i].bdaddr, &c->dst);
2692 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2693 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2698 rp->conn_count = cpu_to_le16(i);
2700 /* Recalculate length in case of filtered SCO connections, etc */
2701 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2702 struct_size(rp, addr, i));
2707 hci_dev_unlock(hdev);
2711 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2712 struct mgmt_cp_pin_code_neg_reply *cp)
2714 struct mgmt_pending_cmd *cmd;
2717 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2722 cmd->cmd_complete = addr_cmd_complete;
2724 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2725 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2727 mgmt_pending_remove(cmd);
2732 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2735 struct hci_conn *conn;
2736 struct mgmt_cp_pin_code_reply *cp = data;
2737 struct hci_cp_pin_code_reply reply;
2738 struct mgmt_pending_cmd *cmd;
2741 bt_dev_dbg(hdev, "sock %p", sk);
2745 if (!hdev_is_powered(hdev)) {
2746 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2747 MGMT_STATUS_NOT_POWERED);
2751 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2754 MGMT_STATUS_NOT_CONNECTED);
2758 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2759 struct mgmt_cp_pin_code_neg_reply ncp;
2761 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2763 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2765 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2767 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2768 MGMT_STATUS_INVALID_PARAMS);
2773 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2779 cmd->cmd_complete = addr_cmd_complete;
2781 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2782 reply.pin_len = cp->pin_len;
2783 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2785 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2787 mgmt_pending_remove(cmd);
2790 hci_dev_unlock(hdev);
2794 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2797 struct mgmt_cp_set_io_capability *cp = data;
2799 bt_dev_dbg(hdev, "sock %p", sk);
2801 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2803 MGMT_STATUS_INVALID_PARAMS);
2807 hdev->io_capability = cp->io_capability;
2809 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2811 hci_dev_unlock(hdev);
2813 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2817 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2819 struct hci_dev *hdev = conn->hdev;
2820 struct mgmt_pending_cmd *cmd;
2822 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2823 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2826 if (cmd->user_data != conn)
2835 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2837 struct mgmt_rp_pair_device rp;
2838 struct hci_conn *conn = cmd->user_data;
2841 bacpy(&rp.addr.bdaddr, &conn->dst);
2842 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2844 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2845 status, &rp, sizeof(rp));
2847 /* So we don't get further callbacks for this connection */
2848 conn->connect_cfm_cb = NULL;
2849 conn->security_cfm_cb = NULL;
2850 conn->disconn_cfm_cb = NULL;
2852 hci_conn_drop(conn);
2854 /* The device is paired so there is no need to remove
2855 * its connection parameters anymore.
2857 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2864 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2866 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2867 struct mgmt_pending_cmd *cmd;
2869 cmd = find_pairing(conn);
2871 cmd->cmd_complete(cmd, status);
2872 mgmt_pending_remove(cmd);
2876 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2878 struct mgmt_pending_cmd *cmd;
2880 BT_DBG("status %u", status);
2882 cmd = find_pairing(conn);
2884 BT_DBG("Unable to find a pending command");
2888 cmd->cmd_complete(cmd, mgmt_status(status));
2889 mgmt_pending_remove(cmd);
2892 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2894 struct mgmt_pending_cmd *cmd;
2896 BT_DBG("status %u", status);
2901 cmd = find_pairing(conn);
2903 BT_DBG("Unable to find a pending command");
2907 cmd->cmd_complete(cmd, mgmt_status(status));
2908 mgmt_pending_remove(cmd);
2911 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2914 struct mgmt_cp_pair_device *cp = data;
2915 struct mgmt_rp_pair_device rp;
2916 struct mgmt_pending_cmd *cmd;
2917 u8 sec_level, auth_type;
2918 struct hci_conn *conn;
2921 bt_dev_dbg(hdev, "sock %p", sk);
2923 memset(&rp, 0, sizeof(rp));
2924 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2925 rp.addr.type = cp->addr.type;
2927 if (!bdaddr_type_is_valid(cp->addr.type))
2928 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2929 MGMT_STATUS_INVALID_PARAMS,
2932 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2933 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2934 MGMT_STATUS_INVALID_PARAMS,
2939 if (!hdev_is_powered(hdev)) {
2940 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2941 MGMT_STATUS_NOT_POWERED, &rp,
2946 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2947 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2948 MGMT_STATUS_ALREADY_PAIRED, &rp,
2953 sec_level = BT_SECURITY_MEDIUM;
2954 auth_type = HCI_AT_DEDICATED_BONDING;
2956 if (cp->addr.type == BDADDR_BREDR) {
2957 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2958 auth_type, CONN_REASON_PAIR_DEVICE);
2960 u8 addr_type = le_addr_type(cp->addr.type);
2961 struct hci_conn_params *p;
2963 /* When pairing a new device, it is expected to remember
2964 * this device for future connections. Adding the connection
2965 * parameter information ahead of time allows tracking
2966 * of the peripheral preferred values and will speed up any
2967 * further connection establishment.
2969 * If connection parameters already exist, then they
2970 * will be kept and this function does nothing.
2972 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2974 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2975 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2977 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2978 sec_level, HCI_LE_CONN_TIMEOUT,
2979 CONN_REASON_PAIR_DEVICE);
2985 if (PTR_ERR(conn) == -EBUSY)
2986 status = MGMT_STATUS_BUSY;
2987 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2988 status = MGMT_STATUS_NOT_SUPPORTED;
2989 else if (PTR_ERR(conn) == -ECONNREFUSED)
2990 status = MGMT_STATUS_REJECTED;
2992 status = MGMT_STATUS_CONNECT_FAILED;
2994 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2995 status, &rp, sizeof(rp));
2999 if (conn->connect_cfm_cb) {
3000 hci_conn_drop(conn);
3001 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3002 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3006 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3009 hci_conn_drop(conn);
3013 cmd->cmd_complete = pairing_complete;
3015 /* For LE, just connecting isn't a proof that the pairing finished */
3016 if (cp->addr.type == BDADDR_BREDR) {
3017 conn->connect_cfm_cb = pairing_complete_cb;
3018 conn->security_cfm_cb = pairing_complete_cb;
3019 conn->disconn_cfm_cb = pairing_complete_cb;
3021 conn->connect_cfm_cb = le_pairing_complete_cb;
3022 conn->security_cfm_cb = le_pairing_complete_cb;
3023 conn->disconn_cfm_cb = le_pairing_complete_cb;
3026 conn->io_capability = cp->io_cap;
3027 cmd->user_data = hci_conn_get(conn);
3029 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3030 hci_conn_security(conn, sec_level, auth_type, true)) {
3031 cmd->cmd_complete(cmd, 0);
3032 mgmt_pending_remove(cmd);
3038 hci_dev_unlock(hdev);
3042 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3045 struct mgmt_addr_info *addr = data;
3046 struct mgmt_pending_cmd *cmd;
3047 struct hci_conn *conn;
3050 bt_dev_dbg(hdev, "sock %p", sk);
3054 if (!hdev_is_powered(hdev)) {
3055 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3056 MGMT_STATUS_NOT_POWERED);
3060 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3062 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3063 MGMT_STATUS_INVALID_PARAMS);
3067 conn = cmd->user_data;
3069 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3070 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3071 MGMT_STATUS_INVALID_PARAMS);
3075 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3076 mgmt_pending_remove(cmd);
3078 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3079 addr, sizeof(*addr));
3081 /* Since user doesn't want to proceed with the connection, abort any
3082 * ongoing pairing and then terminate the link if it was created
3083 * because of the pair device action.
3085 if (addr->type == BDADDR_BREDR)
3086 hci_remove_link_key(hdev, &addr->bdaddr);
3088 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3089 le_addr_type(addr->type));
3091 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3092 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3095 hci_dev_unlock(hdev);
3099 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3100 struct mgmt_addr_info *addr, u16 mgmt_op,
3101 u16 hci_op, __le32 passkey)
3103 struct mgmt_pending_cmd *cmd;
3104 struct hci_conn *conn;
3109 if (!hdev_is_powered(hdev)) {
3110 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3111 MGMT_STATUS_NOT_POWERED, addr,
3116 if (addr->type == BDADDR_BREDR)
3117 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3119 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3120 le_addr_type(addr->type));
3123 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3124 MGMT_STATUS_NOT_CONNECTED, addr,
3129 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3130 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3132 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3133 MGMT_STATUS_SUCCESS, addr,
3136 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3137 MGMT_STATUS_FAILED, addr,
3143 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3149 cmd->cmd_complete = addr_cmd_complete;
3151 /* Continue with pairing via HCI */
3152 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3153 struct hci_cp_user_passkey_reply cp;
3155 bacpy(&cp.bdaddr, &addr->bdaddr);
3156 cp.passkey = passkey;
3157 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3159 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3163 mgmt_pending_remove(cmd);
3166 hci_dev_unlock(hdev);
3170 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3171 void *data, u16 len)
3173 struct mgmt_cp_pin_code_neg_reply *cp = data;
3175 bt_dev_dbg(hdev, "sock %p", sk);
3177 return user_pairing_resp(sk, hdev, &cp->addr,
3178 MGMT_OP_PIN_CODE_NEG_REPLY,
3179 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3182 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3185 struct mgmt_cp_user_confirm_reply *cp = data;
3187 bt_dev_dbg(hdev, "sock %p", sk);
3189 if (len != sizeof(*cp))
3190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3191 MGMT_STATUS_INVALID_PARAMS);
3193 return user_pairing_resp(sk, hdev, &cp->addr,
3194 MGMT_OP_USER_CONFIRM_REPLY,
3195 HCI_OP_USER_CONFIRM_REPLY, 0);
3198 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3199 void *data, u16 len)
3201 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3203 bt_dev_dbg(hdev, "sock %p", sk);
3205 return user_pairing_resp(sk, hdev, &cp->addr,
3206 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3207 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3210 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3213 struct mgmt_cp_user_passkey_reply *cp = data;
3215 bt_dev_dbg(hdev, "sock %p", sk);
3217 return user_pairing_resp(sk, hdev, &cp->addr,
3218 MGMT_OP_USER_PASSKEY_REPLY,
3219 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3222 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3223 void *data, u16 len)
3225 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3227 bt_dev_dbg(hdev, "sock %p", sk);
3229 return user_pairing_resp(sk, hdev, &cp->addr,
3230 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3231 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3234 static void adv_expire(struct hci_dev *hdev, u32 flags)
3236 struct adv_info *adv_instance;
3237 struct hci_request req;
3240 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3244 /* stop if current instance doesn't need to be changed */
3245 if (!(adv_instance->flags & flags))
3248 cancel_adv_timeout(hdev);
3250 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3254 hci_req_init(&req, hdev);
3255 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3260 hci_req_run(&req, NULL);
3263 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3265 struct mgmt_cp_set_local_name *cp;
3266 struct mgmt_pending_cmd *cmd;
3268 bt_dev_dbg(hdev, "status 0x%02x", status);
3272 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3279 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3280 mgmt_status(status));
3282 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3285 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3286 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3289 mgmt_pending_remove(cmd);
3292 hci_dev_unlock(hdev);
3295 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3298 struct mgmt_cp_set_local_name *cp = data;
3299 struct mgmt_pending_cmd *cmd;
3300 struct hci_request req;
3303 bt_dev_dbg(hdev, "sock %p", sk);
3307 /* If the old values are the same as the new ones just return a
3308 * direct command complete event.
3310 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3311 !memcmp(hdev->short_name, cp->short_name,
3312 sizeof(hdev->short_name))) {
3313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3318 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3320 if (!hdev_is_powered(hdev)) {
3321 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3323 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3328 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3329 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3330 ext_info_changed(hdev, sk);
3335 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3341 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3343 hci_req_init(&req, hdev);
3345 if (lmp_bredr_capable(hdev)) {
3346 __hci_req_update_name(&req);
3347 __hci_req_update_eir(&req);
3350 /* The name is stored in the scan response data and so
3351 * no need to update the advertising data here.
3353 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3354 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3356 err = hci_req_run(&req, set_name_complete);
3358 mgmt_pending_remove(cmd);
3361 hci_dev_unlock(hdev);
3365 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3368 struct mgmt_cp_set_appearance *cp = data;
3372 bt_dev_dbg(hdev, "sock %p", sk);
3374 if (!lmp_le_capable(hdev))
3375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3376 MGMT_STATUS_NOT_SUPPORTED);
3378 appearance = le16_to_cpu(cp->appearance);
3382 if (hdev->appearance != appearance) {
3383 hdev->appearance = appearance;
3385 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3386 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3388 ext_info_changed(hdev, sk);
3391 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3394 hci_dev_unlock(hdev);
3399 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3400 void *data, u16 len)
3402 struct mgmt_rp_get_phy_configuration rp;
3404 bt_dev_dbg(hdev, "sock %p", sk);
3408 memset(&rp, 0, sizeof(rp));
3410 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3411 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3412 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3414 hci_dev_unlock(hdev);
3416 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3420 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3422 struct mgmt_ev_phy_configuration_changed ev;
3424 memset(&ev, 0, sizeof(ev));
3426 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3428 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3432 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3433 u16 opcode, struct sk_buff *skb)
3435 struct mgmt_pending_cmd *cmd;
3437 bt_dev_dbg(hdev, "status 0x%02x", status);
3441 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3446 mgmt_cmd_status(cmd->sk, hdev->id,
3447 MGMT_OP_SET_PHY_CONFIGURATION,
3448 mgmt_status(status));
3450 mgmt_cmd_complete(cmd->sk, hdev->id,
3451 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3454 mgmt_phy_configuration_changed(hdev, cmd->sk);
3457 mgmt_pending_remove(cmd);
3460 hci_dev_unlock(hdev);
3463 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3464 void *data, u16 len)
3466 struct mgmt_cp_set_phy_configuration *cp = data;
3467 struct hci_cp_le_set_default_phy cp_phy;
3468 struct mgmt_pending_cmd *cmd;
3469 struct hci_request req;
3470 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3471 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3472 bool changed = false;
3475 bt_dev_dbg(hdev, "sock %p", sk);
3477 configurable_phys = get_configurable_phys(hdev);
3478 supported_phys = get_supported_phys(hdev);
3479 selected_phys = __le32_to_cpu(cp->selected_phys);
3481 if (selected_phys & ~supported_phys)
3482 return mgmt_cmd_status(sk, hdev->id,
3483 MGMT_OP_SET_PHY_CONFIGURATION,
3484 MGMT_STATUS_INVALID_PARAMS);
3486 unconfigure_phys = supported_phys & ~configurable_phys;
3488 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3489 return mgmt_cmd_status(sk, hdev->id,
3490 MGMT_OP_SET_PHY_CONFIGURATION,
3491 MGMT_STATUS_INVALID_PARAMS);
3493 if (selected_phys == get_selected_phys(hdev))
3494 return mgmt_cmd_complete(sk, hdev->id,
3495 MGMT_OP_SET_PHY_CONFIGURATION,
3500 if (!hdev_is_powered(hdev)) {
3501 err = mgmt_cmd_status(sk, hdev->id,
3502 MGMT_OP_SET_PHY_CONFIGURATION,
3503 MGMT_STATUS_REJECTED);
3507 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3508 err = mgmt_cmd_status(sk, hdev->id,
3509 MGMT_OP_SET_PHY_CONFIGURATION,
3514 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3515 pkt_type |= (HCI_DH3 | HCI_DM3);
3517 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3519 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3520 pkt_type |= (HCI_DH5 | HCI_DM5);
3522 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3524 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3525 pkt_type &= ~HCI_2DH1;
3527 pkt_type |= HCI_2DH1;
3529 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3530 pkt_type &= ~HCI_2DH3;
3532 pkt_type |= HCI_2DH3;
3534 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3535 pkt_type &= ~HCI_2DH5;
3537 pkt_type |= HCI_2DH5;
3539 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3540 pkt_type &= ~HCI_3DH1;
3542 pkt_type |= HCI_3DH1;
3544 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3545 pkt_type &= ~HCI_3DH3;
3547 pkt_type |= HCI_3DH3;
3549 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3550 pkt_type &= ~HCI_3DH5;
3552 pkt_type |= HCI_3DH5;
3554 if (pkt_type != hdev->pkt_type) {
3555 hdev->pkt_type = pkt_type;
3559 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3560 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3562 mgmt_phy_configuration_changed(hdev, sk);
3564 err = mgmt_cmd_complete(sk, hdev->id,
3565 MGMT_OP_SET_PHY_CONFIGURATION,
3571 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3578 hci_req_init(&req, hdev);
3580 memset(&cp_phy, 0, sizeof(cp_phy));
3582 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3583 cp_phy.all_phys |= 0x01;
3585 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3586 cp_phy.all_phys |= 0x02;
3588 if (selected_phys & MGMT_PHY_LE_1M_TX)
3589 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3591 if (selected_phys & MGMT_PHY_LE_2M_TX)
3592 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3594 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3595 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3597 if (selected_phys & MGMT_PHY_LE_1M_RX)
3598 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3600 if (selected_phys & MGMT_PHY_LE_2M_RX)
3601 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3603 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3604 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3606 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3608 err = hci_req_run_skb(&req, set_default_phy_complete);
3610 mgmt_pending_remove(cmd);
3613 hci_dev_unlock(hdev);
3618 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3621 int err = MGMT_STATUS_SUCCESS;
3622 struct mgmt_cp_set_blocked_keys *keys = data;
3623 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3624 sizeof(struct mgmt_blocked_key_info));
3625 u16 key_count, expected_len;
3628 bt_dev_dbg(hdev, "sock %p", sk);
3630 key_count = __le16_to_cpu(keys->key_count);
3631 if (key_count > max_key_count) {
3632 bt_dev_err(hdev, "too big key_count value %u", key_count);
3633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3634 MGMT_STATUS_INVALID_PARAMS);
3637 expected_len = struct_size(keys, keys, key_count);
3638 if (expected_len != len) {
3639 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3642 MGMT_STATUS_INVALID_PARAMS);
3647 hci_blocked_keys_clear(hdev);
3649 for (i = 0; i < keys->key_count; ++i) {
3650 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3653 err = MGMT_STATUS_NO_RESOURCES;
3657 b->type = keys->keys[i].type;
3658 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3659 list_add_rcu(&b->list, &hdev->blocked_keys);
3661 hci_dev_unlock(hdev);
3663 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3667 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3668 void *data, u16 len)
3670 struct mgmt_mode *cp = data;
3672 bool changed = false;
3674 bt_dev_dbg(hdev, "sock %p", sk);
3676 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3677 return mgmt_cmd_status(sk, hdev->id,
3678 MGMT_OP_SET_WIDEBAND_SPEECH,
3679 MGMT_STATUS_NOT_SUPPORTED);
3681 if (cp->val != 0x00 && cp->val != 0x01)
3682 return mgmt_cmd_status(sk, hdev->id,
3683 MGMT_OP_SET_WIDEBAND_SPEECH,
3684 MGMT_STATUS_INVALID_PARAMS);
3688 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3689 err = mgmt_cmd_status(sk, hdev->id,
3690 MGMT_OP_SET_WIDEBAND_SPEECH,
3695 if (hdev_is_powered(hdev) &&
3696 !!cp->val != hci_dev_test_flag(hdev,
3697 HCI_WIDEBAND_SPEECH_ENABLED)) {
3698 err = mgmt_cmd_status(sk, hdev->id,
3699 MGMT_OP_SET_WIDEBAND_SPEECH,
3700 MGMT_STATUS_REJECTED);
3705 changed = !hci_dev_test_and_set_flag(hdev,
3706 HCI_WIDEBAND_SPEECH_ENABLED);
3708 changed = hci_dev_test_and_clear_flag(hdev,
3709 HCI_WIDEBAND_SPEECH_ENABLED);
3711 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3716 err = new_settings(hdev, sk);
3719 hci_dev_unlock(hdev);
3723 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3724 void *data, u16 data_len)
3727 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3730 u8 tx_power_range[2];
3732 bt_dev_dbg(hdev, "sock %p", sk);
3734 memset(&buf, 0, sizeof(buf));
3738 /* When the Read Simple Pairing Options command is supported, then
3739 * the remote public key validation is supported.
3741 * Alternatively, when Microsoft extensions are available, they can
3742 * indicate support for public key validation as well.
3744 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3745 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3747 flags |= 0x02; /* Remote public key validation (LE) */
3749 /* When the Read Encryption Key Size command is supported, then the
3750 * encryption key size is enforced.
3752 if (hdev->commands[20] & 0x10)
3753 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3755 flags |= 0x08; /* Encryption key size enforcement (LE) */
3757 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3760 /* When the Read Simple Pairing Options command is supported, then
3761 * also max encryption key size information is provided.
3763 if (hdev->commands[41] & 0x08)
3764 cap_len = eir_append_le16(rp->cap, cap_len,
3765 MGMT_CAP_MAX_ENC_KEY_SIZE,
3766 hdev->max_enc_key_size);
3768 cap_len = eir_append_le16(rp->cap, cap_len,
3769 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3770 SMP_MAX_ENC_KEY_SIZE);
3772 /* Append the min/max LE tx power parameters if we were able to fetch
3773 * it from the controller
3775 if (hdev->commands[38] & 0x80) {
3776 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3777 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3778 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3782 rp->cap_len = cpu_to_le16(cap_len);
3784 hci_dev_unlock(hdev);
3786 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3787 rp, sizeof(*rp) + cap_len);
3790 #ifdef CONFIG_BT_FEATURE_DEBUG
3791 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3792 static const u8 debug_uuid[16] = {
3793 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3794 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3798 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3799 static const u8 simult_central_periph_uuid[16] = {
3800 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3801 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3804 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3805 static const u8 rpa_resolution_uuid[16] = {
3806 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3807 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3810 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3811 void *data, u16 data_len)
3813 char buf[62]; /* Enough space for 3 features */
3814 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3818 bt_dev_dbg(hdev, "sock %p", sk);
3820 memset(&buf, 0, sizeof(buf));
3822 #ifdef CONFIG_BT_FEATURE_DEBUG
3824 flags = bt_dbg_get() ? BIT(0) : 0;
3826 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3827 rp->features[idx].flags = cpu_to_le32(flags);
3833 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3834 (hdev->le_states[4] & 0x08) && /* Central */
3835 (hdev->le_states[4] & 0x40) && /* Peripheral */
3836 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3841 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3842 rp->features[idx].flags = cpu_to_le32(flags);
3846 if (hdev && use_ll_privacy(hdev)) {
3847 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3848 flags = BIT(0) | BIT(1);
3852 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3853 rp->features[idx].flags = cpu_to_le32(flags);
3857 rp->feature_count = cpu_to_le16(idx);
3859 /* After reading the experimental features information, enable
3860 * the events to update client on any future change.
3862 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3864 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3865 MGMT_OP_READ_EXP_FEATURES_INFO,
3866 0, rp, sizeof(*rp) + (20 * idx));
3869 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3872 struct mgmt_ev_exp_feature_changed ev;
3874 memset(&ev, 0, sizeof(ev));
3875 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3876 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3878 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3880 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3884 #ifdef CONFIG_BT_FEATURE_DEBUG
3885 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3887 struct mgmt_ev_exp_feature_changed ev;
3889 memset(&ev, 0, sizeof(ev));
3890 memcpy(ev.uuid, debug_uuid, 16);
3891 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3893 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3895 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3899 #define EXP_FEAT(_uuid, _set_func) \
3902 .set_func = _set_func, \
3905 /* The zero key uuid is special. Multiple exp features are set through it. */
3906 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
3907 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3909 struct mgmt_rp_set_exp_feature rp;
3911 memset(rp.uuid, 0, 16);
3912 rp.flags = cpu_to_le32(0);
3914 #ifdef CONFIG_BT_FEATURE_DEBUG
3916 bool changed = bt_dbg_get();
3921 exp_debug_feature_changed(false, sk);
3925 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3928 changed = hci_dev_test_and_clear_flag(hdev,
3929 HCI_ENABLE_LL_PRIVACY);
3931 exp_ll_privacy_feature_changed(false, hdev, sk);
3934 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3936 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3937 MGMT_OP_SET_EXP_FEATURE, 0,
3941 #ifdef CONFIG_BT_FEATURE_DEBUG
3942 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
3943 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3945 struct mgmt_rp_set_exp_feature rp;
3950 /* Command requires to use the non-controller index */
3952 return mgmt_cmd_status(sk, hdev->id,
3953 MGMT_OP_SET_EXP_FEATURE,
3954 MGMT_STATUS_INVALID_INDEX);
3956 /* Parameters are limited to a single octet */
3957 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3958 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3959 MGMT_OP_SET_EXP_FEATURE,
3960 MGMT_STATUS_INVALID_PARAMS);
3962 /* Only boolean on/off is supported */
3963 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3964 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3965 MGMT_OP_SET_EXP_FEATURE,
3966 MGMT_STATUS_INVALID_PARAMS);
3968 val = !!cp->param[0];
3969 changed = val ? !bt_dbg_get() : bt_dbg_get();
3972 memcpy(rp.uuid, debug_uuid, 16);
3973 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3975 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3977 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3978 MGMT_OP_SET_EXP_FEATURE, 0,
3982 exp_debug_feature_changed(val, sk);
3988 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
3989 struct mgmt_cp_set_exp_feature *cp,
3992 struct mgmt_rp_set_exp_feature rp;
3997 /* Command requires to use the controller index */
3999 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4000 MGMT_OP_SET_EXP_FEATURE,
4001 MGMT_STATUS_INVALID_INDEX);
4003 /* Changes can only be made when controller is powered down */
4004 if (hdev_is_powered(hdev))
4005 return mgmt_cmd_status(sk, hdev->id,
4006 MGMT_OP_SET_EXP_FEATURE,
4007 MGMT_STATUS_REJECTED);
4009 /* Parameters are limited to a single octet */
4010 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4011 return mgmt_cmd_status(sk, hdev->id,
4012 MGMT_OP_SET_EXP_FEATURE,
4013 MGMT_STATUS_INVALID_PARAMS);
4015 /* Only boolean on/off is supported */
4016 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4017 return mgmt_cmd_status(sk, hdev->id,
4018 MGMT_OP_SET_EXP_FEATURE,
4019 MGMT_STATUS_INVALID_PARAMS);
4021 val = !!cp->param[0];
4024 changed = !hci_dev_test_and_set_flag(hdev,
4025 HCI_ENABLE_LL_PRIVACY);
4026 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4028 /* Enable LL privacy + supported settings changed */
4029 flags = BIT(0) | BIT(1);
4031 changed = hci_dev_test_and_clear_flag(hdev,
4032 HCI_ENABLE_LL_PRIVACY);
4034 /* Disable LL privacy + supported settings changed */
4038 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4039 rp.flags = cpu_to_le32(flags);
4041 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4043 err = mgmt_cmd_complete(sk, hdev->id,
4044 MGMT_OP_SET_EXP_FEATURE, 0,
4048 exp_ll_privacy_feature_changed(val, hdev, sk);
4053 static const struct mgmt_exp_feature {
4055 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4056 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4057 } exp_features[] = {
4058 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4059 #ifdef CONFIG_BT_FEATURE_DEBUG
4060 EXP_FEAT(debug_uuid, set_debug_func),
4062 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4064 /* end with a null feature */
4065 EXP_FEAT(NULL, NULL)
4068 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4069 void *data, u16 data_len)
4071 struct mgmt_cp_set_exp_feature *cp = data;
4074 bt_dev_dbg(hdev, "sock %p", sk);
4076 for (i = 0; exp_features[i].uuid; i++) {
4077 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4078 return exp_features[i].set_func(sk, hdev, cp, data_len);
4081 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4082 MGMT_OP_SET_EXP_FEATURE,
4083 MGMT_STATUS_NOT_SUPPORTED);
4086 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4088 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4091 struct mgmt_cp_get_device_flags *cp = data;
4092 struct mgmt_rp_get_device_flags rp;
4093 struct bdaddr_list_with_flags *br_params;
4094 struct hci_conn_params *params;
4095 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4096 u32 current_flags = 0;
4097 u8 status = MGMT_STATUS_INVALID_PARAMS;
4099 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4100 &cp->addr.bdaddr, cp->addr.type);
4104 memset(&rp, 0, sizeof(rp));
4106 if (cp->addr.type == BDADDR_BREDR) {
4107 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4113 current_flags = br_params->current_flags;
4115 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4116 le_addr_type(cp->addr.type));
4121 current_flags = params->current_flags;
4124 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4125 rp.addr.type = cp->addr.type;
4126 rp.supported_flags = cpu_to_le32(supported_flags);
4127 rp.current_flags = cpu_to_le32(current_flags);
4129 status = MGMT_STATUS_SUCCESS;
4132 hci_dev_unlock(hdev);
4134 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4138 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4139 bdaddr_t *bdaddr, u8 bdaddr_type,
4140 u32 supported_flags, u32 current_flags)
4142 struct mgmt_ev_device_flags_changed ev;
4144 bacpy(&ev.addr.bdaddr, bdaddr);
4145 ev.addr.type = bdaddr_type;
4146 ev.supported_flags = cpu_to_le32(supported_flags);
4147 ev.current_flags = cpu_to_le32(current_flags);
4149 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4152 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4155 struct mgmt_cp_set_device_flags *cp = data;
4156 struct bdaddr_list_with_flags *br_params;
4157 struct hci_conn_params *params;
4158 u8 status = MGMT_STATUS_INVALID_PARAMS;
4159 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4160 u32 current_flags = __le32_to_cpu(cp->current_flags);
4162 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4163 &cp->addr.bdaddr, cp->addr.type,
4164 __le32_to_cpu(current_flags));
4166 if ((supported_flags | current_flags) != supported_flags) {
4167 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4168 current_flags, supported_flags);
4174 if (cp->addr.type == BDADDR_BREDR) {
4175 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4180 br_params->current_flags = current_flags;
4181 status = MGMT_STATUS_SUCCESS;
4183 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4184 &cp->addr.bdaddr, cp->addr.type);
4187 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4188 le_addr_type(cp->addr.type));
4190 params->current_flags = current_flags;
4191 status = MGMT_STATUS_SUCCESS;
4193 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4195 le_addr_type(cp->addr.type));
4200 hci_dev_unlock(hdev);
4202 if (status == MGMT_STATUS_SUCCESS)
4203 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4204 supported_flags, current_flags);
4206 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4207 &cp->addr, sizeof(cp->addr));
4210 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4213 struct mgmt_ev_adv_monitor_added ev;
4215 ev.monitor_handle = cpu_to_le16(handle);
4217 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4220 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4222 struct mgmt_ev_adv_monitor_removed ev;
4223 struct mgmt_pending_cmd *cmd;
4224 struct sock *sk_skip = NULL;
4225 struct mgmt_cp_remove_adv_monitor *cp;
4227 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4231 if (cp->monitor_handle)
4235 ev.monitor_handle = cpu_to_le16(handle);
4237 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4240 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4241 void *data, u16 len)
4243 struct adv_monitor *monitor = NULL;
4244 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4247 __u32 supported = 0;
4249 __u16 num_handles = 0;
4250 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4252 BT_DBG("request for %s", hdev->name);
4256 if (msft_monitor_supported(hdev))
4257 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4259 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4260 handles[num_handles++] = monitor->handle;
4262 hci_dev_unlock(hdev);
4264 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4265 rp = kmalloc(rp_size, GFP_KERNEL);
4269 /* All supported features are currently enabled */
4270 enabled = supported;
4272 rp->supported_features = cpu_to_le32(supported);
4273 rp->enabled_features = cpu_to_le32(enabled);
4274 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4275 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4276 rp->num_handles = cpu_to_le16(num_handles);
4278 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4280 err = mgmt_cmd_complete(sk, hdev->id,
4281 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4282 MGMT_STATUS_SUCCESS, rp, rp_size);
4289 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4291 struct mgmt_rp_add_adv_patterns_monitor rp;
4292 struct mgmt_pending_cmd *cmd;
4293 struct adv_monitor *monitor;
4298 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4300 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4305 monitor = cmd->user_data;
4306 rp.monitor_handle = cpu_to_le16(monitor->handle);
4309 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4310 hdev->adv_monitors_cnt++;
4311 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4312 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4313 hci_update_background_scan(hdev);
4316 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4317 mgmt_status(status), &rp, sizeof(rp));
4318 mgmt_pending_remove(cmd);
4321 hci_dev_unlock(hdev);
4322 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4323 rp.monitor_handle, status);
4328 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4329 struct adv_monitor *m, u8 status,
4330 void *data, u16 len, u16 op)
4332 struct mgmt_rp_add_adv_patterns_monitor rp;
4333 struct mgmt_pending_cmd *cmd;
4342 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4343 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4344 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4345 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4346 status = MGMT_STATUS_BUSY;
4350 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4352 status = MGMT_STATUS_NO_RESOURCES;
4357 pending = hci_add_adv_monitor(hdev, m, &err);
4359 if (err == -ENOSPC || err == -ENOMEM)
4360 status = MGMT_STATUS_NO_RESOURCES;
4361 else if (err == -EINVAL)
4362 status = MGMT_STATUS_INVALID_PARAMS;
4364 status = MGMT_STATUS_FAILED;
4366 mgmt_pending_remove(cmd);
4371 mgmt_pending_remove(cmd);
4372 rp.monitor_handle = cpu_to_le16(m->handle);
4373 mgmt_adv_monitor_added(sk, hdev, m->handle);
4374 m->state = ADV_MONITOR_STATE_REGISTERED;
4375 hdev->adv_monitors_cnt++;
4377 hci_dev_unlock(hdev);
4378 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4382 hci_dev_unlock(hdev);
4387 hci_free_adv_monitor(hdev, m);
4388 hci_dev_unlock(hdev);
4389 return mgmt_cmd_status(sk, hdev->id, op, status);
4392 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4393 struct mgmt_adv_rssi_thresholds *rssi)
4396 m->rssi.low_threshold = rssi->low_threshold;
4397 m->rssi.low_threshold_timeout =
4398 __le16_to_cpu(rssi->low_threshold_timeout);
4399 m->rssi.high_threshold = rssi->high_threshold;
4400 m->rssi.high_threshold_timeout =
4401 __le16_to_cpu(rssi->high_threshold_timeout);
4402 m->rssi.sampling_period = rssi->sampling_period;
4404 /* Default values. These numbers are the least constricting
4405 * parameters for MSFT API to work, so it behaves as if there
4406 * are no rssi parameter to consider. May need to be changed
4407 * if other API are to be supported.
4409 m->rssi.low_threshold = -127;
4410 m->rssi.low_threshold_timeout = 60;
4411 m->rssi.high_threshold = -127;
4412 m->rssi.high_threshold_timeout = 0;
4413 m->rssi.sampling_period = 0;
4417 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4418 struct mgmt_adv_pattern *patterns)
4420 u8 offset = 0, length = 0;
4421 struct adv_pattern *p = NULL;
4424 for (i = 0; i < pattern_count; i++) {
4425 offset = patterns[i].offset;
4426 length = patterns[i].length;
4427 if (offset >= HCI_MAX_AD_LENGTH ||
4428 length > HCI_MAX_AD_LENGTH ||
4429 (offset + length) > HCI_MAX_AD_LENGTH)
4430 return MGMT_STATUS_INVALID_PARAMS;
4432 p = kmalloc(sizeof(*p), GFP_KERNEL);
4434 return MGMT_STATUS_NO_RESOURCES;
4436 p->ad_type = patterns[i].ad_type;
4437 p->offset = patterns[i].offset;
4438 p->length = patterns[i].length;
4439 memcpy(p->value, patterns[i].value, p->length);
4441 INIT_LIST_HEAD(&p->list);
4442 list_add(&p->list, &m->patterns);
4445 return MGMT_STATUS_SUCCESS;
4448 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4449 void *data, u16 len)
4451 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4452 struct adv_monitor *m = NULL;
4453 u8 status = MGMT_STATUS_SUCCESS;
4454 size_t expected_size = sizeof(*cp);
4456 BT_DBG("request for %s", hdev->name);
4458 if (len <= sizeof(*cp)) {
4459 status = MGMT_STATUS_INVALID_PARAMS;
4463 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4464 if (len != expected_size) {
4465 status = MGMT_STATUS_INVALID_PARAMS;
4469 m = kzalloc(sizeof(*m), GFP_KERNEL);
4471 status = MGMT_STATUS_NO_RESOURCES;
4475 INIT_LIST_HEAD(&m->patterns);
4477 parse_adv_monitor_rssi(m, NULL);
4478 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4481 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4482 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4485 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4486 void *data, u16 len)
4488 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4489 struct adv_monitor *m = NULL;
4490 u8 status = MGMT_STATUS_SUCCESS;
4491 size_t expected_size = sizeof(*cp);
4493 BT_DBG("request for %s", hdev->name);
4495 if (len <= sizeof(*cp)) {
4496 status = MGMT_STATUS_INVALID_PARAMS;
4500 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4501 if (len != expected_size) {
4502 status = MGMT_STATUS_INVALID_PARAMS;
4506 m = kzalloc(sizeof(*m), GFP_KERNEL);
4508 status = MGMT_STATUS_NO_RESOURCES;
4512 INIT_LIST_HEAD(&m->patterns);
4514 parse_adv_monitor_rssi(m, &cp->rssi);
4515 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4518 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4519 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4522 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4524 struct mgmt_rp_remove_adv_monitor rp;
4525 struct mgmt_cp_remove_adv_monitor *cp;
4526 struct mgmt_pending_cmd *cmd;
4531 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4536 rp.monitor_handle = cp->monitor_handle;
4539 hci_update_background_scan(hdev);
4541 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4542 mgmt_status(status), &rp, sizeof(rp));
4543 mgmt_pending_remove(cmd);
4546 hci_dev_unlock(hdev);
4547 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4548 rp.monitor_handle, status);
4553 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4554 void *data, u16 len)
4556 struct mgmt_cp_remove_adv_monitor *cp = data;
4557 struct mgmt_rp_remove_adv_monitor rp;
4558 struct mgmt_pending_cmd *cmd;
4559 u16 handle = __le16_to_cpu(cp->monitor_handle);
4563 BT_DBG("request for %s", hdev->name);
4564 rp.monitor_handle = cp->monitor_handle;
4568 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4569 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4570 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4571 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4572 status = MGMT_STATUS_BUSY;
4576 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4578 status = MGMT_STATUS_NO_RESOURCES;
4583 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4585 pending = hci_remove_all_adv_monitor(hdev, &err);
4588 mgmt_pending_remove(cmd);
4591 status = MGMT_STATUS_INVALID_INDEX;
4593 status = MGMT_STATUS_FAILED;
4598 /* monitor can be removed without forwarding request to controller */
4600 mgmt_pending_remove(cmd);
4601 hci_dev_unlock(hdev);
4603 return mgmt_cmd_complete(sk, hdev->id,
4604 MGMT_OP_REMOVE_ADV_MONITOR,
4605 MGMT_STATUS_SUCCESS,
4609 hci_dev_unlock(hdev);
4613 hci_dev_unlock(hdev);
4614 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4618 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4619 u16 opcode, struct sk_buff *skb)
4621 struct mgmt_rp_read_local_oob_data mgmt_rp;
4622 size_t rp_size = sizeof(mgmt_rp);
4623 struct mgmt_pending_cmd *cmd;
4625 bt_dev_dbg(hdev, "status %u", status);
4627 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4631 if (status || !skb) {
4632 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4633 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4637 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4639 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4640 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
4642 if (skb->len < sizeof(*rp)) {
4643 mgmt_cmd_status(cmd->sk, hdev->id,
4644 MGMT_OP_READ_LOCAL_OOB_DATA,
4645 MGMT_STATUS_FAILED);
4649 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4650 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
4652 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4654 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4656 if (skb->len < sizeof(*rp)) {
4657 mgmt_cmd_status(cmd->sk, hdev->id,
4658 MGMT_OP_READ_LOCAL_OOB_DATA,
4659 MGMT_STATUS_FAILED);
4663 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4664 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4666 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4667 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4670 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4671 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4674 mgmt_pending_remove(cmd);
4677 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4678 void *data, u16 data_len)
4680 struct mgmt_pending_cmd *cmd;
4681 struct hci_request req;
4684 bt_dev_dbg(hdev, "sock %p", sk);
4688 if (!hdev_is_powered(hdev)) {
4689 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4690 MGMT_STATUS_NOT_POWERED);
4694 if (!lmp_ssp_capable(hdev)) {
4695 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4696 MGMT_STATUS_NOT_SUPPORTED);
4700 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4701 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4706 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4712 hci_req_init(&req, hdev);
4714 if (bredr_sc_enabled(hdev))
4715 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4717 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4719 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4721 mgmt_pending_remove(cmd);
4724 hci_dev_unlock(hdev);
4728 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4729 void *data, u16 len)
4731 struct mgmt_addr_info *addr = data;
4734 bt_dev_dbg(hdev, "sock %p", sk);
4736 if (!bdaddr_type_is_valid(addr->type))
4737 return mgmt_cmd_complete(sk, hdev->id,
4738 MGMT_OP_ADD_REMOTE_OOB_DATA,
4739 MGMT_STATUS_INVALID_PARAMS,
4740 addr, sizeof(*addr));
4744 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4745 struct mgmt_cp_add_remote_oob_data *cp = data;
4748 if (cp->addr.type != BDADDR_BREDR) {
4749 err = mgmt_cmd_complete(sk, hdev->id,
4750 MGMT_OP_ADD_REMOTE_OOB_DATA,
4751 MGMT_STATUS_INVALID_PARAMS,
4752 &cp->addr, sizeof(cp->addr));
4756 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4757 cp->addr.type, cp->hash,
4758 cp->rand, NULL, NULL);
4760 status = MGMT_STATUS_FAILED;
4762 status = MGMT_STATUS_SUCCESS;
4764 err = mgmt_cmd_complete(sk, hdev->id,
4765 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4766 &cp->addr, sizeof(cp->addr));
4767 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4768 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4769 u8 *rand192, *hash192, *rand256, *hash256;
4772 if (bdaddr_type_is_le(cp->addr.type)) {
4773 /* Enforce zero-valued 192-bit parameters as
4774 * long as legacy SMP OOB isn't implemented.
4776 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4777 memcmp(cp->hash192, ZERO_KEY, 16)) {
4778 err = mgmt_cmd_complete(sk, hdev->id,
4779 MGMT_OP_ADD_REMOTE_OOB_DATA,
4780 MGMT_STATUS_INVALID_PARAMS,
4781 addr, sizeof(*addr));
4788 /* In case one of the P-192 values is set to zero,
4789 * then just disable OOB data for P-192.
4791 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4792 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4796 rand192 = cp->rand192;
4797 hash192 = cp->hash192;
4801 /* In case one of the P-256 values is set to zero, then just
4802 * disable OOB data for P-256.
4804 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4805 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4809 rand256 = cp->rand256;
4810 hash256 = cp->hash256;
4813 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4814 cp->addr.type, hash192, rand192,
4817 status = MGMT_STATUS_FAILED;
4819 status = MGMT_STATUS_SUCCESS;
4821 err = mgmt_cmd_complete(sk, hdev->id,
4822 MGMT_OP_ADD_REMOTE_OOB_DATA,
4823 status, &cp->addr, sizeof(cp->addr));
4825 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4827 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4828 MGMT_STATUS_INVALID_PARAMS);
4832 hci_dev_unlock(hdev);
4836 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4837 void *data, u16 len)
4839 struct mgmt_cp_remove_remote_oob_data *cp = data;
4843 bt_dev_dbg(hdev, "sock %p", sk);
4845 if (cp->addr.type != BDADDR_BREDR)
4846 return mgmt_cmd_complete(sk, hdev->id,
4847 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4848 MGMT_STATUS_INVALID_PARAMS,
4849 &cp->addr, sizeof(cp->addr));
4853 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4854 hci_remote_oob_data_clear(hdev);
4855 status = MGMT_STATUS_SUCCESS;
4859 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4861 status = MGMT_STATUS_INVALID_PARAMS;
4863 status = MGMT_STATUS_SUCCESS;
4866 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4867 status, &cp->addr, sizeof(cp->addr));
4869 hci_dev_unlock(hdev);
4873 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4875 struct mgmt_pending_cmd *cmd;
4877 bt_dev_dbg(hdev, "status %u", status);
4881 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4883 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4886 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4889 cmd->cmd_complete(cmd, mgmt_status(status));
4890 mgmt_pending_remove(cmd);
4893 hci_dev_unlock(hdev);
4895 /* Handle suspend notifier */
4896 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4897 hdev->suspend_tasks)) {
4898 bt_dev_dbg(hdev, "Unpaused discovery");
4899 wake_up(&hdev->suspend_wait_q);
4903 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4904 uint8_t *mgmt_status)
4907 case DISCOV_TYPE_LE:
4908 *mgmt_status = mgmt_le_support(hdev);
4912 case DISCOV_TYPE_INTERLEAVED:
4913 *mgmt_status = mgmt_le_support(hdev);
4917 case DISCOV_TYPE_BREDR:
4918 *mgmt_status = mgmt_bredr_support(hdev);
4923 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4930 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4931 u16 op, void *data, u16 len)
4933 struct mgmt_cp_start_discovery *cp = data;
4934 struct mgmt_pending_cmd *cmd;
4938 bt_dev_dbg(hdev, "sock %p", sk);
4942 if (!hdev_is_powered(hdev)) {
4943 err = mgmt_cmd_complete(sk, hdev->id, op,
4944 MGMT_STATUS_NOT_POWERED,
4945 &cp->type, sizeof(cp->type));
4949 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4950 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4951 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4952 &cp->type, sizeof(cp->type));
4956 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4957 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4958 &cp->type, sizeof(cp->type));
4962 /* Can't start discovery when it is paused */
4963 if (hdev->discovery_paused) {
4964 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4965 &cp->type, sizeof(cp->type));
4969 /* Clear the discovery filter first to free any previously
4970 * allocated memory for the UUID list.
4972 hci_discovery_filter_clear(hdev);
4974 hdev->discovery.type = cp->type;
4975 hdev->discovery.report_invalid_rssi = false;
4976 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4977 hdev->discovery.limited = true;
4979 hdev->discovery.limited = false;
4981 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4987 cmd->cmd_complete = generic_cmd_complete;
4989 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4990 queue_work(hdev->req_workqueue, &hdev->discov_update);
4994 hci_dev_unlock(hdev);
4998 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4999 void *data, u16 len)
5001 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5005 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5006 void *data, u16 len)
5008 return start_discovery_internal(sk, hdev,
5009 MGMT_OP_START_LIMITED_DISCOVERY,
5013 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
5016 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
5020 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5021 void *data, u16 len)
5023 struct mgmt_cp_start_service_discovery *cp = data;
5024 struct mgmt_pending_cmd *cmd;
5025 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5026 u16 uuid_count, expected_len;
5030 bt_dev_dbg(hdev, "sock %p", sk);
5034 if (!hdev_is_powered(hdev)) {
5035 err = mgmt_cmd_complete(sk, hdev->id,
5036 MGMT_OP_START_SERVICE_DISCOVERY,
5037 MGMT_STATUS_NOT_POWERED,
5038 &cp->type, sizeof(cp->type));
5042 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5043 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5044 err = mgmt_cmd_complete(sk, hdev->id,
5045 MGMT_OP_START_SERVICE_DISCOVERY,
5046 MGMT_STATUS_BUSY, &cp->type,
5051 if (hdev->discovery_paused) {
5052 err = mgmt_cmd_complete(sk, hdev->id,
5053 MGMT_OP_START_SERVICE_DISCOVERY,
5054 MGMT_STATUS_BUSY, &cp->type,
5059 uuid_count = __le16_to_cpu(cp->uuid_count);
5060 if (uuid_count > max_uuid_count) {
5061 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5063 err = mgmt_cmd_complete(sk, hdev->id,
5064 MGMT_OP_START_SERVICE_DISCOVERY,
5065 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5070 expected_len = sizeof(*cp) + uuid_count * 16;
5071 if (expected_len != len) {
5072 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5074 err = mgmt_cmd_complete(sk, hdev->id,
5075 MGMT_OP_START_SERVICE_DISCOVERY,
5076 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5081 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5082 err = mgmt_cmd_complete(sk, hdev->id,
5083 MGMT_OP_START_SERVICE_DISCOVERY,
5084 status, &cp->type, sizeof(cp->type));
5088 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5095 cmd->cmd_complete = service_discovery_cmd_complete;
5097 /* Clear the discovery filter first to free any previously
5098 * allocated memory for the UUID list.
5100 hci_discovery_filter_clear(hdev);
5102 hdev->discovery.result_filtering = true;
5103 hdev->discovery.type = cp->type;
5104 hdev->discovery.rssi = cp->rssi;
5105 hdev->discovery.uuid_count = uuid_count;
5107 if (uuid_count > 0) {
5108 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5110 if (!hdev->discovery.uuids) {
5111 err = mgmt_cmd_complete(sk, hdev->id,
5112 MGMT_OP_START_SERVICE_DISCOVERY,
5114 &cp->type, sizeof(cp->type));
5115 mgmt_pending_remove(cmd);
5120 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5121 queue_work(hdev->req_workqueue, &hdev->discov_update);
5125 hci_dev_unlock(hdev);
5129 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5131 struct mgmt_pending_cmd *cmd;
5133 bt_dev_dbg(hdev, "status %u", status);
5137 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5139 cmd->cmd_complete(cmd, mgmt_status(status));
5140 mgmt_pending_remove(cmd);
5143 hci_dev_unlock(hdev);
5145 /* Handle suspend notifier */
5146 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5147 bt_dev_dbg(hdev, "Paused discovery");
5148 wake_up(&hdev->suspend_wait_q);
5152 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5155 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5156 struct mgmt_pending_cmd *cmd;
5159 bt_dev_dbg(hdev, "sock %p", sk);
5163 if (!hci_discovery_active(hdev)) {
5164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5165 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5166 sizeof(mgmt_cp->type));
5170 if (hdev->discovery.type != mgmt_cp->type) {
5171 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5172 MGMT_STATUS_INVALID_PARAMS,
5173 &mgmt_cp->type, sizeof(mgmt_cp->type));
5177 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5183 cmd->cmd_complete = generic_cmd_complete;
5185 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5186 queue_work(hdev->req_workqueue, &hdev->discov_update);
5190 hci_dev_unlock(hdev);
5194 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5197 struct mgmt_cp_confirm_name *cp = data;
5198 struct inquiry_entry *e;
5201 bt_dev_dbg(hdev, "sock %p", sk);
5205 if (!hci_discovery_active(hdev)) {
5206 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5207 MGMT_STATUS_FAILED, &cp->addr,
5212 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5214 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5215 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5220 if (cp->name_known) {
5221 e->name_state = NAME_KNOWN;
5224 e->name_state = NAME_NEEDED;
5225 hci_inquiry_cache_update_resolve(hdev, e);
5228 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5229 &cp->addr, sizeof(cp->addr));
5232 hci_dev_unlock(hdev);
5236 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5239 struct mgmt_cp_block_device *cp = data;
5243 bt_dev_dbg(hdev, "sock %p", sk);
5245 if (!bdaddr_type_is_valid(cp->addr.type))
5246 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5247 MGMT_STATUS_INVALID_PARAMS,
5248 &cp->addr, sizeof(cp->addr));
5252 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5255 status = MGMT_STATUS_FAILED;
5259 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5261 status = MGMT_STATUS_SUCCESS;
5264 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5265 &cp->addr, sizeof(cp->addr));
5267 hci_dev_unlock(hdev);
5272 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5275 struct mgmt_cp_unblock_device *cp = data;
5279 bt_dev_dbg(hdev, "sock %p", sk);
5281 if (!bdaddr_type_is_valid(cp->addr.type))
5282 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5283 MGMT_STATUS_INVALID_PARAMS,
5284 &cp->addr, sizeof(cp->addr));
5288 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5291 status = MGMT_STATUS_INVALID_PARAMS;
5295 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5297 status = MGMT_STATUS_SUCCESS;
5300 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5301 &cp->addr, sizeof(cp->addr));
5303 hci_dev_unlock(hdev);
5308 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5311 struct mgmt_cp_set_device_id *cp = data;
5312 struct hci_request req;
5316 bt_dev_dbg(hdev, "sock %p", sk);
5318 source = __le16_to_cpu(cp->source);
5320 if (source > 0x0002)
5321 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5322 MGMT_STATUS_INVALID_PARAMS);
5326 hdev->devid_source = source;
5327 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5328 hdev->devid_product = __le16_to_cpu(cp->product);
5329 hdev->devid_version = __le16_to_cpu(cp->version);
5331 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5334 hci_req_init(&req, hdev);
5335 __hci_req_update_eir(&req);
5336 hci_req_run(&req, NULL);
5338 hci_dev_unlock(hdev);
5343 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5346 bt_dev_dbg(hdev, "status %u", status);
5349 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5352 struct cmd_lookup match = { NULL, hdev };
5353 struct hci_request req;
5355 struct adv_info *adv_instance;
5361 u8 mgmt_err = mgmt_status(status);
5363 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5364 cmd_status_rsp, &mgmt_err);
5368 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5369 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5371 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5373 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5376 new_settings(hdev, match.sk);
5381 /* Handle suspend notifier */
5382 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5383 hdev->suspend_tasks)) {
5384 bt_dev_dbg(hdev, "Paused advertising");
5385 wake_up(&hdev->suspend_wait_q);
5386 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5387 hdev->suspend_tasks)) {
5388 bt_dev_dbg(hdev, "Unpaused advertising");
5389 wake_up(&hdev->suspend_wait_q);
5392 /* If "Set Advertising" was just disabled and instance advertising was
5393 * set up earlier, then re-enable multi-instance advertising.
5395 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5396 list_empty(&hdev->adv_instances))
5399 instance = hdev->cur_adv_instance;
5401 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5402 struct adv_info, list);
5406 instance = adv_instance->instance;
5409 hci_req_init(&req, hdev);
5411 err = __hci_req_schedule_adv_instance(&req, instance, true);
5414 err = hci_req_run(&req, enable_advertising_instance);
5417 bt_dev_err(hdev, "failed to re-configure advertising");
5420 hci_dev_unlock(hdev);
5423 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5426 struct mgmt_mode *cp = data;
5427 struct mgmt_pending_cmd *cmd;
5428 struct hci_request req;
5432 bt_dev_dbg(hdev, "sock %p", sk);
5434 status = mgmt_le_support(hdev);
5436 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5439 /* Enabling the experimental LL Privay support disables support for
5442 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5444 MGMT_STATUS_NOT_SUPPORTED);
5446 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5448 MGMT_STATUS_INVALID_PARAMS);
5450 if (hdev->advertising_paused)
5451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5458 /* The following conditions are ones which mean that we should
5459 * not do any HCI communication but directly send a mgmt
5460 * response to user space (after toggling the flag if
5463 if (!hdev_is_powered(hdev) ||
5464 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5465 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5466 hci_conn_num(hdev, LE_LINK) > 0 ||
5467 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5468 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5472 hdev->cur_adv_instance = 0x00;
5473 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5474 if (cp->val == 0x02)
5475 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5477 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5479 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5480 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5483 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5488 err = new_settings(hdev, sk);
5493 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5494 pending_find(MGMT_OP_SET_LE, hdev)) {
5495 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5500 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5506 hci_req_init(&req, hdev);
5508 if (cp->val == 0x02)
5509 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5511 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5513 cancel_adv_timeout(hdev);
5516 /* Switch to instance "0" for the Set Advertising setting.
5517 * We cannot use update_[adv|scan_rsp]_data() here as the
5518 * HCI_ADVERTISING flag is not yet set.
5520 hdev->cur_adv_instance = 0x00;
5522 if (ext_adv_capable(hdev)) {
5523 __hci_req_start_ext_adv(&req, 0x00);
5525 __hci_req_update_adv_data(&req, 0x00);
5526 __hci_req_update_scan_rsp_data(&req, 0x00);
5527 __hci_req_enable_advertising(&req);
5530 __hci_req_disable_advertising(&req);
5533 err = hci_req_run(&req, set_advertising_complete);
5535 mgmt_pending_remove(cmd);
5538 hci_dev_unlock(hdev);
5542 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5543 void *data, u16 len)
5545 struct mgmt_cp_set_static_address *cp = data;
5548 bt_dev_dbg(hdev, "sock %p", sk);
5550 if (!lmp_le_capable(hdev))
5551 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5552 MGMT_STATUS_NOT_SUPPORTED);
5554 if (hdev_is_powered(hdev))
5555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5556 MGMT_STATUS_REJECTED);
5558 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5559 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5560 return mgmt_cmd_status(sk, hdev->id,
5561 MGMT_OP_SET_STATIC_ADDRESS,
5562 MGMT_STATUS_INVALID_PARAMS);
5564 /* Two most significant bits shall be set */
5565 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5566 return mgmt_cmd_status(sk, hdev->id,
5567 MGMT_OP_SET_STATIC_ADDRESS,
5568 MGMT_STATUS_INVALID_PARAMS);
5573 bacpy(&hdev->static_addr, &cp->bdaddr);
5575 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5579 err = new_settings(hdev, sk);
5582 hci_dev_unlock(hdev);
5586 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5587 void *data, u16 len)
5589 struct mgmt_cp_set_scan_params *cp = data;
5590 __u16 interval, window;
5593 bt_dev_dbg(hdev, "sock %p", sk);
5595 if (!lmp_le_capable(hdev))
5596 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5597 MGMT_STATUS_NOT_SUPPORTED);
5599 interval = __le16_to_cpu(cp->interval);
5601 if (interval < 0x0004 || interval > 0x4000)
5602 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5603 MGMT_STATUS_INVALID_PARAMS);
5605 window = __le16_to_cpu(cp->window);
5607 if (window < 0x0004 || window > 0x4000)
5608 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5609 MGMT_STATUS_INVALID_PARAMS);
5611 if (window > interval)
5612 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5613 MGMT_STATUS_INVALID_PARAMS);
5617 hdev->le_scan_interval = interval;
5618 hdev->le_scan_window = window;
5620 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5623 /* If background scan is running, restart it so new parameters are
5626 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5627 hdev->discovery.state == DISCOVERY_STOPPED) {
5628 struct hci_request req;
5630 hci_req_init(&req, hdev);
5632 hci_req_add_le_scan_disable(&req, false);
5633 hci_req_add_le_passive_scan(&req);
5635 hci_req_run(&req, NULL);
5638 hci_dev_unlock(hdev);
5643 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5646 struct mgmt_pending_cmd *cmd;
5648 bt_dev_dbg(hdev, "status 0x%02x", status);
5652 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5657 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5658 mgmt_status(status));
5660 struct mgmt_mode *cp = cmd->param;
5663 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5665 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5667 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5668 new_settings(hdev, cmd->sk);
5671 mgmt_pending_remove(cmd);
5674 hci_dev_unlock(hdev);
5677 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5678 void *data, u16 len)
5680 struct mgmt_mode *cp = data;
5681 struct mgmt_pending_cmd *cmd;
5682 struct hci_request req;
5685 bt_dev_dbg(hdev, "sock %p", sk);
5687 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5688 hdev->hci_ver < BLUETOOTH_VER_1_2)
5689 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5690 MGMT_STATUS_NOT_SUPPORTED);
5692 if (cp->val != 0x00 && cp->val != 0x01)
5693 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5694 MGMT_STATUS_INVALID_PARAMS);
5698 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5699 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5704 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5705 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5710 if (!hdev_is_powered(hdev)) {
5711 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5712 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5714 new_settings(hdev, sk);
5718 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5725 hci_req_init(&req, hdev);
5727 __hci_req_write_fast_connectable(&req, cp->val);
5729 err = hci_req_run(&req, fast_connectable_complete);
5731 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5732 MGMT_STATUS_FAILED);
5733 mgmt_pending_remove(cmd);
5737 hci_dev_unlock(hdev);
5742 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5744 struct mgmt_pending_cmd *cmd;
5746 bt_dev_dbg(hdev, "status 0x%02x", status);
5750 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5755 u8 mgmt_err = mgmt_status(status);
5757 /* We need to restore the flag if related HCI commands
5760 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5762 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5764 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5765 new_settings(hdev, cmd->sk);
5768 mgmt_pending_remove(cmd);
5771 hci_dev_unlock(hdev);
5774 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5776 struct mgmt_mode *cp = data;
5777 struct mgmt_pending_cmd *cmd;
5778 struct hci_request req;
5781 bt_dev_dbg(hdev, "sock %p", sk);
5783 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5785 MGMT_STATUS_NOT_SUPPORTED);
5787 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5789 MGMT_STATUS_REJECTED);
5791 if (cp->val != 0x00 && cp->val != 0x01)
5792 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5793 MGMT_STATUS_INVALID_PARAMS);
5797 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5798 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5802 if (!hdev_is_powered(hdev)) {
5804 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5805 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5806 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5807 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5808 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5811 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5813 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5817 err = new_settings(hdev, sk);
5821 /* Reject disabling when powered on */
5823 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5824 MGMT_STATUS_REJECTED);
5827 /* When configuring a dual-mode controller to operate
5828 * with LE only and using a static address, then switching
5829 * BR/EDR back on is not allowed.
5831 * Dual-mode controllers shall operate with the public
5832 * address as its identity address for BR/EDR and LE. So
5833 * reject the attempt to create an invalid configuration.
5835 * The same restrictions applies when secure connections
5836 * has been enabled. For BR/EDR this is a controller feature
5837 * while for LE it is a host stack feature. This means that
5838 * switching BR/EDR back on when secure connections has been
5839 * enabled is not a supported transaction.
5841 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5842 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5843 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5844 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5845 MGMT_STATUS_REJECTED);
5850 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5851 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5856 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5862 /* We need to flip the bit already here so that
5863 * hci_req_update_adv_data generates the correct flags.
5865 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5867 hci_req_init(&req, hdev);
5869 __hci_req_write_fast_connectable(&req, false);
5870 __hci_req_update_scan(&req);
5872 /* Since only the advertising data flags will change, there
5873 * is no need to update the scan response data.
5875 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5877 err = hci_req_run(&req, set_bredr_complete);
5879 mgmt_pending_remove(cmd);
5882 hci_dev_unlock(hdev);
5886 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5888 struct mgmt_pending_cmd *cmd;
5889 struct mgmt_mode *cp;
5891 bt_dev_dbg(hdev, "status %u", status);
5895 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5900 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5901 mgmt_status(status));
5909 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5910 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5913 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5914 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5917 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5918 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5922 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5923 new_settings(hdev, cmd->sk);
5926 mgmt_pending_remove(cmd);
5928 hci_dev_unlock(hdev);
5931 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5932 void *data, u16 len)
5934 struct mgmt_mode *cp = data;
5935 struct mgmt_pending_cmd *cmd;
5936 struct hci_request req;
5940 bt_dev_dbg(hdev, "sock %p", sk);
5942 if (!lmp_sc_capable(hdev) &&
5943 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5945 MGMT_STATUS_NOT_SUPPORTED);
5947 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5948 lmp_sc_capable(hdev) &&
5949 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5950 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5951 MGMT_STATUS_REJECTED);
5953 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5954 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5955 MGMT_STATUS_INVALID_PARAMS);
5959 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5960 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5964 changed = !hci_dev_test_and_set_flag(hdev,
5966 if (cp->val == 0x02)
5967 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5969 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5971 changed = hci_dev_test_and_clear_flag(hdev,
5973 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5976 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5981 err = new_settings(hdev, sk);
5986 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5987 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5994 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5995 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5996 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6000 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6006 hci_req_init(&req, hdev);
6007 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
6008 err = hci_req_run(&req, sc_enable_complete);
6010 mgmt_pending_remove(cmd);
6015 hci_dev_unlock(hdev);
6019 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6020 void *data, u16 len)
6022 struct mgmt_mode *cp = data;
6023 bool changed, use_changed;
6026 bt_dev_dbg(hdev, "sock %p", sk);
6028 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6030 MGMT_STATUS_INVALID_PARAMS);
6035 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6037 changed = hci_dev_test_and_clear_flag(hdev,
6038 HCI_KEEP_DEBUG_KEYS);
6040 if (cp->val == 0x02)
6041 use_changed = !hci_dev_test_and_set_flag(hdev,
6042 HCI_USE_DEBUG_KEYS);
6044 use_changed = hci_dev_test_and_clear_flag(hdev,
6045 HCI_USE_DEBUG_KEYS);
6047 if (hdev_is_powered(hdev) && use_changed &&
6048 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6049 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6050 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6051 sizeof(mode), &mode);
6054 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6059 err = new_settings(hdev, sk);
6062 hci_dev_unlock(hdev);
6066 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6069 struct mgmt_cp_set_privacy *cp = cp_data;
6073 bt_dev_dbg(hdev, "sock %p", sk);
6075 if (!lmp_le_capable(hdev))
6076 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6077 MGMT_STATUS_NOT_SUPPORTED);
6079 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6080 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6081 MGMT_STATUS_INVALID_PARAMS);
6084 /* commenting out since set privacy command is always rejected
6085 * if this condition is enabled.
6087 if (hdev_is_powered(hdev))
6088 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6089 MGMT_STATUS_REJECTED);
6094 /* If user space supports this command it is also expected to
6095 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6097 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6100 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6101 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6102 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6103 hci_adv_instances_set_rpa_expired(hdev, true);
6104 if (cp->privacy == 0x02)
6105 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6107 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6109 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6110 memset(hdev->irk, 0, sizeof(hdev->irk));
6111 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6112 hci_adv_instances_set_rpa_expired(hdev, false);
6113 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6116 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6121 err = new_settings(hdev, sk);
6124 hci_dev_unlock(hdev);
6128 static bool irk_is_valid(struct mgmt_irk_info *irk)
6130 switch (irk->addr.type) {
6131 case BDADDR_LE_PUBLIC:
6134 case BDADDR_LE_RANDOM:
6135 /* Two most significant bits shall be set */
6136 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6144 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6147 struct mgmt_cp_load_irks *cp = cp_data;
6148 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6149 sizeof(struct mgmt_irk_info));
6150 u16 irk_count, expected_len;
6153 bt_dev_dbg(hdev, "sock %p", sk);
6155 if (!lmp_le_capable(hdev))
6156 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6157 MGMT_STATUS_NOT_SUPPORTED);
6159 irk_count = __le16_to_cpu(cp->irk_count);
6160 if (irk_count > max_irk_count) {
6161 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6163 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6164 MGMT_STATUS_INVALID_PARAMS);
6167 expected_len = struct_size(cp, irks, irk_count);
6168 if (expected_len != len) {
6169 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6171 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6172 MGMT_STATUS_INVALID_PARAMS);
6175 bt_dev_dbg(hdev, "irk_count %u", irk_count);
6177 for (i = 0; i < irk_count; i++) {
6178 struct mgmt_irk_info *key = &cp->irks[i];
6180 if (!irk_is_valid(key))
6181 return mgmt_cmd_status(sk, hdev->id,
6183 MGMT_STATUS_INVALID_PARAMS);
6188 hci_smp_irks_clear(hdev);
6190 for (i = 0; i < irk_count; i++) {
6191 struct mgmt_irk_info *irk = &cp->irks[i];
6193 if (hci_is_blocked_key(hdev,
6194 HCI_BLOCKED_KEY_TYPE_IRK,
6196 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6201 hci_add_irk(hdev, &irk->addr.bdaddr,
6202 le_addr_type(irk->addr.type), irk->val,
6206 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6208 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6210 hci_dev_unlock(hdev);
6216 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
6217 void *data, u16 len)
6219 struct mgmt_cp_set_advertising_params *cp = data;
6224 BT_DBG("%s", hdev->name);
6226 if (!lmp_le_capable(hdev))
6227 return mgmt_cmd_status(sk, hdev->id,
6228 MGMT_OP_SET_ADVERTISING_PARAMS,
6229 MGMT_STATUS_NOT_SUPPORTED);
6231 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6232 return mgmt_cmd_status(sk, hdev->id,
6233 MGMT_OP_SET_ADVERTISING_PARAMS,
6236 min_interval = __le16_to_cpu(cp->interval_min);
6237 max_interval = __le16_to_cpu(cp->interval_max);
6239 if (min_interval > max_interval ||
6240 min_interval < 0x0020 || max_interval > 0x4000)
6241 return mgmt_cmd_status(sk, hdev->id,
6242 MGMT_OP_SET_ADVERTISING_PARAMS,
6243 MGMT_STATUS_INVALID_PARAMS);
6247 hdev->le_adv_min_interval = min_interval;
6248 hdev->le_adv_max_interval = max_interval;
6249 hdev->adv_filter_policy = cp->filter_policy;
6250 hdev->adv_type = cp->type;
6252 err = mgmt_cmd_complete(sk, hdev->id,
6253 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
6255 hci_dev_unlock(hdev);
6260 static void set_advertising_data_complete(struct hci_dev *hdev,
6261 u8 status, u16 opcode)
6263 struct mgmt_cp_set_advertising_data *cp;
6264 struct mgmt_pending_cmd *cmd;
6266 BT_DBG("status 0x%02x", status);
6270 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
6277 mgmt_cmd_status(cmd->sk, hdev->id,
6278 MGMT_OP_SET_ADVERTISING_DATA,
6279 mgmt_status(status));
6281 mgmt_cmd_complete(cmd->sk, hdev->id,
6282 MGMT_OP_SET_ADVERTISING_DATA, 0,
6285 mgmt_pending_remove(cmd);
6288 hci_dev_unlock(hdev);
6291 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6292 void *data, u16 len)
6294 struct mgmt_pending_cmd *cmd;
6295 struct hci_request req;
6296 struct mgmt_cp_set_advertising_data *cp = data;
6297 struct hci_cp_le_set_adv_data adv;
6300 BT_DBG("%s", hdev->name);
6302 if (!lmp_le_capable(hdev)) {
6303 return mgmt_cmd_status(sk, hdev->id,
6304 MGMT_OP_SET_ADVERTISING_DATA,
6305 MGMT_STATUS_NOT_SUPPORTED);
6310 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6311 err = mgmt_cmd_status(sk, hdev->id,
6312 MGMT_OP_SET_ADVERTISING_DATA,
6317 if (len > HCI_MAX_AD_LENGTH) {
6318 err = mgmt_cmd_status(sk, hdev->id,
6319 MGMT_OP_SET_ADVERTISING_DATA,
6320 MGMT_STATUS_INVALID_PARAMS);
6324 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6331 hci_req_init(&req, hdev);
6333 memset(&adv, 0, sizeof(adv));
6334 memcpy(adv.data, cp->data, len);
6337 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6339 err = hci_req_run(&req, set_advertising_data_complete);
6341 mgmt_pending_remove(cmd);
6344 hci_dev_unlock(hdev);
6349 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6352 struct mgmt_cp_set_scan_rsp_data *cp;
6353 struct mgmt_pending_cmd *cmd;
6355 BT_DBG("status 0x%02x", status);
6359 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6366 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6367 mgmt_status(status));
6369 mgmt_cmd_complete(cmd->sk, hdev->id,
6370 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6373 mgmt_pending_remove(cmd);
6376 hci_dev_unlock(hdev);
6379 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6382 struct mgmt_pending_cmd *cmd;
6383 struct hci_request req;
6384 struct mgmt_cp_set_scan_rsp_data *cp = data;
6385 struct hci_cp_le_set_scan_rsp_data rsp;
6388 BT_DBG("%s", hdev->name);
6390 if (!lmp_le_capable(hdev))
6391 return mgmt_cmd_status(sk, hdev->id,
6392 MGMT_OP_SET_SCAN_RSP_DATA,
6393 MGMT_STATUS_NOT_SUPPORTED);
6397 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6398 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6403 if (len > HCI_MAX_AD_LENGTH) {
6404 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6405 MGMT_STATUS_INVALID_PARAMS);
6409 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6415 hci_req_init(&req, hdev);
6417 memset(&rsp, 0, sizeof(rsp));
6418 memcpy(rsp.data, cp->data, len);
6421 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6423 err = hci_req_run(&req, set_scan_rsp_data_complete);
6425 mgmt_pending_remove(cmd);
6428 hci_dev_unlock(hdev);
6433 /* Adv White List feature */
6434 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6436 struct mgmt_cp_add_dev_white_list *cp;
6437 struct mgmt_pending_cmd *cmd;
6439 BT_DBG("status 0x%02x", status);
6443 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
6450 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6451 mgmt_status(status));
6453 mgmt_cmd_complete(cmd->sk, hdev->id,
6454 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
6456 mgmt_pending_remove(cmd);
6459 hci_dev_unlock(hdev);
6462 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
6463 void *data, u16 len)
6465 struct mgmt_pending_cmd *cmd;
6466 struct mgmt_cp_add_dev_white_list *cp = data;
6467 struct hci_request req;
6470 BT_DBG("%s", hdev->name);
6472 if (!lmp_le_capable(hdev))
6473 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6474 MGMT_STATUS_NOT_SUPPORTED);
6476 if (!hdev_is_powered(hdev))
6477 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6478 MGMT_STATUS_REJECTED);
6482 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
6483 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6488 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
6494 hci_req_init(&req, hdev);
6496 hci_req_add(&req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(*cp), cp);
6498 err = hci_req_run(&req, add_white_list_complete);
6500 mgmt_pending_remove(cmd);
6505 hci_dev_unlock(hdev);
6510 static void remove_from_white_list_complete(struct hci_dev *hdev,
6511 u8 status, u16 opcode)
6513 struct mgmt_cp_remove_dev_from_white_list *cp;
6514 struct mgmt_pending_cmd *cmd;
6516 BT_DBG("status 0x%02x", status);
6520 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
6527 mgmt_cmd_status(cmd->sk, hdev->id,
6528 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6529 mgmt_status(status));
6531 mgmt_cmd_complete(cmd->sk, hdev->id,
6532 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
6535 mgmt_pending_remove(cmd);
6538 hci_dev_unlock(hdev);
6541 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
6542 void *data, u16 len)
6544 struct mgmt_pending_cmd *cmd;
6545 struct mgmt_cp_remove_dev_from_white_list *cp = data;
6546 struct hci_request req;
6549 BT_DBG("%s", hdev->name);
6551 if (!lmp_le_capable(hdev))
6552 return mgmt_cmd_status(sk, hdev->id,
6553 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6554 MGMT_STATUS_NOT_SUPPORTED);
6556 if (!hdev_is_powered(hdev))
6557 return mgmt_cmd_status(sk, hdev->id,
6558 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6559 MGMT_STATUS_REJECTED);
6563 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
6564 err = mgmt_cmd_status(sk, hdev->id,
6565 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6570 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6577 hci_req_init(&req, hdev);
6579 hci_req_add(&req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(*cp), cp);
6581 err = hci_req_run(&req, remove_from_white_list_complete);
6583 mgmt_pending_remove(cmd);
6588 hci_dev_unlock(hdev);
6593 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
6596 struct mgmt_pending_cmd *cmd;
6598 BT_DBG("status 0x%02x", status);
6602 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
6607 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6608 mgmt_status(status));
6610 mgmt_cmd_complete(cmd->sk, hdev->id,
6611 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6614 mgmt_pending_remove(cmd);
6617 hci_dev_unlock(hdev);
6620 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
6621 void *data, u16 len)
6623 struct mgmt_pending_cmd *cmd;
6624 struct hci_request req;
6627 BT_DBG("%s", hdev->name);
6629 if (!lmp_le_capable(hdev))
6630 return mgmt_cmd_status(sk, hdev->id,
6631 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6632 MGMT_STATUS_NOT_SUPPORTED);
6634 if (!hdev_is_powered(hdev))
6635 return mgmt_cmd_status(sk, hdev->id,
6636 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6637 MGMT_STATUS_REJECTED);
6641 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
6642 err = mgmt_cmd_status(sk, hdev->id,
6643 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6648 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6655 hci_req_init(&req, hdev);
6657 hci_req_add(&req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
6659 err = hci_req_run(&req, clear_white_list_complete);
6661 mgmt_pending_remove(cmd);
6666 hci_dev_unlock(hdev);
6671 static void set_rssi_threshold_complete(struct hci_dev *hdev,
6672 u8 status, u16 opcode)
6674 struct mgmt_pending_cmd *cmd;
6676 BT_DBG("status 0x%02x", status);
6680 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6685 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6686 mgmt_status(status));
6688 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6691 mgmt_pending_remove(cmd);
6694 hci_dev_unlock(hdev);
6697 static void set_rssi_disable_complete(struct hci_dev *hdev,
6698 u8 status, u16 opcode)
6700 struct mgmt_pending_cmd *cmd;
6702 BT_DBG("status 0x%02x", status);
6706 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6711 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6712 mgmt_status(status));
6714 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6717 mgmt_pending_remove(cmd);
6720 hci_dev_unlock(hdev);
6723 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6724 void *data, u16 len)
6727 struct hci_cp_set_rssi_threshold th = { 0, };
6728 struct mgmt_cp_set_enable_rssi *cp = data;
6729 struct hci_conn *conn;
6730 struct mgmt_pending_cmd *cmd;
6731 struct hci_request req;
6736 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6738 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6739 MGMT_STATUS_FAILED);
6743 if (!lmp_le_capable(hdev)) {
6744 mgmt_pending_remove(cmd);
6745 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6746 MGMT_STATUS_NOT_SUPPORTED);
6750 if (!hdev_is_powered(hdev)) {
6751 BT_DBG("%s", hdev->name);
6752 mgmt_pending_remove(cmd);
6753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6754 MGMT_STATUS_NOT_POWERED);
6758 if (cp->link_type == 0x01)
6759 dest_type = LE_LINK;
6761 dest_type = ACL_LINK;
6763 /* Get LE/ACL link handle info */
6764 conn = hci_conn_hash_lookup_ba(hdev,
6765 dest_type, &cp->bdaddr);
6768 err = mgmt_cmd_complete(sk, hdev->id,
6769 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
6770 mgmt_pending_remove(cmd);
6774 hci_req_init(&req, hdev);
6776 th.hci_le_ext_opcode = 0x0B;
6778 th.conn_handle = conn->handle;
6779 th.alert_mask = 0x07;
6780 th.low_th = cp->low_th;
6781 th.in_range_th = cp->in_range_th;
6782 th.high_th = cp->high_th;
6784 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6785 err = hci_req_run(&req, set_rssi_threshold_complete);
6788 mgmt_pending_remove(cmd);
6789 BT_ERR("Error in requesting hci_req_run");
6794 hci_dev_unlock(hdev);
6798 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6799 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6801 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6802 struct mgmt_cp_set_enable_rssi *cp = data;
6803 struct mgmt_pending_cmd *cmd;
6808 mgmt_rp.status = rp->status;
6809 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6810 mgmt_rp.bt_address = cp->bdaddr;
6811 mgmt_rp.link_type = cp->link_type;
6813 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6814 MGMT_STATUS_SUCCESS, &mgmt_rp,
6815 sizeof(struct mgmt_cc_rsp_enable_rssi));
6817 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6818 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6820 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6821 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6822 &mgmt_rp.bt_address, true);
6826 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6828 mgmt_pending_remove(cmd);
6830 hci_dev_unlock(hdev);
6833 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6834 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6836 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6837 struct mgmt_cp_disable_rssi *cp = data;
6838 struct mgmt_pending_cmd *cmd;
6843 mgmt_rp.status = rp->status;
6844 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6845 mgmt_rp.bt_address = cp->bdaddr;
6846 mgmt_rp.link_type = cp->link_type;
6848 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6849 MGMT_STATUS_SUCCESS, &mgmt_rp,
6850 sizeof(struct mgmt_cc_rsp_enable_rssi));
6852 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6853 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6855 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6856 &mgmt_rp.bt_address, false);
6860 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6862 mgmt_pending_remove(cmd);
6864 hci_dev_unlock(hdev);
6867 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6868 void *data, u16 len)
6870 struct mgmt_pending_cmd *cmd;
6871 struct hci_request req;
6872 struct hci_cp_set_enable_rssi cp_en = { 0, };
6875 BT_DBG("Set Disable RSSI.");
6877 cp_en.hci_le_ext_opcode = 0x01;
6878 cp_en.le_enable_cs_Features = 0x00;
6879 cp_en.data[0] = 0x00;
6880 cp_en.data[1] = 0x00;
6881 cp_en.data[2] = 0x00;
6885 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6887 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6888 MGMT_STATUS_FAILED);
6892 if (!lmp_le_capable(hdev)) {
6893 mgmt_pending_remove(cmd);
6894 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6895 MGMT_STATUS_NOT_SUPPORTED);
6899 if (!hdev_is_powered(hdev)) {
6900 BT_DBG("%s", hdev->name);
6901 mgmt_pending_remove(cmd);
6902 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6903 MGMT_STATUS_NOT_POWERED);
6907 hci_req_init(&req, hdev);
6909 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6910 sizeof(struct hci_cp_set_enable_rssi),
6911 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6912 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6914 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6915 err = hci_req_run(&req, set_rssi_disable_complete);
6918 mgmt_pending_remove(cmd);
6919 BT_ERR("Error in requesting hci_req_run");
6924 hci_dev_unlock(hdev);
6928 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6930 struct hci_cc_rsp_enable_rssi *rp = response;
6931 struct mgmt_pending_cmd *cmd_enable = NULL;
6932 struct mgmt_pending_cmd *cmd_disable = NULL;
6933 struct mgmt_cp_set_enable_rssi *cp_en;
6934 struct mgmt_cp_disable_rssi *cp_dis;
6937 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6938 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6939 hci_dev_unlock(hdev);
6942 BT_DBG("Enable Request");
6945 BT_DBG("Disable Request");
6948 cp_en = cmd_enable->param;
6953 switch (rp->le_ext_opcode) {
6955 BT_DBG("RSSI enabled.. Setting Threshold...");
6956 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6957 cp_en, sizeof(*cp_en));
6961 BT_DBG("Sending RSSI enable success");
6962 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6963 cp_en, rp, rp->status);
6967 } else if (cmd_disable) {
6968 cp_dis = cmd_disable->param;
6973 switch (rp->le_ext_opcode) {
6975 BT_DBG("Sending RSSI disable success");
6976 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6977 cp_dis, rp, rp->status);
6982 * Only unset RSSI Threshold values for the Link if
6983 * RSSI is monitored for other BREDR or LE Links
6985 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6986 BT_DBG("Unset Threshold. Other links being monitored");
6987 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6988 cp_dis, rp, rp->status);
6990 BT_DBG("Unset Threshold. Disabling...");
6991 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6992 cp_dis, sizeof(*cp_dis));
6999 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7002 struct mgmt_pending_cmd *cmd;
7004 BT_DBG("status 0x%02x", status);
7008 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7013 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7014 mgmt_status(status));
7016 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7019 mgmt_pending_remove(cmd);
7022 hci_dev_unlock(hdev);
7025 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7026 void *data, u16 len)
7028 struct mgmt_pending_cmd *cmd;
7029 struct hci_request req;
7030 struct mgmt_cp_set_enable_rssi *cp = data;
7031 struct hci_cp_set_enable_rssi cp_en = { 0, };
7034 BT_DBG("Set Enable RSSI.");
7036 cp_en.hci_le_ext_opcode = 0x01;
7037 cp_en.le_enable_cs_Features = 0x04;
7038 cp_en.data[0] = 0x00;
7039 cp_en.data[1] = 0x00;
7040 cp_en.data[2] = 0x00;
7044 if (!lmp_le_capable(hdev)) {
7045 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7046 MGMT_STATUS_NOT_SUPPORTED);
7050 if (!hdev_is_powered(hdev)) {
7051 BT_DBG("%s", hdev->name);
7052 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7053 MGMT_STATUS_NOT_POWERED);
7057 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
7058 BT_DBG("%s", hdev->name);
7059 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7064 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
7067 BT_DBG("%s", hdev->name);
7072 /* If RSSI is already enabled directly set Threshold values */
7073 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
7074 hci_dev_unlock(hdev);
7075 BT_DBG("RSSI Enabled. Directly set Threshold");
7076 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
7080 hci_req_init(&req, hdev);
7082 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7083 sizeof(struct hci_cp_set_enable_rssi),
7084 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7085 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7087 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7088 err = hci_req_run(&req, set_rssi_enable_complete);
7091 mgmt_pending_remove(cmd);
7092 BT_ERR("Error in requesting hci_req_run");
7097 hci_dev_unlock(hdev);
7102 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7104 struct mgmt_pending_cmd *cmd;
7106 BT_DBG("status 0x%02x", status);
7110 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
7114 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7115 MGMT_STATUS_SUCCESS, &status, 1);
7117 mgmt_pending_remove(cmd);
7120 hci_dev_unlock(hdev);
7123 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
7126 struct mgmt_pending_cmd *cmd;
7127 struct hci_request req;
7128 struct mgmt_cp_get_raw_rssi *cp = data;
7129 struct hci_cp_get_raw_rssi hci_cp;
7131 struct hci_conn *conn;
7135 BT_DBG("Get Raw RSSI.");
7139 if (!lmp_le_capable(hdev)) {
7140 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7141 MGMT_STATUS_NOT_SUPPORTED);
7145 if (cp->link_type == 0x01)
7146 dest_type = LE_LINK;
7148 dest_type = ACL_LINK;
7150 /* Get LE/BREDR link handle info */
7151 conn = hci_conn_hash_lookup_ba(hdev,
7152 dest_type, &cp->bt_address);
7154 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7155 MGMT_STATUS_NOT_CONNECTED);
7158 hci_cp.conn_handle = conn->handle;
7160 if (!hdev_is_powered(hdev)) {
7161 BT_DBG("%s", hdev->name);
7162 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7163 MGMT_STATUS_NOT_POWERED);
7167 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
7168 BT_DBG("%s", hdev->name);
7169 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7174 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
7176 BT_DBG("%s", hdev->name);
7181 hci_req_init(&req, hdev);
7183 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
7184 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
7185 err = hci_req_run(&req, get_raw_rssi_complete);
7188 mgmt_pending_remove(cmd);
7189 BT_ERR("Error in requesting hci_req_run");
7193 hci_dev_unlock(hdev);
7198 void mgmt_raw_rssi_response(struct hci_dev *hdev,
7199 struct hci_cc_rp_get_raw_rssi *rp, int success)
7201 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
7202 struct hci_conn *conn;
7204 mgmt_rp.status = rp->status;
7205 mgmt_rp.rssi_dbm = rp->rssi_dbm;
7207 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
7211 bacpy(&mgmt_rp.bt_address, &conn->dst);
7212 if (conn->type == LE_LINK)
7213 mgmt_rp.link_type = 0x01;
7215 mgmt_rp.link_type = 0x00;
7217 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
7218 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
7221 static void set_disable_threshold_complete(struct hci_dev *hdev,
7222 u8 status, u16 opcode)
7224 struct mgmt_pending_cmd *cmd;
7226 BT_DBG("status 0x%02x", status);
7230 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7234 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7235 MGMT_STATUS_SUCCESS, &status, 1);
7237 mgmt_pending_remove(cmd);
7240 hci_dev_unlock(hdev);
7243 /** Removes monitoring for a link*/
7244 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
7245 void *data, u16 len)
7248 struct hci_cp_set_rssi_threshold th = { 0, };
7249 struct mgmt_cp_disable_rssi *cp = data;
7250 struct hci_conn *conn;
7251 struct mgmt_pending_cmd *cmd;
7252 struct hci_request req;
7255 BT_DBG("Set Disable RSSI.");
7259 if (!lmp_le_capable(hdev)) {
7260 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7261 MGMT_STATUS_NOT_SUPPORTED);
7265 /* Get LE/ACL link handle info*/
7266 if (cp->link_type == 0x01)
7267 dest_type = LE_LINK;
7269 dest_type = ACL_LINK;
7271 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
7273 err = mgmt_cmd_complete(sk, hdev->id,
7274 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
7278 th.hci_le_ext_opcode = 0x0B;
7280 th.conn_handle = conn->handle;
7281 th.alert_mask = 0x00;
7283 th.in_range_th = 0x00;
7286 if (!hdev_is_powered(hdev)) {
7287 BT_DBG("%s", hdev->name);
7288 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7293 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
7294 BT_DBG("%s", hdev->name);
7295 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7300 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
7303 BT_DBG("%s", hdev->name);
7308 hci_req_init(&req, hdev);
7310 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7311 err = hci_req_run(&req, set_disable_threshold_complete);
7313 mgmt_pending_remove(cmd);
7314 BT_ERR("Error in requesting hci_req_run");
7319 hci_dev_unlock(hdev);
7324 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
7325 s8 alert_type, s8 rssi_dbm)
7327 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
7328 struct hci_conn *conn;
7330 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
7331 conn_handle, alert_type, rssi_dbm);
7333 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
7336 BT_ERR("RSSI alert Error: Device not found for handle");
7339 bacpy(&mgmt_ev.bdaddr, &conn->dst);
7341 if (conn->type == LE_LINK)
7342 mgmt_ev.link_type = 0x01;
7344 mgmt_ev.link_type = 0x00;
7346 mgmt_ev.alert_type = alert_type;
7347 mgmt_ev.rssi_dbm = rssi_dbm;
7349 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
7350 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
7354 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
7356 struct mgmt_pending_cmd *cmd;
7360 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7362 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7366 type = hdev->le_discovery.type;
7368 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7369 mgmt_status(status), &type, sizeof(type));
7370 mgmt_pending_remove(cmd);
7375 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
7378 unsigned long timeout = 0;
7380 BT_DBG("status %d", status);
7384 mgmt_start_le_discovery_failed(hdev, status);
7385 hci_dev_unlock(hdev);
7390 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
7391 hci_dev_unlock(hdev);
7393 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
7394 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
7399 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
7402 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
7403 void *data, u16 len)
7405 struct mgmt_cp_start_le_discovery *cp = data;
7406 struct mgmt_pending_cmd *cmd;
7407 struct hci_cp_le_set_scan_param param_cp;
7408 struct hci_cp_le_set_scan_enable enable_cp;
7409 struct hci_request req;
7410 u8 status, own_addr_type;
7413 BT_DBG("%s", hdev->name);
7417 if (!hdev_is_powered(hdev)) {
7418 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7419 MGMT_STATUS_NOT_POWERED);
7423 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
7424 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7429 if (cp->type != DISCOV_TYPE_LE) {
7430 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7431 MGMT_STATUS_INVALID_PARAMS);
7435 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
7441 hdev->le_discovery.type = cp->type;
7443 hci_req_init(&req, hdev);
7445 status = mgmt_le_support(hdev);
7447 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7449 mgmt_pending_remove(cmd);
7453 /* If controller is scanning, it means the background scanning
7454 * is running. Thus, we should temporarily stop it in order to
7455 * set the discovery scanning parameters.
7457 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
7458 hci_req_add_le_scan_disable(&req, false);
7460 memset(¶m_cp, 0, sizeof(param_cp));
7462 /* All active scans will be done with either a resolvable
7463 * private address (when privacy feature has been enabled)
7464 * or unresolvable private address.
7466 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
7468 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7469 MGMT_STATUS_FAILED);
7470 mgmt_pending_remove(cmd);
7474 param_cp.type = hdev->le_scan_type;
7475 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
7476 param_cp.window = cpu_to_le16(hdev->le_scan_window);
7477 param_cp.own_address_type = own_addr_type;
7478 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
7481 memset(&enable_cp, 0, sizeof(enable_cp));
7482 enable_cp.enable = LE_SCAN_ENABLE;
7483 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
7485 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
7488 err = hci_req_run(&req, start_le_discovery_complete);
7490 mgmt_pending_remove(cmd);
7492 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
7495 hci_dev_unlock(hdev);
7499 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
7501 struct mgmt_pending_cmd *cmd;
7504 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7508 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7509 mgmt_status(status), &hdev->le_discovery.type,
7510 sizeof(hdev->le_discovery.type));
7511 mgmt_pending_remove(cmd);
7516 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
7519 BT_DBG("status %d", status);
7524 mgmt_stop_le_discovery_failed(hdev, status);
7528 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7531 hci_dev_unlock(hdev);
7534 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
7535 void *data, u16 len)
7537 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
7538 struct mgmt_pending_cmd *cmd;
7539 struct hci_request req;
7542 BT_DBG("%s", hdev->name);
7546 if (!hci_le_discovery_active(hdev)) {
7547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7548 MGMT_STATUS_REJECTED, &mgmt_cp->type,
7549 sizeof(mgmt_cp->type));
7553 if (hdev->le_discovery.type != mgmt_cp->type) {
7554 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7555 MGMT_STATUS_INVALID_PARAMS,
7556 &mgmt_cp->type, sizeof(mgmt_cp->type));
7560 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
7566 hci_req_init(&req, hdev);
7568 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
7569 BT_DBG("unknown le discovery state %u",
7570 hdev->le_discovery.state);
7572 mgmt_pending_remove(cmd);
7573 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7574 MGMT_STATUS_FAILED, &mgmt_cp->type,
7575 sizeof(mgmt_cp->type));
7579 cancel_delayed_work(&hdev->le_scan_disable);
7580 hci_req_add_le_scan_disable(&req, false);
7582 err = hci_req_run(&req, stop_le_discovery_complete);
7584 mgmt_pending_remove(cmd);
7586 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
7589 hci_dev_unlock(hdev);
7593 /* Separate LE discovery */
7594 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
7596 struct mgmt_ev_discovering ev;
7597 struct mgmt_pending_cmd *cmd;
7599 BT_DBG("%s le discovering %u", hdev->name, discovering);
7602 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7604 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7607 u8 type = hdev->le_discovery.type;
7609 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
7611 mgmt_pending_remove(cmd);
7614 memset(&ev, 0, sizeof(ev));
7615 ev.type = hdev->le_discovery.type;
7616 ev.discovering = discovering;
7618 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7621 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
7622 void *data, u16 len)
7626 BT_DBG("%s", hdev->name);
7630 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
7632 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
7634 hci_dev_unlock(hdev);
7639 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
7644 if (min > max || min < 6 || max > 3200)
7647 if (to_multiplier < 10 || to_multiplier > 3200)
7650 if (max >= to_multiplier * 8)
7653 max_latency = (to_multiplier * 8 / max) - 1;
7655 if (latency > 499 || latency > max_latency)
7661 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
7664 struct mgmt_cp_le_conn_update *cp = data;
7666 struct hci_conn *conn;
7667 u16 min, max, latency, supervision_timeout;
7670 if (!hdev_is_powered(hdev))
7671 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7672 MGMT_STATUS_NOT_POWERED);
7674 min = __le16_to_cpu(cp->conn_interval_min);
7675 max = __le16_to_cpu(cp->conn_interval_max);
7676 latency = __le16_to_cpu(cp->conn_latency);
7677 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
7679 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
7680 min, max, latency, supervision_timeout);
7682 err = check_le_conn_update_param(min, max, latency,
7683 supervision_timeout);
7686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7687 MGMT_STATUS_INVALID_PARAMS);
7691 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
7693 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7694 MGMT_STATUS_NOT_CONNECTED);
7695 hci_dev_unlock(hdev);
7699 hci_dev_unlock(hdev);
7701 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
7703 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
7707 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
7710 struct mgmt_cp_set_manufacturer_data *cp;
7711 struct mgmt_pending_cmd *cmd;
7713 BT_DBG("status 0x%02x", status);
7717 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
7724 mgmt_cmd_status(cmd->sk, hdev->id,
7725 MGMT_OP_SET_MANUFACTURER_DATA,
7726 mgmt_status(status));
7728 mgmt_cmd_complete(cmd->sk, hdev->id,
7729 MGMT_OP_SET_MANUFACTURER_DATA, 0,
7732 mgmt_pending_remove(cmd);
7735 hci_dev_unlock(hdev);
7738 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
7739 void *data, u16 len)
7741 struct mgmt_pending_cmd *cmd;
7742 struct hci_request req;
7743 struct mgmt_cp_set_manufacturer_data *cp = data;
7744 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
7748 BT_DBG("%s", hdev->name);
7750 if (!lmp_bredr_capable(hdev))
7751 return mgmt_cmd_status(sk, hdev->id,
7752 MGMT_OP_SET_MANUFACTURER_DATA,
7753 MGMT_STATUS_NOT_SUPPORTED);
7755 if (cp->data[0] == 0 ||
7756 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
7757 return mgmt_cmd_status(sk, hdev->id,
7758 MGMT_OP_SET_MANUFACTURER_DATA,
7759 MGMT_STATUS_INVALID_PARAMS);
7761 if (cp->data[1] != 0xFF)
7762 return mgmt_cmd_status(sk, hdev->id,
7763 MGMT_OP_SET_MANUFACTURER_DATA,
7764 MGMT_STATUS_NOT_SUPPORTED);
7768 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
7769 err = mgmt_cmd_status(sk, hdev->id,
7770 MGMT_OP_SET_MANUFACTURER_DATA,
7775 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
7782 hci_req_init(&req, hdev);
7784 /* if new data is same as previous data then return command
7787 if (hdev->manufacturer_len == cp->data[0] - 1 &&
7788 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
7789 mgmt_pending_remove(cmd);
7790 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
7791 0, cp, sizeof(*cp));
7796 old_len = hdev->manufacturer_len;
7798 memcpy(old_data, hdev->manufacturer_data, old_len);
7800 hdev->manufacturer_len = cp->data[0] - 1;
7801 if (hdev->manufacturer_len > 0)
7802 memcpy(hdev->manufacturer_data, cp->data + 2,
7803 hdev->manufacturer_len);
7805 __hci_req_update_eir(&req);
7807 err = hci_req_run(&req, set_manufacturer_data_complete);
7809 mgmt_pending_remove(cmd);
7814 hci_dev_unlock(hdev);
7819 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
7820 hdev->manufacturer_len = old_len;
7821 if (hdev->manufacturer_len > 0)
7822 memcpy(hdev->manufacturer_data, old_data,
7823 hdev->manufacturer_len);
7824 hci_dev_unlock(hdev);
7828 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
7829 void *data, u16 len)
7831 struct mgmt_cp_le_set_scan_params *cp = data;
7832 __u16 interval, window;
7835 BT_DBG("%s", hdev->name);
7837 if (!lmp_le_capable(hdev))
7838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7839 MGMT_STATUS_NOT_SUPPORTED);
7841 interval = __le16_to_cpu(cp->interval);
7843 if (interval < 0x0004 || interval > 0x4000)
7844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7845 MGMT_STATUS_INVALID_PARAMS);
7847 window = __le16_to_cpu(cp->window);
7849 if (window < 0x0004 || window > 0x4000)
7850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7851 MGMT_STATUS_INVALID_PARAMS);
7853 if (window > interval)
7854 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7855 MGMT_STATUS_INVALID_PARAMS);
7859 hdev->le_scan_type = cp->type;
7860 hdev->le_scan_interval = interval;
7861 hdev->le_scan_window = window;
7863 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
7866 /* If background scan is running, restart it so new parameters are
7869 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
7870 hdev->discovery.state == DISCOVERY_STOPPED) {
7871 struct hci_request req;
7873 hci_req_init(&req, hdev);
7875 hci_req_add_le_scan_disable(&req, false);
7876 hci_req_add_le_passive_scan(&req);
7878 hci_req_run(&req, NULL);
7881 hci_dev_unlock(hdev);
7886 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
7887 void *data, u16 len)
7889 struct mgmt_cp_set_voice_setting *cp = data;
7890 struct hci_conn *conn;
7891 struct hci_conn *sco_conn;
7895 BT_DBG("%s", hdev->name);
7897 if (!lmp_bredr_capable(hdev)) {
7898 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
7899 MGMT_STATUS_NOT_SUPPORTED);
7904 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
7906 err = mgmt_cmd_complete(sk, hdev->id,
7907 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
7911 conn->voice_setting = cp->voice_setting;
7912 conn->sco_role = cp->sco_role;
7914 sco_conn = hci_conn_hash_lookup_sco(hdev);
7915 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
7916 BT_ERR("There is other SCO connection.");
7920 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
7921 if (conn->voice_setting == 0x0063)
7922 sco_connect_set_wbc(hdev);
7924 sco_connect_set_nbc(hdev);
7926 if (conn->voice_setting == 0x0063)
7927 sco_connect_set_gw_wbc(hdev);
7929 sco_connect_set_gw_nbc(hdev);
7933 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
7937 hci_dev_unlock(hdev);
7941 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
7942 void *data, u16 len)
7944 struct mgmt_rp_get_adv_tx_power *rp;
7948 BT_DBG("%s", hdev->name);
7952 rp_len = sizeof(*rp);
7953 rp = kmalloc(rp_len, GFP_KERNEL);
7959 rp->adv_tx_power = hdev->adv_tx_power;
7961 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
7967 hci_dev_unlock(hdev);
7972 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
7974 struct mgmt_ev_hardware_error ev;
7976 ev.error_code = err_code;
7977 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
7980 void mgmt_tx_timeout_error(struct hci_dev *hdev)
7982 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
7985 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
7986 u8 state_change_reason, u16 connection_handle)
7988 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
7990 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
7991 adv_instance, state_change_reason, connection_handle);
7993 mgmt_ev.adv_instance = adv_instance;
7994 mgmt_ev.state_change_reason = state_change_reason;
7995 mgmt_ev.connection_handle = connection_handle;
7997 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
7998 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
8002 static int enable_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8003 void *data, u16 len)
8006 struct mgmt_cp_enable_6lowpan *cp = data;
8008 BT_DBG("%s", hdev->name);
8012 if (!hdev_is_powered(hdev)) {
8013 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8014 MGMT_STATUS_NOT_POWERED);
8018 if (!lmp_le_capable(hdev)) {
8019 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8020 MGMT_STATUS_NOT_SUPPORTED);
8024 if (cp->enable_6lowpan)
8025 bt_6lowpan_enable();
8027 bt_6lowpan_disable();
8029 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8030 MGMT_STATUS_SUCCESS, NULL, 0);
8032 hci_dev_unlock(hdev);
8036 static int connect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8037 void *data, u16 len)
8039 struct mgmt_cp_connect_6lowpan *cp = data;
8040 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
8043 BT_DBG("%s", hdev->name);
8047 if (!lmp_le_capable(hdev)) {
8048 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8049 MGMT_STATUS_NOT_SUPPORTED);
8053 if (!hdev_is_powered(hdev)) {
8054 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8055 MGMT_STATUS_REJECTED);
8059 if (bdaddr_type_is_le(cp->addr.type)) {
8060 if (cp->addr.type == BDADDR_LE_PUBLIC)
8061 addr_type = ADDR_LE_DEV_PUBLIC;
8063 addr_type = ADDR_LE_DEV_RANDOM;
8065 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8066 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8070 hci_dev_unlock(hdev);
8072 /* 6lowpan Connect */
8073 err = _bt_6lowpan_connect(&cp->addr.bdaddr, cp->addr.type);
8078 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8079 MGMT_STATUS_REJECTED, NULL, 0);
8084 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
8087 hci_dev_unlock(hdev);
8091 static int disconnect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8092 void *data, u16 len)
8094 struct mgmt_cp_disconnect_6lowpan *cp = data;
8095 struct hci_conn *conn = NULL;
8096 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
8099 BT_DBG("%s", hdev->name);
8103 if (!lmp_le_capable(hdev)) {
8104 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
8105 MGMT_STATUS_NOT_SUPPORTED);
8109 if (!hdev_is_powered(hdev)) {
8110 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
8111 MGMT_STATUS_REJECTED);
8115 if (bdaddr_type_is_le(cp->addr.type)) {
8116 if (cp->addr.type == BDADDR_LE_PUBLIC)
8117 addr_type = ADDR_LE_DEV_PUBLIC;
8119 addr_type = ADDR_LE_DEV_RANDOM;
8121 err = mgmt_cmd_complete(sk, hdev->id,
8122 MGMT_OP_DISCONNECT_6LOWPAN,
8123 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8127 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8129 err = mgmt_cmd_complete(sk, hdev->id,
8130 MGMT_OP_DISCONNECT_6LOWPAN,
8131 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
8135 if (conn->dst_type != addr_type) {
8136 err = mgmt_cmd_complete(sk, hdev->id,
8137 MGMT_OP_DISCONNECT_6LOWPAN,
8138 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8142 if (conn->state != BT_CONNECTED) {
8143 err = mgmt_cmd_complete(sk, hdev->id,
8144 MGMT_OP_DISCONNECT_6LOWPAN,
8145 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
8149 /* 6lowpan Disconnect */
8150 err = _bt_6lowpan_disconnect(conn->l2cap_data, cp->addr.type);
8152 err = mgmt_cmd_complete(sk, hdev->id,
8153 MGMT_OP_DISCONNECT_6LOWPAN,
8154 MGMT_STATUS_REJECTED, NULL, 0);
8158 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
8162 hci_dev_unlock(hdev);
8166 void mgmt_6lowpan_conn_changed(struct hci_dev *hdev, char if_name[16],
8167 bdaddr_t *bdaddr, u8 addr_type, bool connected)
8170 struct mgmt_ev_6lowpan_conn_state_changed *ev = (void *)buf;
8173 memset(buf, 0, sizeof(buf));
8174 bacpy(&ev->addr.bdaddr, bdaddr);
8175 ev->addr.type = addr_type;
8176 ev->connected = connected;
8177 memcpy(ev->ifname, (__u8 *)if_name, 16);
8179 ev_size = sizeof(*ev);
8181 mgmt_event(MGMT_EV_6LOWPAN_CONN_STATE_CHANGED, hdev, ev, ev_size, NULL);
8184 void mgmt_le_read_maximum_data_length_complete(struct hci_dev *hdev, u8 status)
8186 struct mgmt_pending_cmd *cmd;
8187 struct mgmt_rp_le_read_maximum_data_length rp;
8189 BT_DBG("%s status %u", hdev->name, status);
8191 cmd = pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev);
8196 mgmt_cmd_status(cmd->sk, hdev->id,
8197 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8198 mgmt_status(status));
8200 memset(&rp, 0, sizeof(rp));
8202 rp.max_tx_octets = cpu_to_le16(hdev->le_max_tx_len);
8203 rp.max_tx_time = cpu_to_le16(hdev->le_max_tx_time);
8204 rp.max_rx_octets = cpu_to_le16(hdev->le_max_rx_len);
8205 rp.max_rx_time = cpu_to_le16(hdev->le_max_rx_time);
8207 mgmt_cmd_complete(cmd->sk, hdev->id,
8208 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, 0,
8211 mgmt_pending_remove(cmd);
8214 static int read_maximum_le_data_length(struct sock *sk,
8215 struct hci_dev *hdev, void *data, u16 len)
8217 struct mgmt_pending_cmd *cmd;
8220 BT_DBG("read_maximum_le_data_length %s", hdev->name);
8224 if (!hdev_is_powered(hdev)) {
8225 err = mgmt_cmd_status(sk, hdev->id,
8226 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8227 MGMT_STATUS_NOT_POWERED);
8231 if (!lmp_le_capable(hdev)) {
8232 err = mgmt_cmd_status(sk, hdev->id,
8233 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8234 MGMT_STATUS_NOT_SUPPORTED);
8238 if (pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev)) {
8239 err = mgmt_cmd_status(sk, hdev->id,
8240 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8245 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8252 err = hci_send_cmd(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
8254 mgmt_pending_remove(cmd);
8257 hci_dev_unlock(hdev);
8261 void mgmt_le_write_host_suggested_data_length_complete(struct hci_dev *hdev,
8264 struct mgmt_pending_cmd *cmd;
8266 BT_DBG("status 0x%02x", status);
8270 cmd = pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev);
8272 BT_ERR("cmd not found in the pending list");
8277 mgmt_cmd_status(cmd->sk, hdev->id,
8278 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8279 mgmt_status(status));
8281 mgmt_cmd_complete(cmd->sk, hdev->id,
8282 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8285 mgmt_pending_remove(cmd);
8288 hci_dev_unlock(hdev);
8291 static int write_host_suggested_le_data_length(struct sock *sk,
8292 struct hci_dev *hdev, void *data, u16 len)
8294 struct mgmt_pending_cmd *cmd;
8295 struct mgmt_cp_le_write_host_suggested_data_length *cp = data;
8296 struct hci_cp_le_write_def_data_len hci_data;
8299 BT_DBG("Write host suggested data length request for %s", hdev->name);
8303 if (!hdev_is_powered(hdev)) {
8304 err = mgmt_cmd_status(sk, hdev->id,
8305 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8306 MGMT_STATUS_NOT_POWERED);
8310 if (!lmp_le_capable(hdev)) {
8311 err = mgmt_cmd_status(sk, hdev->id,
8312 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8313 MGMT_STATUS_NOT_SUPPORTED);
8317 if (pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
8318 err = mgmt_cmd_status(sk, hdev->id,
8319 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8324 cmd = mgmt_pending_add(sk, MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8331 hci_data.tx_len = cp->def_tx_octets;
8332 hci_data.tx_time = cp->def_tx_time;
8334 err = hci_send_cmd(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
8335 sizeof(hci_data), &hci_data);
8337 mgmt_pending_remove(cmd);
8340 hci_dev_unlock(hdev);
8345 void mgmt_le_read_host_suggested_data_length_complete(struct hci_dev *hdev,
8348 struct mgmt_pending_cmd *cmd;
8349 struct mgmt_rp_le_read_host_suggested_data_length rp;
8351 BT_DBG("%s status %u", hdev->name, status);
8353 cmd = pending_find(MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, hdev);
8355 BT_ERR("cmd not found in the pending list");
8360 mgmt_cmd_status(cmd->sk, hdev->id,
8361 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
8362 mgmt_status(status));
8364 memset(&rp, 0, sizeof(rp));
8366 rp.def_tx_octets = cpu_to_le16(hdev->le_def_tx_len);
8367 rp.def_tx_time = cpu_to_le16(hdev->le_def_tx_time);
8369 mgmt_cmd_complete(cmd->sk, hdev->id,
8370 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, 0,
8373 mgmt_pending_remove(cmd);
8376 static int read_host_suggested_data_length(struct sock *sk,
8377 struct hci_dev *hdev, void *data, u16 len)
8379 struct mgmt_pending_cmd *cmd;
8382 BT_DBG("read_host_suggested_data_length %s", hdev->name);
8386 if (!hdev_is_powered(hdev)) {
8387 err = mgmt_cmd_status(sk, hdev->id,
8388 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
8389 MGMT_STATUS_NOT_POWERED);
8393 if (!lmp_le_capable(hdev)) {
8394 err = mgmt_cmd_status(sk, hdev->id,
8395 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
8396 MGMT_STATUS_NOT_SUPPORTED);
8400 if (pending_find(MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
8401 err = mgmt_cmd_status(sk, hdev->id,
8402 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
8407 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
8414 err = hci_send_cmd(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
8416 mgmt_pending_remove(cmd);
8419 hci_dev_unlock(hdev);
8424 static int set_le_data_length_params(struct sock *sk, struct hci_dev *hdev,
8425 void *data, u16 len)
8427 struct mgmt_cp_le_set_data_length *cp = data;
8428 struct mgmt_rp_le_set_data_length *rp;
8429 struct mgmt_pending_cmd *cmd;
8430 struct hci_conn *conn;
8432 u16 max_tx_octets, max_tx_time;
8435 BT_INFO("Set Data length for the device %s", hdev->name);
8439 rp_len = sizeof(*rp);
8440 rp = kmalloc(rp_len, GFP_KERNEL);
8446 if (!hdev_is_powered(hdev)) {
8447 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
8448 MGMT_STATUS_NOT_POWERED);
8452 if (!lmp_le_capable(hdev)) {
8453 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
8454 MGMT_STATUS_NOT_SUPPORTED);
8458 if (pending_find(MGMT_OP_LE_SET_DATA_LENGTH, hdev)) {
8459 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
8464 cmd = mgmt_pending_add(sk, MGMT_OP_LE_SET_DATA_LENGTH, hdev, data, len);
8470 max_tx_octets = __le16_to_cpu(cp->max_tx_octets);
8471 max_tx_time = __le16_to_cpu(cp->max_tx_time);
8473 BT_DBG("max_tx_octets 0x%4.4x max_tx_time 0x%4.4x latency",
8474 max_tx_octets, max_tx_time);
8476 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
8478 mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
8479 MGMT_STATUS_NOT_CONNECTED);
8483 hci_dev_unlock(hdev);
8485 err = hci_le_set_data_length(conn, max_tx_octets, max_tx_time);
8487 mgmt_pending_remove(cmd);
8489 rp->handle = conn->handle;
8493 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH, 0,
8497 hci_dev_unlock(hdev);
8502 void mgmt_le_data_length_change_complete(struct hci_dev *hdev,
8503 bdaddr_t *bdaddr, u16 tx_octets, u16 tx_time,
8504 u16 rx_octets, u16 rx_time)
8506 struct mgmt_ev_le_data_length_changed ev;
8508 bacpy(&ev.addr.bdaddr, bdaddr);
8509 ev.max_tx_octets = tx_octets;
8510 ev.max_tx_time = tx_time;
8511 ev.max_rx_octets = rx_octets;
8512 ev.max_rx_time = rx_time;
8514 mgmt_event(MGMT_EV_LE_DATA_LENGTH_CHANGED, hdev, &ev, sizeof(ev), NULL);
8516 #endif /* TIZEN_BT */
8518 static bool ltk_is_valid(struct mgmt_ltk_info *key)
8520 if (key->initiator != 0x00 && key->initiator != 0x01)
8523 switch (key->addr.type) {
8524 case BDADDR_LE_PUBLIC:
8527 case BDADDR_LE_RANDOM:
8528 /* Two most significant bits shall be set */
8529 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
8537 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
8538 void *cp_data, u16 len)
8540 struct mgmt_cp_load_long_term_keys *cp = cp_data;
8541 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
8542 sizeof(struct mgmt_ltk_info));
8543 u16 key_count, expected_len;
8546 bt_dev_dbg(hdev, "sock %p", sk);
8548 if (!lmp_le_capable(hdev))
8549 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8550 MGMT_STATUS_NOT_SUPPORTED);
8552 key_count = __le16_to_cpu(cp->key_count);
8553 if (key_count > max_key_count) {
8554 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
8556 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8557 MGMT_STATUS_INVALID_PARAMS);
8560 expected_len = struct_size(cp, keys, key_count);
8561 if (expected_len != len) {
8562 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
8564 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8565 MGMT_STATUS_INVALID_PARAMS);
8568 bt_dev_dbg(hdev, "key_count %u", key_count);
8570 for (i = 0; i < key_count; i++) {
8571 struct mgmt_ltk_info *key = &cp->keys[i];
8573 if (!ltk_is_valid(key))
8574 return mgmt_cmd_status(sk, hdev->id,
8575 MGMT_OP_LOAD_LONG_TERM_KEYS,
8576 MGMT_STATUS_INVALID_PARAMS);
8581 hci_smp_ltks_clear(hdev);
8583 for (i = 0; i < key_count; i++) {
8584 struct mgmt_ltk_info *key = &cp->keys[i];
8585 u8 type, authenticated;
8587 if (hci_is_blocked_key(hdev,
8588 HCI_BLOCKED_KEY_TYPE_LTK,
8590 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
8595 switch (key->type) {
8596 case MGMT_LTK_UNAUTHENTICATED:
8597 authenticated = 0x00;
8598 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8600 case MGMT_LTK_AUTHENTICATED:
8601 authenticated = 0x01;
8602 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8604 case MGMT_LTK_P256_UNAUTH:
8605 authenticated = 0x00;
8606 type = SMP_LTK_P256;
8608 case MGMT_LTK_P256_AUTH:
8609 authenticated = 0x01;
8610 type = SMP_LTK_P256;
8612 case MGMT_LTK_P256_DEBUG:
8613 authenticated = 0x00;
8614 type = SMP_LTK_P256_DEBUG;
8620 hci_add_ltk(hdev, &key->addr.bdaddr,
8621 le_addr_type(key->addr.type), type, authenticated,
8622 key->val, key->enc_size, key->ediv, key->rand);
8625 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
8628 hci_dev_unlock(hdev);
8633 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8635 struct hci_conn *conn = cmd->user_data;
8636 struct mgmt_rp_get_conn_info rp;
8639 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8641 if (status == MGMT_STATUS_SUCCESS) {
8642 rp.rssi = conn->rssi;
8643 rp.tx_power = conn->tx_power;
8644 rp.max_tx_power = conn->max_tx_power;
8646 rp.rssi = HCI_RSSI_INVALID;
8647 rp.tx_power = HCI_TX_POWER_INVALID;
8648 rp.max_tx_power = HCI_TX_POWER_INVALID;
8651 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
8652 status, &rp, sizeof(rp));
8654 hci_conn_drop(conn);
8660 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
8663 struct hci_cp_read_rssi *cp;
8664 struct mgmt_pending_cmd *cmd;
8665 struct hci_conn *conn;
8669 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
8673 /* Commands sent in request are either Read RSSI or Read Transmit Power
8674 * Level so we check which one was last sent to retrieve connection
8675 * handle. Both commands have handle as first parameter so it's safe to
8676 * cast data on the same command struct.
8678 * First command sent is always Read RSSI and we fail only if it fails.
8679 * In other case we simply override error to indicate success as we
8680 * already remembered if TX power value is actually valid.
8682 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
8684 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
8685 status = MGMT_STATUS_SUCCESS;
8687 status = mgmt_status(hci_status);
8691 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
8695 handle = __le16_to_cpu(cp->handle);
8696 conn = hci_conn_hash_lookup_handle(hdev, handle);
8698 bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
8703 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
8707 cmd->cmd_complete(cmd, status);
8708 mgmt_pending_remove(cmd);
8711 hci_dev_unlock(hdev);
8714 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
8717 struct mgmt_cp_get_conn_info *cp = data;
8718 struct mgmt_rp_get_conn_info rp;
8719 struct hci_conn *conn;
8720 unsigned long conn_info_age;
8723 bt_dev_dbg(hdev, "sock %p", sk);
8725 memset(&rp, 0, sizeof(rp));
8726 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8727 rp.addr.type = cp->addr.type;
8729 if (!bdaddr_type_is_valid(cp->addr.type))
8730 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8731 MGMT_STATUS_INVALID_PARAMS,
8736 if (!hdev_is_powered(hdev)) {
8737 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8738 MGMT_STATUS_NOT_POWERED, &rp,
8743 if (cp->addr.type == BDADDR_BREDR)
8744 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8747 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8749 if (!conn || conn->state != BT_CONNECTED) {
8750 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8751 MGMT_STATUS_NOT_CONNECTED, &rp,
8756 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
8757 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8758 MGMT_STATUS_BUSY, &rp, sizeof(rp));
8762 /* To avoid client trying to guess when to poll again for information we
8763 * calculate conn info age as random value between min/max set in hdev.
8765 conn_info_age = hdev->conn_info_min_age +
8766 prandom_u32_max(hdev->conn_info_max_age -
8767 hdev->conn_info_min_age);
8769 /* Query controller to refresh cached values if they are too old or were
8772 if (time_after(jiffies, conn->conn_info_timestamp +
8773 msecs_to_jiffies(conn_info_age)) ||
8774 !conn->conn_info_timestamp) {
8775 struct hci_request req;
8776 struct hci_cp_read_tx_power req_txp_cp;
8777 struct hci_cp_read_rssi req_rssi_cp;
8778 struct mgmt_pending_cmd *cmd;
8780 hci_req_init(&req, hdev);
8781 req_rssi_cp.handle = cpu_to_le16(conn->handle);
8782 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
8785 /* For LE links TX power does not change thus we don't need to
8786 * query for it once value is known.
8788 if (!bdaddr_type_is_le(cp->addr.type) ||
8789 conn->tx_power == HCI_TX_POWER_INVALID) {
8790 req_txp_cp.handle = cpu_to_le16(conn->handle);
8791 req_txp_cp.type = 0x00;
8792 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8793 sizeof(req_txp_cp), &req_txp_cp);
8796 /* Max TX power needs to be read only once per connection */
8797 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
8798 req_txp_cp.handle = cpu_to_le16(conn->handle);
8799 req_txp_cp.type = 0x01;
8800 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8801 sizeof(req_txp_cp), &req_txp_cp);
8804 err = hci_req_run(&req, conn_info_refresh_complete);
8808 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
8815 hci_conn_hold(conn);
8816 cmd->user_data = hci_conn_get(conn);
8817 cmd->cmd_complete = conn_info_cmd_complete;
8819 conn->conn_info_timestamp = jiffies;
8821 /* Cache is valid, just reply with values cached in hci_conn */
8822 rp.rssi = conn->rssi;
8823 rp.tx_power = conn->tx_power;
8824 rp.max_tx_power = conn->max_tx_power;
8826 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8827 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8831 hci_dev_unlock(hdev);
8835 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8837 struct hci_conn *conn = cmd->user_data;
8838 struct mgmt_rp_get_clock_info rp;
8839 struct hci_dev *hdev;
8842 memset(&rp, 0, sizeof(rp));
8843 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8848 hdev = hci_dev_get(cmd->index);
8850 rp.local_clock = cpu_to_le32(hdev->clock);
8855 rp.piconet_clock = cpu_to_le32(conn->clock);
8856 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
8860 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
8864 hci_conn_drop(conn);
8871 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8873 struct hci_cp_read_clock *hci_cp;
8874 struct mgmt_pending_cmd *cmd;
8875 struct hci_conn *conn;
8877 bt_dev_dbg(hdev, "status %u", status);
8881 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
8885 if (hci_cp->which) {
8886 u16 handle = __le16_to_cpu(hci_cp->handle);
8887 conn = hci_conn_hash_lookup_handle(hdev, handle);
8892 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
8896 cmd->cmd_complete(cmd, mgmt_status(status));
8897 mgmt_pending_remove(cmd);
8900 hci_dev_unlock(hdev);
8903 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
8906 struct mgmt_cp_get_clock_info *cp = data;
8907 struct mgmt_rp_get_clock_info rp;
8908 struct hci_cp_read_clock hci_cp;
8909 struct mgmt_pending_cmd *cmd;
8910 struct hci_request req;
8911 struct hci_conn *conn;
8914 bt_dev_dbg(hdev, "sock %p", sk);
8916 memset(&rp, 0, sizeof(rp));
8917 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8918 rp.addr.type = cp->addr.type;
8920 if (cp->addr.type != BDADDR_BREDR)
8921 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8922 MGMT_STATUS_INVALID_PARAMS,
8927 if (!hdev_is_powered(hdev)) {
8928 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8929 MGMT_STATUS_NOT_POWERED, &rp,
8934 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8935 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8937 if (!conn || conn->state != BT_CONNECTED) {
8938 err = mgmt_cmd_complete(sk, hdev->id,
8939 MGMT_OP_GET_CLOCK_INFO,
8940 MGMT_STATUS_NOT_CONNECTED,
8948 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
8954 cmd->cmd_complete = clock_info_cmd_complete;
8956 hci_req_init(&req, hdev);
8958 memset(&hci_cp, 0, sizeof(hci_cp));
8959 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8962 hci_conn_hold(conn);
8963 cmd->user_data = hci_conn_get(conn);
8965 hci_cp.handle = cpu_to_le16(conn->handle);
8966 hci_cp.which = 0x01; /* Piconet clock */
8967 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8970 err = hci_req_run(&req, get_clock_info_complete);
8972 mgmt_pending_remove(cmd);
8975 hci_dev_unlock(hdev);
8979 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8981 struct hci_conn *conn;
8983 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8987 if (conn->dst_type != type)
8990 if (conn->state != BT_CONNECTED)
8996 /* This function requires the caller holds hdev->lock */
8997 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8998 u8 addr_type, u8 auto_connect)
9000 struct hci_conn_params *params;
9002 params = hci_conn_params_add(hdev, addr, addr_type);
9006 if (params->auto_connect == auto_connect)
9009 list_del_init(¶ms->action);
9011 switch (auto_connect) {
9012 case HCI_AUTO_CONN_DISABLED:
9013 case HCI_AUTO_CONN_LINK_LOSS:
9014 /* If auto connect is being disabled when we're trying to
9015 * connect to device, keep connecting.
9017 if (params->explicit_connect)
9018 list_add(¶ms->action, &hdev->pend_le_conns);
9020 case HCI_AUTO_CONN_REPORT:
9021 if (params->explicit_connect)
9022 list_add(¶ms->action, &hdev->pend_le_conns);
9024 list_add(¶ms->action, &hdev->pend_le_reports);
9026 case HCI_AUTO_CONN_DIRECT:
9027 case HCI_AUTO_CONN_ALWAYS:
9028 if (!is_connected(hdev, addr, addr_type))
9029 list_add(¶ms->action, &hdev->pend_le_conns);
9033 params->auto_connect = auto_connect;
9035 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
9036 addr, addr_type, auto_connect);
9041 static void device_added(struct sock *sk, struct hci_dev *hdev,
9042 bdaddr_t *bdaddr, u8 type, u8 action)
9044 struct mgmt_ev_device_added ev;
9046 bacpy(&ev.addr.bdaddr, bdaddr);
9047 ev.addr.type = type;
9050 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
9053 static int add_device(struct sock *sk, struct hci_dev *hdev,
9054 void *data, u16 len)
9056 struct mgmt_cp_add_device *cp = data;
9057 u8 auto_conn, addr_type;
9058 struct hci_conn_params *params;
9060 u32 current_flags = 0;
9062 bt_dev_dbg(hdev, "sock %p", sk);
9064 if (!bdaddr_type_is_valid(cp->addr.type) ||
9065 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
9066 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9067 MGMT_STATUS_INVALID_PARAMS,
9068 &cp->addr, sizeof(cp->addr));
9070 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
9071 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9072 MGMT_STATUS_INVALID_PARAMS,
9073 &cp->addr, sizeof(cp->addr));
9077 if (cp->addr.type == BDADDR_BREDR) {
9078 /* Only incoming connections action is supported for now */
9079 if (cp->action != 0x01) {
9080 err = mgmt_cmd_complete(sk, hdev->id,
9082 MGMT_STATUS_INVALID_PARAMS,
9083 &cp->addr, sizeof(cp->addr));
9087 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
9093 hci_req_update_scan(hdev);
9098 addr_type = le_addr_type(cp->addr.type);
9100 if (cp->action == 0x02)
9101 auto_conn = HCI_AUTO_CONN_ALWAYS;
9102 else if (cp->action == 0x01)
9103 auto_conn = HCI_AUTO_CONN_DIRECT;
9105 auto_conn = HCI_AUTO_CONN_REPORT;
9107 /* Kernel internally uses conn_params with resolvable private
9108 * address, but Add Device allows only identity addresses.
9109 * Make sure it is enforced before calling
9110 * hci_conn_params_lookup.
9112 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9113 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9114 MGMT_STATUS_INVALID_PARAMS,
9115 &cp->addr, sizeof(cp->addr));
9119 /* If the connection parameters don't exist for this device,
9120 * they will be created and configured with defaults.
9122 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
9124 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9125 MGMT_STATUS_FAILED, &cp->addr,
9129 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9132 current_flags = params->current_flags;
9135 hci_update_background_scan(hdev);
9138 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
9139 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
9140 SUPPORTED_DEVICE_FLAGS(), current_flags);
9142 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9143 MGMT_STATUS_SUCCESS, &cp->addr,
9147 hci_dev_unlock(hdev);
9151 static void device_removed(struct sock *sk, struct hci_dev *hdev,
9152 bdaddr_t *bdaddr, u8 type)
9154 struct mgmt_ev_device_removed ev;
9156 bacpy(&ev.addr.bdaddr, bdaddr);
9157 ev.addr.type = type;
9159 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
9162 static int remove_device(struct sock *sk, struct hci_dev *hdev,
9163 void *data, u16 len)
9165 struct mgmt_cp_remove_device *cp = data;
9168 bt_dev_dbg(hdev, "sock %p", sk);
9172 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9173 struct hci_conn_params *params;
9176 if (!bdaddr_type_is_valid(cp->addr.type)) {
9177 err = mgmt_cmd_complete(sk, hdev->id,
9178 MGMT_OP_REMOVE_DEVICE,
9179 MGMT_STATUS_INVALID_PARAMS,
9180 &cp->addr, sizeof(cp->addr));
9184 if (cp->addr.type == BDADDR_BREDR) {
9185 err = hci_bdaddr_list_del(&hdev->accept_list,
9189 err = mgmt_cmd_complete(sk, hdev->id,
9190 MGMT_OP_REMOVE_DEVICE,
9191 MGMT_STATUS_INVALID_PARAMS,
9197 hci_req_update_scan(hdev);
9199 device_removed(sk, hdev, &cp->addr.bdaddr,
9204 addr_type = le_addr_type(cp->addr.type);
9206 /* Kernel internally uses conn_params with resolvable private
9207 * address, but Remove Device allows only identity addresses.
9208 * Make sure it is enforced before calling
9209 * hci_conn_params_lookup.
9211 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9212 err = mgmt_cmd_complete(sk, hdev->id,
9213 MGMT_OP_REMOVE_DEVICE,
9214 MGMT_STATUS_INVALID_PARAMS,
9215 &cp->addr, sizeof(cp->addr));
9219 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9222 err = mgmt_cmd_complete(sk, hdev->id,
9223 MGMT_OP_REMOVE_DEVICE,
9224 MGMT_STATUS_INVALID_PARAMS,
9225 &cp->addr, sizeof(cp->addr));
9229 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
9230 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
9231 err = mgmt_cmd_complete(sk, hdev->id,
9232 MGMT_OP_REMOVE_DEVICE,
9233 MGMT_STATUS_INVALID_PARAMS,
9234 &cp->addr, sizeof(cp->addr));
9238 list_del(¶ms->action);
9239 list_del(¶ms->list);
9241 hci_update_background_scan(hdev);
9243 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
9245 struct hci_conn_params *p, *tmp;
9246 struct bdaddr_list *b, *btmp;
9248 if (cp->addr.type) {
9249 err = mgmt_cmd_complete(sk, hdev->id,
9250 MGMT_OP_REMOVE_DEVICE,
9251 MGMT_STATUS_INVALID_PARAMS,
9252 &cp->addr, sizeof(cp->addr));
9256 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
9257 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
9262 hci_req_update_scan(hdev);
9264 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
9265 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
9267 device_removed(sk, hdev, &p->addr, p->addr_type);
9268 if (p->explicit_connect) {
9269 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
9272 list_del(&p->action);
9277 bt_dev_dbg(hdev, "All LE connection parameters were removed");
9279 hci_update_background_scan(hdev);
9283 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
9284 MGMT_STATUS_SUCCESS, &cp->addr,
9287 hci_dev_unlock(hdev);
9291 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
9294 struct mgmt_cp_load_conn_param *cp = data;
9295 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
9296 sizeof(struct mgmt_conn_param));
9297 u16 param_count, expected_len;
9300 if (!lmp_le_capable(hdev))
9301 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9302 MGMT_STATUS_NOT_SUPPORTED);
9304 param_count = __le16_to_cpu(cp->param_count);
9305 if (param_count > max_param_count) {
9306 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
9308 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9309 MGMT_STATUS_INVALID_PARAMS);
9312 expected_len = struct_size(cp, params, param_count);
9313 if (expected_len != len) {
9314 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
9316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9317 MGMT_STATUS_INVALID_PARAMS);
9320 bt_dev_dbg(hdev, "param_count %u", param_count);
9324 hci_conn_params_clear_disabled(hdev);
9326 for (i = 0; i < param_count; i++) {
9327 struct mgmt_conn_param *param = &cp->params[i];
9328 struct hci_conn_params *hci_param;
9329 u16 min, max, latency, timeout;
9332 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
9335 if (param->addr.type == BDADDR_LE_PUBLIC) {
9336 addr_type = ADDR_LE_DEV_PUBLIC;
9337 } else if (param->addr.type == BDADDR_LE_RANDOM) {
9338 addr_type = ADDR_LE_DEV_RANDOM;
9340 bt_dev_err(hdev, "ignoring invalid connection parameters");
9344 min = le16_to_cpu(param->min_interval);
9345 max = le16_to_cpu(param->max_interval);
9346 latency = le16_to_cpu(param->latency);
9347 timeout = le16_to_cpu(param->timeout);
9349 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
9350 min, max, latency, timeout);
9352 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
9353 bt_dev_err(hdev, "ignoring invalid connection parameters");
9357 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
9360 bt_dev_err(hdev, "failed to add connection parameters");
9364 hci_param->conn_min_interval = min;
9365 hci_param->conn_max_interval = max;
9366 hci_param->conn_latency = latency;
9367 hci_param->supervision_timeout = timeout;
9370 hci_dev_unlock(hdev);
9372 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
9376 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
9377 void *data, u16 len)
9379 struct mgmt_cp_set_external_config *cp = data;
9383 bt_dev_dbg(hdev, "sock %p", sk);
9385 if (hdev_is_powered(hdev))
9386 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9387 MGMT_STATUS_REJECTED);
9389 if (cp->config != 0x00 && cp->config != 0x01)
9390 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9391 MGMT_STATUS_INVALID_PARAMS);
9393 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
9394 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9395 MGMT_STATUS_NOT_SUPPORTED);
9400 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
9402 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
9404 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
9411 err = new_options(hdev, sk);
9413 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
9414 mgmt_index_removed(hdev);
9416 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
9417 hci_dev_set_flag(hdev, HCI_CONFIG);
9418 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9420 queue_work(hdev->req_workqueue, &hdev->power_on);
9422 set_bit(HCI_RAW, &hdev->flags);
9423 mgmt_index_added(hdev);
9428 hci_dev_unlock(hdev);
9432 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
9433 void *data, u16 len)
9435 struct mgmt_cp_set_public_address *cp = data;
9439 bt_dev_dbg(hdev, "sock %p", sk);
9441 if (hdev_is_powered(hdev))
9442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9443 MGMT_STATUS_REJECTED);
9445 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
9446 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9447 MGMT_STATUS_INVALID_PARAMS);
9449 if (!hdev->set_bdaddr)
9450 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9451 MGMT_STATUS_NOT_SUPPORTED);
9455 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
9456 bacpy(&hdev->public_addr, &cp->bdaddr);
9458 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
9465 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
9466 err = new_options(hdev, sk);
9468 if (is_configured(hdev)) {
9469 mgmt_index_removed(hdev);
9471 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
9473 hci_dev_set_flag(hdev, HCI_CONFIG);
9474 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9476 queue_work(hdev->req_workqueue, &hdev->power_on);
9480 hci_dev_unlock(hdev);
9485 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
9489 struct mgmt_ev_device_name_update *ev = (void *)buf;
9495 bacpy(&ev->addr.bdaddr, bdaddr);
9496 ev->addr.type = BDADDR_BREDR;
9498 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9501 ev->eir_len = cpu_to_le16(eir_len);
9503 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
9504 sizeof(*ev) + eir_len, NULL);
9507 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9508 u8 link_type, u8 addr_type, u8 status)
9510 struct mgmt_ev_conn_update_failed ev;
9512 bacpy(&ev.addr.bdaddr, bdaddr);
9513 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9516 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
9517 &ev, sizeof(ev), NULL);
9520 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
9521 u8 link_type, u8 addr_type, u16 conn_interval,
9522 u16 conn_latency, u16 supervision_timeout)
9524 struct mgmt_ev_conn_updated ev;
9526 bacpy(&ev.addr.bdaddr, bdaddr);
9527 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9528 ev.conn_interval = cpu_to_le16(conn_interval);
9529 ev.conn_latency = cpu_to_le16(conn_latency);
9530 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
9532 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
9533 &ev, sizeof(ev), NULL);
9536 /* le device found event - Pass adv type */
9537 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9538 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
9539 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
9542 struct mgmt_ev_le_device_found *ev = (void *)buf;
9545 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
9548 /* Make sure that the buffer is big enough. The 5 extra bytes
9549 * are for the potential CoD field.
9551 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9554 memset(buf, 0, sizeof(buf));
9556 bacpy(&ev->addr.bdaddr, bdaddr);
9557 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9559 ev->flags = cpu_to_le32(flags);
9560 ev->adv_type = adv_type;
9563 memcpy(ev->eir, eir, eir_len);
9565 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
9566 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9569 if (scan_rsp_len > 0)
9570 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9572 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9573 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9575 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9579 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
9580 u16 opcode, struct sk_buff *skb)
9582 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
9583 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
9584 u8 *h192, *r192, *h256, *r256;
9585 struct mgmt_pending_cmd *cmd;
9589 bt_dev_dbg(hdev, "status %u", status);
9591 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
9595 mgmt_cp = cmd->param;
9598 status = mgmt_status(status);
9605 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
9606 struct hci_rp_read_local_oob_data *rp;
9608 if (skb->len != sizeof(*rp)) {
9609 status = MGMT_STATUS_FAILED;
9612 status = MGMT_STATUS_SUCCESS;
9613 rp = (void *)skb->data;
9615 eir_len = 5 + 18 + 18;
9622 struct hci_rp_read_local_oob_ext_data *rp;
9624 if (skb->len != sizeof(*rp)) {
9625 status = MGMT_STATUS_FAILED;
9628 status = MGMT_STATUS_SUCCESS;
9629 rp = (void *)skb->data;
9631 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
9632 eir_len = 5 + 18 + 18;
9636 eir_len = 5 + 18 + 18 + 18 + 18;
9646 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
9653 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
9654 hdev->dev_class, 3);
9657 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9658 EIR_SSP_HASH_C192, h192, 16);
9659 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9660 EIR_SSP_RAND_R192, r192, 16);
9664 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9665 EIR_SSP_HASH_C256, h256, 16);
9666 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9667 EIR_SSP_RAND_R256, r256, 16);
9671 mgmt_rp->type = mgmt_cp->type;
9672 mgmt_rp->eir_len = cpu_to_le16(eir_len);
9674 err = mgmt_cmd_complete(cmd->sk, hdev->id,
9675 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
9676 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
9677 if (err < 0 || status)
9680 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
9682 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9683 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
9684 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
9687 mgmt_pending_remove(cmd);
9690 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
9691 struct mgmt_cp_read_local_oob_ext_data *cp)
9693 struct mgmt_pending_cmd *cmd;
9694 struct hci_request req;
9697 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
9702 hci_req_init(&req, hdev);
9704 if (bredr_sc_enabled(hdev))
9705 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
9707 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
9709 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
9711 mgmt_pending_remove(cmd);
9718 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
9719 void *data, u16 data_len)
9721 struct mgmt_cp_read_local_oob_ext_data *cp = data;
9722 struct mgmt_rp_read_local_oob_ext_data *rp;
9725 u8 status, flags, role, addr[7], hash[16], rand[16];
9728 bt_dev_dbg(hdev, "sock %p", sk);
9730 if (hdev_is_powered(hdev)) {
9732 case BIT(BDADDR_BREDR):
9733 status = mgmt_bredr_support(hdev);
9739 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9740 status = mgmt_le_support(hdev);
9744 eir_len = 9 + 3 + 18 + 18 + 3;
9747 status = MGMT_STATUS_INVALID_PARAMS;
9752 status = MGMT_STATUS_NOT_POWERED;
9756 rp_len = sizeof(*rp) + eir_len;
9757 rp = kmalloc(rp_len, GFP_ATOMIC);
9768 case BIT(BDADDR_BREDR):
9769 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9770 err = read_local_ssp_oob_req(hdev, sk, cp);
9771 hci_dev_unlock(hdev);
9775 status = MGMT_STATUS_FAILED;
9778 eir_len = eir_append_data(rp->eir, eir_len,
9780 hdev->dev_class, 3);
9783 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9784 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
9785 smp_generate_oob(hdev, hash, rand) < 0) {
9786 hci_dev_unlock(hdev);
9787 status = MGMT_STATUS_FAILED;
9791 /* This should return the active RPA, but since the RPA
9792 * is only programmed on demand, it is really hard to fill
9793 * this in at the moment. For now disallow retrieving
9794 * local out-of-band data when privacy is in use.
9796 * Returning the identity address will not help here since
9797 * pairing happens before the identity resolving key is
9798 * known and thus the connection establishment happens
9799 * based on the RPA and not the identity address.
9801 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
9802 hci_dev_unlock(hdev);
9803 status = MGMT_STATUS_REJECTED;
9807 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
9808 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
9809 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
9810 bacmp(&hdev->static_addr, BDADDR_ANY))) {
9811 memcpy(addr, &hdev->static_addr, 6);
9814 memcpy(addr, &hdev->bdaddr, 6);
9818 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
9819 addr, sizeof(addr));
9821 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
9826 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
9827 &role, sizeof(role));
9829 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
9830 eir_len = eir_append_data(rp->eir, eir_len,
9832 hash, sizeof(hash));
9834 eir_len = eir_append_data(rp->eir, eir_len,
9836 rand, sizeof(rand));
9839 flags = mgmt_get_adv_discov_flags(hdev);
9841 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
9842 flags |= LE_AD_NO_BREDR;
9844 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
9845 &flags, sizeof(flags));
9849 hci_dev_unlock(hdev);
9851 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
9853 status = MGMT_STATUS_SUCCESS;
9856 rp->type = cp->type;
9857 rp->eir_len = cpu_to_le16(eir_len);
9859 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
9860 status, rp, sizeof(*rp) + eir_len);
9861 if (err < 0 || status)
9864 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9865 rp, sizeof(*rp) + eir_len,
9866 HCI_MGMT_OOB_DATA_EVENTS, sk);
9874 static u32 get_supported_adv_flags(struct hci_dev *hdev)
9878 flags |= MGMT_ADV_FLAG_CONNECTABLE;
9879 flags |= MGMT_ADV_FLAG_DISCOV;
9880 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
9881 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
9882 flags |= MGMT_ADV_FLAG_APPEARANCE;
9883 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
9884 flags |= MGMT_ADV_PARAM_DURATION;
9885 flags |= MGMT_ADV_PARAM_TIMEOUT;
9886 flags |= MGMT_ADV_PARAM_INTERVALS;
9887 flags |= MGMT_ADV_PARAM_TX_POWER;
9888 flags |= MGMT_ADV_PARAM_SCAN_RSP;
9890 /* In extended adv TX_POWER returned from Set Adv Param
9891 * will be always valid.
9893 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
9894 ext_adv_capable(hdev))
9895 flags |= MGMT_ADV_FLAG_TX_POWER;
9897 if (ext_adv_capable(hdev)) {
9898 flags |= MGMT_ADV_FLAG_SEC_1M;
9899 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
9900 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
9902 if (hdev->le_features[1] & HCI_LE_PHY_2M)
9903 flags |= MGMT_ADV_FLAG_SEC_2M;
9905 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
9906 flags |= MGMT_ADV_FLAG_SEC_CODED;
9912 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
9913 void *data, u16 data_len)
9915 struct mgmt_rp_read_adv_features *rp;
9918 struct adv_info *adv_instance;
9919 u32 supported_flags;
9922 bt_dev_dbg(hdev, "sock %p", sk);
9924 if (!lmp_le_capable(hdev))
9925 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9926 MGMT_STATUS_REJECTED);
9928 /* Enabling the experimental LL Privay support disables support for
9931 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9932 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9933 MGMT_STATUS_NOT_SUPPORTED);
9937 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
9938 rp = kmalloc(rp_len, GFP_ATOMIC);
9940 hci_dev_unlock(hdev);
9944 supported_flags = get_supported_adv_flags(hdev);
9946 rp->supported_flags = cpu_to_le32(supported_flags);
9947 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
9948 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
9949 rp->max_instances = hdev->le_num_of_adv_sets;
9950 rp->num_instances = hdev->adv_instance_cnt;
9952 instance = rp->instance;
9953 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
9954 *instance = adv_instance->instance;
9958 hci_dev_unlock(hdev);
9960 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9961 MGMT_STATUS_SUCCESS, rp, rp_len);
9968 static u8 calculate_name_len(struct hci_dev *hdev)
9970 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
9972 return append_local_name(hdev, buf, 0);
9975 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
9978 u8 max_len = HCI_MAX_AD_LENGTH;
9981 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
9982 MGMT_ADV_FLAG_LIMITED_DISCOV |
9983 MGMT_ADV_FLAG_MANAGED_FLAGS))
9986 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
9989 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
9990 max_len -= calculate_name_len(hdev);
9992 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
9999 static bool flags_managed(u32 adv_flags)
10001 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
10002 MGMT_ADV_FLAG_LIMITED_DISCOV |
10003 MGMT_ADV_FLAG_MANAGED_FLAGS);
10006 static bool tx_power_managed(u32 adv_flags)
10008 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
10011 static bool name_managed(u32 adv_flags)
10013 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
10016 static bool appearance_managed(u32 adv_flags)
10018 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
10021 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
10022 u8 len, bool is_adv_data)
10027 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
10032 /* Make sure that the data is correctly formatted. */
10033 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
10039 if (data[i + 1] == EIR_FLAGS &&
10040 (!is_adv_data || flags_managed(adv_flags)))
10043 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
10046 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
10049 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
10052 if (data[i + 1] == EIR_APPEARANCE &&
10053 appearance_managed(adv_flags))
10056 /* If the current field length would exceed the total data
10057 * length, then it's invalid.
10059 if (i + cur_len >= len)
10066 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
10068 u32 supported_flags, phy_flags;
10070 /* The current implementation only supports a subset of the specified
10071 * flags. Also need to check mutual exclusiveness of sec flags.
10073 supported_flags = get_supported_adv_flags(hdev);
10074 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
10075 if (adv_flags & ~supported_flags ||
10076 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
10082 static bool adv_busy(struct hci_dev *hdev)
10084 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
10085 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
10086 pending_find(MGMT_OP_SET_LE, hdev) ||
10087 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
10088 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
10091 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
10094 struct mgmt_pending_cmd *cmd;
10095 struct mgmt_cp_add_advertising *cp;
10096 struct mgmt_rp_add_advertising rp;
10097 struct adv_info *adv_instance, *n;
10100 bt_dev_dbg(hdev, "status %u", status);
10102 hci_dev_lock(hdev);
10104 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
10106 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
10108 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
10109 if (!adv_instance->pending)
10113 adv_instance->pending = false;
10117 instance = adv_instance->instance;
10119 if (hdev->cur_adv_instance == instance)
10120 cancel_adv_timeout(hdev);
10122 hci_remove_adv_instance(hdev, instance);
10123 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
10130 rp.instance = cp->instance;
10133 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10134 mgmt_status(status));
10136 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10137 mgmt_status(status), &rp, sizeof(rp));
10139 mgmt_pending_remove(cmd);
10142 hci_dev_unlock(hdev);
10145 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
10146 void *data, u16 data_len)
10148 struct mgmt_cp_add_advertising *cp = data;
10149 struct mgmt_rp_add_advertising rp;
10152 u16 timeout, duration;
10153 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
10154 u8 schedule_instance = 0;
10155 struct adv_info *next_instance;
10157 struct mgmt_pending_cmd *cmd;
10158 struct hci_request req;
10160 bt_dev_dbg(hdev, "sock %p", sk);
10162 status = mgmt_le_support(hdev);
10164 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10167 /* Enabling the experimental LL Privay support disables support for
10170 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
10171 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10172 MGMT_STATUS_NOT_SUPPORTED);
10174 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10175 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10176 MGMT_STATUS_INVALID_PARAMS);
10178 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
10179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10180 MGMT_STATUS_INVALID_PARAMS);
10182 flags = __le32_to_cpu(cp->flags);
10183 timeout = __le16_to_cpu(cp->timeout);
10184 duration = __le16_to_cpu(cp->duration);
10186 if (!requested_adv_flags_are_valid(hdev, flags))
10187 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10188 MGMT_STATUS_INVALID_PARAMS);
10190 hci_dev_lock(hdev);
10192 if (timeout && !hdev_is_powered(hdev)) {
10193 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10194 MGMT_STATUS_REJECTED);
10198 if (adv_busy(hdev)) {
10199 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10204 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
10205 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
10206 cp->scan_rsp_len, false)) {
10207 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10208 MGMT_STATUS_INVALID_PARAMS);
10212 err = hci_add_adv_instance(hdev, cp->instance, flags,
10213 cp->adv_data_len, cp->data,
10215 cp->data + cp->adv_data_len,
10217 HCI_ADV_TX_POWER_NO_PREFERENCE,
10218 hdev->le_adv_min_interval,
10219 hdev->le_adv_max_interval);
10221 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10222 MGMT_STATUS_FAILED);
10226 /* Only trigger an advertising added event if a new instance was
10229 if (hdev->adv_instance_cnt > prev_instance_cnt)
10230 mgmt_advertising_added(sk, hdev, cp->instance);
10232 if (hdev->cur_adv_instance == cp->instance) {
10233 /* If the currently advertised instance is being changed then
10234 * cancel the current advertising and schedule the next
10235 * instance. If there is only one instance then the overridden
10236 * advertising data will be visible right away.
10238 cancel_adv_timeout(hdev);
10240 next_instance = hci_get_next_instance(hdev, cp->instance);
10242 schedule_instance = next_instance->instance;
10243 } else if (!hdev->adv_instance_timeout) {
10244 /* Immediately advertise the new instance if no other
10245 * instance is currently being advertised.
10247 schedule_instance = cp->instance;
10250 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
10251 * there is no instance to be advertised then we have no HCI
10252 * communication to make. Simply return.
10254 if (!hdev_is_powered(hdev) ||
10255 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10256 !schedule_instance) {
10257 rp.instance = cp->instance;
10258 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10259 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10263 /* We're good to go, update advertising data, parameters, and start
10266 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
10273 hci_req_init(&req, hdev);
10275 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
10278 err = hci_req_run(&req, add_advertising_complete);
10281 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10282 MGMT_STATUS_FAILED);
10283 mgmt_pending_remove(cmd);
10287 hci_dev_unlock(hdev);
10292 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
10295 struct mgmt_pending_cmd *cmd;
10296 struct mgmt_cp_add_ext_adv_params *cp;
10297 struct mgmt_rp_add_ext_adv_params rp;
10298 struct adv_info *adv_instance;
10301 BT_DBG("%s", hdev->name);
10303 hci_dev_lock(hdev);
10305 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
10310 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10314 rp.instance = cp->instance;
10315 rp.tx_power = adv_instance->tx_power;
10317 /* While we're at it, inform userspace of the available space for this
10318 * advertisement, given the flags that will be used.
10320 flags = __le32_to_cpu(cp->flags);
10321 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10322 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10325 /* If this advertisement was previously advertising and we
10326 * failed to update it, we signal that it has been removed and
10327 * delete its structure
10329 if (!adv_instance->pending)
10330 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
10332 hci_remove_adv_instance(hdev, cp->instance);
10334 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10335 mgmt_status(status));
10338 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10339 mgmt_status(status), &rp, sizeof(rp));
10344 mgmt_pending_remove(cmd);
10346 hci_dev_unlock(hdev);
10349 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
10350 void *data, u16 data_len)
10352 struct mgmt_cp_add_ext_adv_params *cp = data;
10353 struct mgmt_rp_add_ext_adv_params rp;
10354 struct mgmt_pending_cmd *cmd = NULL;
10355 struct adv_info *adv_instance;
10356 struct hci_request req;
10357 u32 flags, min_interval, max_interval;
10358 u16 timeout, duration;
10363 BT_DBG("%s", hdev->name);
10365 status = mgmt_le_support(hdev);
10367 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10370 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10372 MGMT_STATUS_INVALID_PARAMS);
10374 /* The purpose of breaking add_advertising into two separate MGMT calls
10375 * for params and data is to allow more parameters to be added to this
10376 * structure in the future. For this reason, we verify that we have the
10377 * bare minimum structure we know of when the interface was defined. Any
10378 * extra parameters we don't know about will be ignored in this request.
10380 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
10381 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10382 MGMT_STATUS_INVALID_PARAMS);
10384 flags = __le32_to_cpu(cp->flags);
10386 if (!requested_adv_flags_are_valid(hdev, flags))
10387 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10388 MGMT_STATUS_INVALID_PARAMS);
10390 hci_dev_lock(hdev);
10392 /* In new interface, we require that we are powered to register */
10393 if (!hdev_is_powered(hdev)) {
10394 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10395 MGMT_STATUS_REJECTED);
10399 if (adv_busy(hdev)) {
10400 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10405 /* Parse defined parameters from request, use defaults otherwise */
10406 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
10407 __le16_to_cpu(cp->timeout) : 0;
10409 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
10410 __le16_to_cpu(cp->duration) :
10411 hdev->def_multi_adv_rotation_duration;
10413 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10414 __le32_to_cpu(cp->min_interval) :
10415 hdev->le_adv_min_interval;
10417 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10418 __le32_to_cpu(cp->max_interval) :
10419 hdev->le_adv_max_interval;
10421 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
10423 HCI_ADV_TX_POWER_NO_PREFERENCE;
10425 /* Create advertising instance with no advertising or response data */
10426 err = hci_add_adv_instance(hdev, cp->instance, flags,
10427 0, NULL, 0, NULL, timeout, duration,
10428 tx_power, min_interval, max_interval);
10431 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10432 MGMT_STATUS_FAILED);
10436 /* Submit request for advertising params if ext adv available */
10437 if (ext_adv_capable(hdev)) {
10438 hci_req_init(&req, hdev);
10439 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10441 /* Updating parameters of an active instance will return a
10442 * Command Disallowed error, so we must first disable the
10443 * instance if it is active.
10445 if (!adv_instance->pending)
10446 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10448 __hci_req_setup_ext_adv_instance(&req, cp->instance);
10450 err = hci_req_run(&req, add_ext_adv_params_complete);
10453 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
10454 hdev, data, data_len);
10457 hci_remove_adv_instance(hdev, cp->instance);
10462 rp.instance = cp->instance;
10463 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
10464 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10465 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10466 err = mgmt_cmd_complete(sk, hdev->id,
10467 MGMT_OP_ADD_EXT_ADV_PARAMS,
10468 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10472 hci_dev_unlock(hdev);
10477 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
10480 struct mgmt_cp_add_ext_adv_data *cp = data;
10481 struct mgmt_rp_add_ext_adv_data rp;
10482 u8 schedule_instance = 0;
10483 struct adv_info *next_instance;
10484 struct adv_info *adv_instance;
10486 struct mgmt_pending_cmd *cmd;
10487 struct hci_request req;
10489 BT_DBG("%s", hdev->name);
10491 hci_dev_lock(hdev);
10493 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10495 if (!adv_instance) {
10496 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10497 MGMT_STATUS_INVALID_PARAMS);
10501 /* In new interface, we require that we are powered to register */
10502 if (!hdev_is_powered(hdev)) {
10503 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10504 MGMT_STATUS_REJECTED);
10505 goto clear_new_instance;
10508 if (adv_busy(hdev)) {
10509 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10511 goto clear_new_instance;
10514 /* Validate new data */
10515 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
10516 cp->adv_data_len, true) ||
10517 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
10518 cp->adv_data_len, cp->scan_rsp_len, false)) {
10519 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10520 MGMT_STATUS_INVALID_PARAMS);
10521 goto clear_new_instance;
10524 /* Set the data in the advertising instance */
10525 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
10526 cp->data, cp->scan_rsp_len,
10527 cp->data + cp->adv_data_len);
10529 /* We're good to go, update advertising data, parameters, and start
10533 hci_req_init(&req, hdev);
10535 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
10537 if (ext_adv_capable(hdev)) {
10538 __hci_req_update_adv_data(&req, cp->instance);
10539 __hci_req_update_scan_rsp_data(&req, cp->instance);
10540 __hci_req_enable_ext_advertising(&req, cp->instance);
10543 /* If using software rotation, determine next instance to use */
10545 if (hdev->cur_adv_instance == cp->instance) {
10546 /* If the currently advertised instance is being changed
10547 * then cancel the current advertising and schedule the
10548 * next instance. If there is only one instance then the
10549 * overridden advertising data will be visible right
10552 cancel_adv_timeout(hdev);
10554 next_instance = hci_get_next_instance(hdev,
10557 schedule_instance = next_instance->instance;
10558 } else if (!hdev->adv_instance_timeout) {
10559 /* Immediately advertise the new instance if no other
10560 * instance is currently being advertised.
10562 schedule_instance = cp->instance;
10565 /* If the HCI_ADVERTISING flag is set or there is no instance to
10566 * be advertised then we have no HCI communication to make.
10569 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10570 !schedule_instance) {
10571 if (adv_instance->pending) {
10572 mgmt_advertising_added(sk, hdev, cp->instance);
10573 adv_instance->pending = false;
10575 rp.instance = cp->instance;
10576 err = mgmt_cmd_complete(sk, hdev->id,
10577 MGMT_OP_ADD_EXT_ADV_DATA,
10578 MGMT_STATUS_SUCCESS, &rp,
10583 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
10587 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
10591 goto clear_new_instance;
10595 err = hci_req_run(&req, add_advertising_complete);
10598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10599 MGMT_STATUS_FAILED);
10600 mgmt_pending_remove(cmd);
10601 goto clear_new_instance;
10604 /* We were successful in updating data, so trigger advertising_added
10605 * event if this is an instance that wasn't previously advertising. If
10606 * a failure occurs in the requests we initiated, we will remove the
10607 * instance again in add_advertising_complete
10609 if (adv_instance->pending)
10610 mgmt_advertising_added(sk, hdev, cp->instance);
10614 clear_new_instance:
10615 hci_remove_adv_instance(hdev, cp->instance);
10618 hci_dev_unlock(hdev);
10623 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
10626 struct mgmt_pending_cmd *cmd;
10627 struct mgmt_cp_remove_advertising *cp;
10628 struct mgmt_rp_remove_advertising rp;
10630 bt_dev_dbg(hdev, "status %u", status);
10632 hci_dev_lock(hdev);
10634 /* A failure status here only means that we failed to disable
10635 * advertising. Otherwise, the advertising instance has been removed,
10636 * so report success.
10638 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
10643 rp.instance = cp->instance;
10645 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
10647 mgmt_pending_remove(cmd);
10650 hci_dev_unlock(hdev);
10653 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
10654 void *data, u16 data_len)
10656 struct mgmt_cp_remove_advertising *cp = data;
10657 struct mgmt_rp_remove_advertising rp;
10658 struct mgmt_pending_cmd *cmd;
10659 struct hci_request req;
10662 bt_dev_dbg(hdev, "sock %p", sk);
10664 /* Enabling the experimental LL Privay support disables support for
10667 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
10668 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
10669 MGMT_STATUS_NOT_SUPPORTED);
10671 hci_dev_lock(hdev);
10673 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
10674 err = mgmt_cmd_status(sk, hdev->id,
10675 MGMT_OP_REMOVE_ADVERTISING,
10676 MGMT_STATUS_INVALID_PARAMS);
10680 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
10681 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
10682 pending_find(MGMT_OP_SET_LE, hdev)) {
10683 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10688 if (list_empty(&hdev->adv_instances)) {
10689 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10690 MGMT_STATUS_INVALID_PARAMS);
10694 hci_req_init(&req, hdev);
10696 /* If we use extended advertising, instance is disabled and removed */
10697 if (ext_adv_capable(hdev)) {
10698 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10699 __hci_req_remove_ext_adv_instance(&req, cp->instance);
10702 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
10704 if (list_empty(&hdev->adv_instances))
10705 __hci_req_disable_advertising(&req);
10707 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
10708 * flag is set or the device isn't powered then we have no HCI
10709 * communication to make. Simply return.
10711 if (skb_queue_empty(&req.cmd_q) ||
10712 !hdev_is_powered(hdev) ||
10713 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
10714 hci_req_purge(&req);
10715 rp.instance = cp->instance;
10716 err = mgmt_cmd_complete(sk, hdev->id,
10717 MGMT_OP_REMOVE_ADVERTISING,
10718 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10722 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
10729 err = hci_req_run(&req, remove_advertising_complete);
10731 mgmt_pending_remove(cmd);
10734 hci_dev_unlock(hdev);
10739 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
10740 void *data, u16 data_len)
10742 struct mgmt_cp_get_adv_size_info *cp = data;
10743 struct mgmt_rp_get_adv_size_info rp;
10744 u32 flags, supported_flags;
10747 bt_dev_dbg(hdev, "sock %p", sk);
10749 if (!lmp_le_capable(hdev))
10750 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10751 MGMT_STATUS_REJECTED);
10753 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10755 MGMT_STATUS_INVALID_PARAMS);
10757 flags = __le32_to_cpu(cp->flags);
10759 /* The current implementation only supports a subset of the specified
10762 supported_flags = get_supported_adv_flags(hdev);
10763 if (flags & ~supported_flags)
10764 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10765 MGMT_STATUS_INVALID_PARAMS);
10767 rp.instance = cp->instance;
10768 rp.flags = cp->flags;
10769 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10770 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10772 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10773 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10778 static const struct hci_mgmt_handler mgmt_handlers[] = {
10779 { NULL }, /* 0x0000 (no command) */
10780 { read_version, MGMT_READ_VERSION_SIZE,
10782 HCI_MGMT_UNTRUSTED },
10783 { read_commands, MGMT_READ_COMMANDS_SIZE,
10785 HCI_MGMT_UNTRUSTED },
10786 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
10788 HCI_MGMT_UNTRUSTED },
10789 { read_controller_info, MGMT_READ_INFO_SIZE,
10790 HCI_MGMT_UNTRUSTED },
10791 { set_powered, MGMT_SETTING_SIZE },
10792 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
10793 { set_connectable, MGMT_SETTING_SIZE },
10794 { set_fast_connectable, MGMT_SETTING_SIZE },
10795 { set_bondable, MGMT_SETTING_SIZE },
10796 { set_link_security, MGMT_SETTING_SIZE },
10797 { set_ssp, MGMT_SETTING_SIZE },
10798 { set_hs, MGMT_SETTING_SIZE },
10799 { set_le, MGMT_SETTING_SIZE },
10800 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
10801 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
10802 { add_uuid, MGMT_ADD_UUID_SIZE },
10803 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
10804 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
10805 HCI_MGMT_VAR_LEN },
10806 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
10807 HCI_MGMT_VAR_LEN },
10808 { disconnect, MGMT_DISCONNECT_SIZE },
10809 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
10810 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
10811 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
10812 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
10813 { pair_device, MGMT_PAIR_DEVICE_SIZE },
10814 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
10815 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
10816 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
10817 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
10818 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
10819 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
10820 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
10821 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
10822 HCI_MGMT_VAR_LEN },
10823 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
10824 { start_discovery, MGMT_START_DISCOVERY_SIZE },
10825 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
10826 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
10827 { block_device, MGMT_BLOCK_DEVICE_SIZE },
10828 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
10829 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
10830 { set_advertising, MGMT_SETTING_SIZE },
10831 { set_bredr, MGMT_SETTING_SIZE },
10832 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
10833 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
10834 { set_secure_conn, MGMT_SETTING_SIZE },
10835 { set_debug_keys, MGMT_SETTING_SIZE },
10836 { set_privacy, MGMT_SET_PRIVACY_SIZE },
10837 { load_irks, MGMT_LOAD_IRKS_SIZE,
10838 HCI_MGMT_VAR_LEN },
10839 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
10840 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
10841 { add_device, MGMT_ADD_DEVICE_SIZE },
10842 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
10843 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
10844 HCI_MGMT_VAR_LEN },
10845 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
10847 HCI_MGMT_UNTRUSTED },
10848 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
10849 HCI_MGMT_UNCONFIGURED |
10850 HCI_MGMT_UNTRUSTED },
10851 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
10852 HCI_MGMT_UNCONFIGURED },
10853 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
10854 HCI_MGMT_UNCONFIGURED },
10855 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
10856 HCI_MGMT_VAR_LEN },
10857 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
10858 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
10860 HCI_MGMT_UNTRUSTED },
10861 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
10862 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
10863 HCI_MGMT_VAR_LEN },
10864 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
10865 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
10866 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
10867 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
10868 HCI_MGMT_UNTRUSTED },
10869 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
10870 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
10871 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
10872 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
10873 HCI_MGMT_VAR_LEN },
10874 { set_wideband_speech, MGMT_SETTING_SIZE },
10875 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
10876 HCI_MGMT_UNTRUSTED },
10877 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
10878 HCI_MGMT_UNTRUSTED |
10879 HCI_MGMT_HDEV_OPTIONAL },
10880 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
10882 HCI_MGMT_HDEV_OPTIONAL },
10883 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
10884 HCI_MGMT_UNTRUSTED },
10885 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
10886 HCI_MGMT_VAR_LEN },
10887 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
10888 HCI_MGMT_UNTRUSTED },
10889 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
10890 HCI_MGMT_VAR_LEN },
10891 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
10892 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
10893 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
10894 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
10895 HCI_MGMT_VAR_LEN },
10896 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
10897 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
10898 HCI_MGMT_VAR_LEN },
10899 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
10900 HCI_MGMT_VAR_LEN },
10901 { add_adv_patterns_monitor_rssi,
10902 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
10903 HCI_MGMT_VAR_LEN },
10907 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
10908 { NULL }, /* 0x0000 (no command) */
10909 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
10910 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
10911 HCI_MGMT_VAR_LEN },
10912 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
10913 HCI_MGMT_VAR_LEN },
10914 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
10915 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
10916 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
10917 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
10918 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
10919 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
10920 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
10921 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
10922 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
10923 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
10924 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
10925 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
10926 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
10927 { get_adv_tx_power, MGMT_GET_ADV_TX_POWER_SIZE },
10928 { enable_bt_6lowpan, MGMT_ENABLE_BT_6LOWPAN_SIZE },
10929 { connect_bt_6lowpan, MGMT_CONNECT_6LOWPAN_SIZE },
10930 { disconnect_bt_6lowpan, MGMT_DISCONNECT_6LOWPAN_SIZE },
10931 { read_maximum_le_data_length,
10932 MGMT_LE_READ_MAXIMUM_DATA_LENGTH_SIZE },
10933 { write_host_suggested_le_data_length,
10934 MGMT_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH_SIZE },
10935 { read_host_suggested_data_length,
10936 MGMT_LE_READ_HOST_SUGGESTED_DATA_LENGTH_SIZE },
10937 { set_le_data_length_params,
10938 MGMT_LE_SET_DATA_LENGTH_SIZE },
10942 void mgmt_index_added(struct hci_dev *hdev)
10944 struct mgmt_ev_ext_index ev;
10946 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10949 switch (hdev->dev_type) {
10951 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10952 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
10953 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10956 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
10957 HCI_MGMT_INDEX_EVENTS);
10968 ev.bus = hdev->bus;
10970 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
10971 HCI_MGMT_EXT_INDEX_EVENTS);
10974 void mgmt_index_removed(struct hci_dev *hdev)
10976 struct mgmt_ev_ext_index ev;
10977 u8 status = MGMT_STATUS_INVALID_INDEX;
10979 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10982 switch (hdev->dev_type) {
10984 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10986 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10987 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
10988 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10991 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
10992 HCI_MGMT_INDEX_EVENTS);
11003 ev.bus = hdev->bus;
11005 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
11006 HCI_MGMT_EXT_INDEX_EVENTS);
11009 /* This function requires the caller holds hdev->lock */
11010 static void restart_le_actions(struct hci_dev *hdev)
11012 struct hci_conn_params *p;
11014 list_for_each_entry(p, &hdev->le_conn_params, list) {
11015 /* Needed for AUTO_OFF case where might not "really"
11016 * have been powered off.
11018 list_del_init(&p->action);
11020 switch (p->auto_connect) {
11021 case HCI_AUTO_CONN_DIRECT:
11022 case HCI_AUTO_CONN_ALWAYS:
11023 list_add(&p->action, &hdev->pend_le_conns);
11025 case HCI_AUTO_CONN_REPORT:
11026 list_add(&p->action, &hdev->pend_le_reports);
11034 void mgmt_power_on(struct hci_dev *hdev, int err)
11036 struct cmd_lookup match = { NULL, hdev };
11038 bt_dev_dbg(hdev, "err %d", err);
11040 hci_dev_lock(hdev);
11043 restart_le_actions(hdev);
11044 hci_update_background_scan(hdev);
11047 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11049 new_settings(hdev, match.sk);
11052 sock_put(match.sk);
11054 hci_dev_unlock(hdev);
11057 void __mgmt_power_off(struct hci_dev *hdev)
11059 struct cmd_lookup match = { NULL, hdev };
11060 u8 status, zero_cod[] = { 0, 0, 0 };
11062 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11064 /* If the power off is because of hdev unregistration let
11065 * use the appropriate INVALID_INDEX status. Otherwise use
11066 * NOT_POWERED. We cover both scenarios here since later in
11067 * mgmt_index_removed() any hci_conn callbacks will have already
11068 * been triggered, potentially causing misleading DISCONNECTED
11069 * status responses.
11071 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
11072 status = MGMT_STATUS_INVALID_INDEX;
11074 status = MGMT_STATUS_NOT_POWERED;
11076 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11078 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
11079 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
11080 zero_cod, sizeof(zero_cod),
11081 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11082 ext_info_changed(hdev, NULL);
11085 new_settings(hdev, match.sk);
11088 sock_put(match.sk);
11091 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
11093 struct mgmt_pending_cmd *cmd;
11096 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11100 if (err == -ERFKILL)
11101 status = MGMT_STATUS_RFKILLED;
11103 status = MGMT_STATUS_FAILED;
11105 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
11107 mgmt_pending_remove(cmd);
11110 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
11113 struct mgmt_ev_new_link_key ev;
11115 memset(&ev, 0, sizeof(ev));
11117 ev.store_hint = persistent;
11118 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11119 ev.key.addr.type = BDADDR_BREDR;
11120 ev.key.type = key->type;
11121 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
11122 ev.key.pin_len = key->pin_len;
11124 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
11127 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
11129 switch (ltk->type) {
11131 case SMP_LTK_RESPONDER:
11132 if (ltk->authenticated)
11133 return MGMT_LTK_AUTHENTICATED;
11134 return MGMT_LTK_UNAUTHENTICATED;
11136 if (ltk->authenticated)
11137 return MGMT_LTK_P256_AUTH;
11138 return MGMT_LTK_P256_UNAUTH;
11139 case SMP_LTK_P256_DEBUG:
11140 return MGMT_LTK_P256_DEBUG;
11143 return MGMT_LTK_UNAUTHENTICATED;
11146 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
11148 struct mgmt_ev_new_long_term_key ev;
11150 memset(&ev, 0, sizeof(ev));
11152 /* Devices using resolvable or non-resolvable random addresses
11153 * without providing an identity resolving key don't require
11154 * to store long term keys. Their addresses will change the
11155 * next time around.
11157 * Only when a remote device provides an identity address
11158 * make sure the long term key is stored. If the remote
11159 * identity is known, the long term keys are internally
11160 * mapped to the identity address. So allow static random
11161 * and public addresses here.
11163 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11164 (key->bdaddr.b[5] & 0xc0) != 0xc0)
11165 ev.store_hint = 0x00;
11167 ev.store_hint = persistent;
11169 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11170 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
11171 ev.key.type = mgmt_ltk_type(key);
11172 ev.key.enc_size = key->enc_size;
11173 ev.key.ediv = key->ediv;
11174 ev.key.rand = key->rand;
11176 if (key->type == SMP_LTK)
11177 ev.key.initiator = 1;
11179 /* Make sure we copy only the significant bytes based on the
11180 * encryption key size, and set the rest of the value to zeroes.
11182 memcpy(ev.key.val, key->val, key->enc_size);
11183 memset(ev.key.val + key->enc_size, 0,
11184 sizeof(ev.key.val) - key->enc_size);
11186 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
11189 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
11191 struct mgmt_ev_new_irk ev;
11193 memset(&ev, 0, sizeof(ev));
11195 ev.store_hint = persistent;
11197 bacpy(&ev.rpa, &irk->rpa);
11198 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
11199 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
11200 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
11202 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
11205 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
11208 struct mgmt_ev_new_csrk ev;
11210 memset(&ev, 0, sizeof(ev));
11212 /* Devices using resolvable or non-resolvable random addresses
11213 * without providing an identity resolving key don't require
11214 * to store signature resolving keys. Their addresses will change
11215 * the next time around.
11217 * Only when a remote device provides an identity address
11218 * make sure the signature resolving key is stored. So allow
11219 * static random and public addresses here.
11221 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11222 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
11223 ev.store_hint = 0x00;
11225 ev.store_hint = persistent;
11227 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
11228 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
11229 ev.key.type = csrk->type;
11230 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
11232 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
11235 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
11236 u8 bdaddr_type, u8 store_hint, u16 min_interval,
11237 u16 max_interval, u16 latency, u16 timeout)
11239 struct mgmt_ev_new_conn_param ev;
11241 if (!hci_is_identity_address(bdaddr, bdaddr_type))
11244 memset(&ev, 0, sizeof(ev));
11245 bacpy(&ev.addr.bdaddr, bdaddr);
11246 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
11247 ev.store_hint = store_hint;
11248 ev.min_interval = cpu_to_le16(min_interval);
11249 ev.max_interval = cpu_to_le16(max_interval);
11250 ev.latency = cpu_to_le16(latency);
11251 ev.timeout = cpu_to_le16(timeout);
11253 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
11256 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
11257 u8 *name, u8 name_len)
11260 struct mgmt_ev_device_connected *ev = (void *) buf;
11264 bacpy(&ev->addr.bdaddr, &conn->dst);
11265 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11268 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
11270 ev->flags = __cpu_to_le32(flags);
11272 /* We must ensure that the EIR Data fields are ordered and
11273 * unique. Keep it simple for now and avoid the problem by not
11274 * adding any BR/EDR data to the LE adv.
11276 if (conn->le_adv_data_len > 0) {
11277 memcpy(&ev->eir[eir_len],
11278 conn->le_adv_data, conn->le_adv_data_len);
11279 eir_len = conn->le_adv_data_len;
11282 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
11285 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
11286 eir_len = eir_append_data(ev->eir, eir_len,
11288 conn->dev_class, 3);
11291 ev->eir_len = cpu_to_le16(eir_len);
11293 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
11294 sizeof(*ev) + eir_len, NULL);
11297 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
11299 struct sock **sk = data;
11301 cmd->cmd_complete(cmd, 0);
11306 mgmt_pending_remove(cmd);
11309 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
11311 struct hci_dev *hdev = data;
11312 struct mgmt_cp_unpair_device *cp = cmd->param;
11314 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
11316 cmd->cmd_complete(cmd, 0);
11317 mgmt_pending_remove(cmd);
11320 bool mgmt_powering_down(struct hci_dev *hdev)
11322 struct mgmt_pending_cmd *cmd;
11323 struct mgmt_mode *cp;
11325 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11336 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
11337 u8 link_type, u8 addr_type, u8 reason,
11338 bool mgmt_connected)
11340 struct mgmt_ev_device_disconnected ev;
11341 struct sock *sk = NULL;
11343 /* The connection is still in hci_conn_hash so test for 1
11344 * instead of 0 to know if this is the last one.
11346 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11347 cancel_delayed_work(&hdev->power_off);
11348 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11351 if (!mgmt_connected)
11354 if (link_type != ACL_LINK && link_type != LE_LINK)
11357 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
11359 bacpy(&ev.addr.bdaddr, bdaddr);
11360 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11361 ev.reason = reason;
11363 /* Report disconnects due to suspend */
11364 if (hdev->suspended)
11365 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
11367 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
11372 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11376 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
11377 u8 link_type, u8 addr_type, u8 status)
11379 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
11380 struct mgmt_cp_disconnect *cp;
11381 struct mgmt_pending_cmd *cmd;
11383 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11386 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
11392 if (bacmp(bdaddr, &cp->addr.bdaddr))
11395 if (cp->addr.type != bdaddr_type)
11398 cmd->cmd_complete(cmd, mgmt_status(status));
11399 mgmt_pending_remove(cmd);
11402 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11403 u8 addr_type, u8 status)
11405 struct mgmt_ev_connect_failed ev;
11407 /* The connection is still in hci_conn_hash so test for 1
11408 * instead of 0 to know if this is the last one.
11410 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11411 cancel_delayed_work(&hdev->power_off);
11412 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11415 bacpy(&ev.addr.bdaddr, bdaddr);
11416 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11417 ev.status = mgmt_status(status);
11419 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
11422 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
11424 struct mgmt_ev_pin_code_request ev;
11426 bacpy(&ev.addr.bdaddr, bdaddr);
11427 ev.addr.type = BDADDR_BREDR;
11428 ev.secure = secure;
11430 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
11433 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11436 struct mgmt_pending_cmd *cmd;
11438 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
11442 cmd->cmd_complete(cmd, mgmt_status(status));
11443 mgmt_pending_remove(cmd);
11446 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11449 struct mgmt_pending_cmd *cmd;
11451 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
11455 cmd->cmd_complete(cmd, mgmt_status(status));
11456 mgmt_pending_remove(cmd);
11459 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11460 u8 link_type, u8 addr_type, u32 value,
11463 struct mgmt_ev_user_confirm_request ev;
11465 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11467 bacpy(&ev.addr.bdaddr, bdaddr);
11468 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11469 ev.confirm_hint = confirm_hint;
11470 ev.value = cpu_to_le32(value);
11472 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
11476 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11477 u8 link_type, u8 addr_type)
11479 struct mgmt_ev_user_passkey_request ev;
11481 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11483 bacpy(&ev.addr.bdaddr, bdaddr);
11484 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11486 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
11490 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11491 u8 link_type, u8 addr_type, u8 status,
11494 struct mgmt_pending_cmd *cmd;
11496 cmd = pending_find(opcode, hdev);
11500 cmd->cmd_complete(cmd, mgmt_status(status));
11501 mgmt_pending_remove(cmd);
11506 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11507 u8 link_type, u8 addr_type, u8 status)
11509 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11510 status, MGMT_OP_USER_CONFIRM_REPLY);
11513 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11514 u8 link_type, u8 addr_type, u8 status)
11516 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11518 MGMT_OP_USER_CONFIRM_NEG_REPLY);
11521 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11522 u8 link_type, u8 addr_type, u8 status)
11524 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11525 status, MGMT_OP_USER_PASSKEY_REPLY);
11528 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11529 u8 link_type, u8 addr_type, u8 status)
11531 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11533 MGMT_OP_USER_PASSKEY_NEG_REPLY);
11536 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
11537 u8 link_type, u8 addr_type, u32 passkey,
11540 struct mgmt_ev_passkey_notify ev;
11542 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11544 bacpy(&ev.addr.bdaddr, bdaddr);
11545 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11546 ev.passkey = __cpu_to_le32(passkey);
11547 ev.entered = entered;
11549 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
11552 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
11554 struct mgmt_ev_auth_failed ev;
11555 struct mgmt_pending_cmd *cmd;
11556 u8 status = mgmt_status(hci_status);
11558 bacpy(&ev.addr.bdaddr, &conn->dst);
11559 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11560 ev.status = status;
11562 cmd = find_pairing(conn);
11564 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
11565 cmd ? cmd->sk : NULL);
11568 cmd->cmd_complete(cmd, status);
11569 mgmt_pending_remove(cmd);
11573 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
11575 struct cmd_lookup match = { NULL, hdev };
11579 u8 mgmt_err = mgmt_status(status);
11580 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
11581 cmd_status_rsp, &mgmt_err);
11585 if (test_bit(HCI_AUTH, &hdev->flags))
11586 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
11588 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
11590 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
11594 new_settings(hdev, match.sk);
11597 sock_put(match.sk);
11600 static void clear_eir(struct hci_request *req)
11602 struct hci_dev *hdev = req->hdev;
11603 struct hci_cp_write_eir cp;
11605 if (!lmp_ext_inq_capable(hdev))
11608 memset(hdev->eir, 0, sizeof(hdev->eir));
11610 memset(&cp, 0, sizeof(cp));
11612 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
11615 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
11617 struct cmd_lookup match = { NULL, hdev };
11618 struct hci_request req;
11619 bool changed = false;
11622 u8 mgmt_err = mgmt_status(status);
11624 if (enable && hci_dev_test_and_clear_flag(hdev,
11625 HCI_SSP_ENABLED)) {
11626 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11627 new_settings(hdev, NULL);
11630 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
11636 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
11638 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
11640 changed = hci_dev_test_and_clear_flag(hdev,
11643 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11646 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
11649 new_settings(hdev, match.sk);
11652 sock_put(match.sk);
11654 hci_req_init(&req, hdev);
11656 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
11657 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
11658 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
11659 sizeof(enable), &enable);
11660 __hci_req_update_eir(&req);
11665 hci_req_run(&req, NULL);
11668 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11670 struct cmd_lookup *match = data;
11672 if (match->sk == NULL) {
11673 match->sk = cmd->sk;
11674 sock_hold(match->sk);
11678 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11681 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11683 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11684 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11685 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11688 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11689 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11690 ext_info_changed(hdev, NULL);
11694 sock_put(match.sk);
11697 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11699 struct mgmt_cp_set_local_name ev;
11700 struct mgmt_pending_cmd *cmd;
11705 memset(&ev, 0, sizeof(ev));
11706 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11707 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11709 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11711 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11713 /* If this is a HCI command related to powering on the
11714 * HCI dev don't send any mgmt signals.
11716 if (pending_find(MGMT_OP_SET_POWERED, hdev))
11720 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11721 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11722 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
11725 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11729 for (i = 0; i < uuid_count; i++) {
11730 if (!memcmp(uuid, uuids[i], 16))
11737 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
11741 while (parsed < eir_len) {
11742 u8 field_len = eir[0];
11746 if (field_len == 0)
11749 if (eir_len - parsed < field_len + 1)
11753 case EIR_UUID16_ALL:
11754 case EIR_UUID16_SOME:
11755 for (i = 0; i + 3 <= field_len; i += 2) {
11756 memcpy(uuid, bluetooth_base_uuid, 16);
11757 uuid[13] = eir[i + 3];
11758 uuid[12] = eir[i + 2];
11759 if (has_uuid(uuid, uuid_count, uuids))
11763 case EIR_UUID32_ALL:
11764 case EIR_UUID32_SOME:
11765 for (i = 0; i + 5 <= field_len; i += 4) {
11766 memcpy(uuid, bluetooth_base_uuid, 16);
11767 uuid[15] = eir[i + 5];
11768 uuid[14] = eir[i + 4];
11769 uuid[13] = eir[i + 3];
11770 uuid[12] = eir[i + 2];
11771 if (has_uuid(uuid, uuid_count, uuids))
11775 case EIR_UUID128_ALL:
11776 case EIR_UUID128_SOME:
11777 for (i = 0; i + 17 <= field_len; i += 16) {
11778 memcpy(uuid, eir + i + 2, 16);
11779 if (has_uuid(uuid, uuid_count, uuids))
11785 parsed += field_len + 1;
11786 eir += field_len + 1;
11792 static void restart_le_scan(struct hci_dev *hdev)
11794 /* If controller is not scanning we are done. */
11795 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
11798 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
11799 hdev->discovery.scan_start +
11800 hdev->discovery.scan_duration))
11803 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
11804 DISCOV_LE_RESTART_DELAY);
11807 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
11808 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11810 /* If a RSSI threshold has been specified, and
11811 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
11812 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
11813 * is set, let it through for further processing, as we might need to
11814 * restart the scan.
11816 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
11817 * the results are also dropped.
11819 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11820 (rssi == HCI_RSSI_INVALID ||
11821 (rssi < hdev->discovery.rssi &&
11822 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
11825 if (hdev->discovery.uuid_count != 0) {
11826 /* If a list of UUIDs is provided in filter, results with no
11827 * matching UUID should be dropped.
11829 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
11830 hdev->discovery.uuids) &&
11831 !eir_has_uuids(scan_rsp, scan_rsp_len,
11832 hdev->discovery.uuid_count,
11833 hdev->discovery.uuids))
11837 /* If duplicate filtering does not report RSSI changes, then restart
11838 * scanning to ensure updated result with updated RSSI values.
11840 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
11841 restart_le_scan(hdev);
11843 /* Validate RSSI value against the RSSI threshold once more. */
11844 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11845 rssi < hdev->discovery.rssi)
11852 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11853 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
11854 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11857 struct mgmt_ev_device_found *ev = (void *)buf;
11860 /* Don't send events for a non-kernel initiated discovery. With
11861 * LE one exception is if we have pend_le_reports > 0 in which
11862 * case we're doing passive scanning and want these events.
11864 if (!hci_discovery_active(hdev)) {
11865 if (link_type == ACL_LINK)
11867 if (link_type == LE_LINK &&
11868 list_empty(&hdev->pend_le_reports) &&
11869 !hci_is_adv_monitoring(hdev)) {
11874 if (hdev->discovery.result_filtering) {
11875 /* We are using service discovery */
11876 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
11881 if (hdev->discovery.limited) {
11882 /* Check for limited discoverable bit */
11884 if (!(dev_class[1] & 0x20))
11887 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
11888 if (!flags || !(flags[0] & LE_AD_LIMITED))
11893 /* Make sure that the buffer is big enough. The 5 extra bytes
11894 * are for the potential CoD field.
11896 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
11899 memset(buf, 0, sizeof(buf));
11901 /* In case of device discovery with BR/EDR devices (pre 1.2), the
11902 * RSSI value was reported as 0 when not available. This behavior
11903 * is kept when using device discovery. This is required for full
11904 * backwards compatibility with the API.
11906 * However when using service discovery, the value 127 will be
11907 * returned when the RSSI is not available.
11909 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
11910 link_type == ACL_LINK)
11913 bacpy(&ev->addr.bdaddr, bdaddr);
11914 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11916 ev->flags = cpu_to_le32(flags);
11919 /* Copy EIR or advertising data into event */
11920 memcpy(ev->eir, eir, eir_len);
11922 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11924 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11927 if (scan_rsp_len > 0)
11928 /* Append scan response data to event */
11929 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
11931 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
11932 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
11934 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
11937 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11938 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
11940 struct mgmt_ev_device_found *ev;
11941 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
11944 ev = (struct mgmt_ev_device_found *) buf;
11946 memset(buf, 0, sizeof(buf));
11948 bacpy(&ev->addr.bdaddr, bdaddr);
11949 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11952 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
11955 ev->eir_len = cpu_to_le16(eir_len);
11957 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
11960 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
11962 struct mgmt_ev_discovering ev;
11964 bt_dev_dbg(hdev, "discovering %u", discovering);
11966 memset(&ev, 0, sizeof(ev));
11967 ev.type = hdev->discovery.type;
11968 ev.discovering = discovering;
11970 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
11973 void mgmt_suspending(struct hci_dev *hdev, u8 state)
11975 struct mgmt_ev_controller_suspend ev;
11977 ev.suspend_state = state;
11978 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
11981 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
11984 struct mgmt_ev_controller_resume ev;
11986 ev.wake_reason = reason;
11988 bacpy(&ev.addr.bdaddr, bdaddr);
11989 ev.addr.type = addr_type;
11991 memset(&ev.addr, 0, sizeof(ev.addr));
11994 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
11997 static struct hci_mgmt_chan chan = {
11998 .channel = HCI_CHANNEL_CONTROL,
11999 .handler_count = ARRAY_SIZE(mgmt_handlers),
12000 .handlers = mgmt_handlers,
12002 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
12003 .tizen_handlers = tizen_mgmt_handlers,
12005 .hdev_init = mgmt_init_hdev,
12008 int mgmt_init(void)
12010 return hci_mgmt_chan_register(&chan);
12013 void mgmt_exit(void)
12015 hci_mgmt_chan_unregister(&chan);