2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
/* Version/revision pair reported to user space via MGMT_OP_READ_VERSION
 * (see mgmt_fill_version_info() below).
 */
46 #define MGMT_VERSION 1
47 #define MGMT_REVISION 21
/* Opcodes accepted from trusted management sockets; the list is sent to
 * user space by read_commands() below.
 *
 * NOTE(review): this extraction is line-number-prefixed and the gaps in the
 * embedded numbering (e.g. 50->53, 55->57) show that several entries of the
 * original table were elided here — compare against the upstream file before
 * relying on completeness.
 */
49 static const u16 mgmt_commands[] = {
50 MGMT_OP_READ_INDEX_LIST,
53 MGMT_OP_SET_DISCOVERABLE,
54 MGMT_OP_SET_CONNECTABLE,
55 MGMT_OP_SET_FAST_CONNECTABLE,
57 MGMT_OP_SET_LINK_SECURITY,
61 MGMT_OP_SET_DEV_CLASS,
62 MGMT_OP_SET_LOCAL_NAME,
65 MGMT_OP_LOAD_LINK_KEYS,
66 MGMT_OP_LOAD_LONG_TERM_KEYS,
68 MGMT_OP_GET_CONNECTIONS,
69 MGMT_OP_PIN_CODE_REPLY,
70 MGMT_OP_PIN_CODE_NEG_REPLY,
71 MGMT_OP_SET_IO_CAPABILITY,
73 MGMT_OP_CANCEL_PAIR_DEVICE,
74 MGMT_OP_UNPAIR_DEVICE,
75 MGMT_OP_USER_CONFIRM_REPLY,
76 MGMT_OP_USER_CONFIRM_NEG_REPLY,
77 MGMT_OP_USER_PASSKEY_REPLY,
78 MGMT_OP_USER_PASSKEY_NEG_REPLY,
79 MGMT_OP_READ_LOCAL_OOB_DATA,
80 MGMT_OP_ADD_REMOTE_OOB_DATA,
81 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
82 MGMT_OP_START_DISCOVERY,
83 MGMT_OP_STOP_DISCOVERY,
86 MGMT_OP_UNBLOCK_DEVICE,
87 MGMT_OP_SET_DEVICE_ID,
88 MGMT_OP_SET_ADVERTISING,
90 MGMT_OP_SET_STATIC_ADDRESS,
91 MGMT_OP_SET_SCAN_PARAMS,
92 MGMT_OP_SET_SECURE_CONN,
93 MGMT_OP_SET_DEBUG_KEYS,
96 MGMT_OP_GET_CONN_INFO,
97 MGMT_OP_GET_CLOCK_INFO,
99 MGMT_OP_REMOVE_DEVICE,
100 MGMT_OP_LOAD_CONN_PARAM,
101 MGMT_OP_READ_UNCONF_INDEX_LIST,
102 MGMT_OP_READ_CONFIG_INFO,
103 MGMT_OP_SET_EXTERNAL_CONFIG,
104 MGMT_OP_SET_PUBLIC_ADDRESS,
105 MGMT_OP_START_SERVICE_DISCOVERY,
106 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
107 MGMT_OP_READ_EXT_INDEX_LIST,
108 MGMT_OP_READ_ADV_FEATURES,
109 MGMT_OP_ADD_ADVERTISING,
110 MGMT_OP_REMOVE_ADVERTISING,
111 MGMT_OP_GET_ADV_SIZE_INFO,
112 MGMT_OP_START_LIMITED_DISCOVERY,
113 MGMT_OP_READ_EXT_INFO,
114 MGMT_OP_SET_APPEARANCE,
115 MGMT_OP_GET_PHY_CONFIGURATION,
116 MGMT_OP_SET_PHY_CONFIGURATION,
117 MGMT_OP_SET_BLOCKED_KEYS,
118 MGMT_OP_SET_WIDEBAND_SPEECH,
119 MGMT_OP_READ_CONTROLLER_CAP,
120 MGMT_OP_READ_EXP_FEATURES_INFO,
121 MGMT_OP_SET_EXP_FEATURE,
122 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
123 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
124 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
125 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
126 MGMT_OP_GET_DEVICE_FLAGS,
127 MGMT_OP_SET_DEVICE_FLAGS,
128 MGMT_OP_READ_ADV_MONITOR_FEATURES,
129 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
130 MGMT_OP_REMOVE_ADV_MONITOR,
131 MGMT_OP_ADD_EXT_ADV_PARAMS,
132 MGMT_OP_ADD_EXT_ADV_DATA,
133 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
/* Events deliverable to trusted management sockets; reported alongside
 * mgmt_commands[] by read_commands().  Gaps in the embedded numbering
 * suggest some entries were elided in this extraction.
 */
136 static const u16 mgmt_events[] = {
137 MGMT_EV_CONTROLLER_ERROR,
139 MGMT_EV_INDEX_REMOVED,
140 MGMT_EV_NEW_SETTINGS,
141 MGMT_EV_CLASS_OF_DEV_CHANGED,
142 MGMT_EV_LOCAL_NAME_CHANGED,
143 MGMT_EV_NEW_LINK_KEY,
144 MGMT_EV_NEW_LONG_TERM_KEY,
145 MGMT_EV_DEVICE_CONNECTED,
146 MGMT_EV_DEVICE_DISCONNECTED,
147 MGMT_EV_CONNECT_FAILED,
148 MGMT_EV_PIN_CODE_REQUEST,
149 MGMT_EV_USER_CONFIRM_REQUEST,
150 MGMT_EV_USER_PASSKEY_REQUEST,
152 MGMT_EV_DEVICE_FOUND,
154 MGMT_EV_DEVICE_BLOCKED,
155 MGMT_EV_DEVICE_UNBLOCKED,
156 MGMT_EV_DEVICE_UNPAIRED,
157 MGMT_EV_PASSKEY_NOTIFY,
160 MGMT_EV_DEVICE_ADDED,
161 MGMT_EV_DEVICE_REMOVED,
162 MGMT_EV_NEW_CONN_PARAM,
163 MGMT_EV_UNCONF_INDEX_ADDED,
164 MGMT_EV_UNCONF_INDEX_REMOVED,
165 MGMT_EV_NEW_CONFIG_OPTIONS,
166 MGMT_EV_EXT_INDEX_ADDED,
167 MGMT_EV_EXT_INDEX_REMOVED,
168 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
169 MGMT_EV_ADVERTISING_ADDED,
170 MGMT_EV_ADVERTISING_REMOVED,
171 MGMT_EV_EXT_INFO_CHANGED,
172 MGMT_EV_PHY_CONFIGURATION_CHANGED,
173 MGMT_EV_EXP_FEATURE_CHANGED,
174 MGMT_EV_DEVICE_FLAGS_CHANGED,
175 MGMT_EV_ADV_MONITOR_ADDED,
176 MGMT_EV_ADV_MONITOR_REMOVED,
177 MGMT_EV_CONTROLLER_SUSPEND,
178 MGMT_EV_CONTROLLER_RESUME,
/* Subset of opcodes permitted on untrusted sockets — read-only operations
 * only (read_commands() advertises this list when HCI_SOCK_TRUSTED is not
 * set on the socket).
 */
181 static const u16 mgmt_untrusted_commands[] = {
182 MGMT_OP_READ_INDEX_LIST,
184 MGMT_OP_READ_UNCONF_INDEX_LIST,
185 MGMT_OP_READ_CONFIG_INFO,
186 MGMT_OP_READ_EXT_INDEX_LIST,
187 MGMT_OP_READ_EXT_INFO,
188 MGMT_OP_READ_CONTROLLER_CAP,
189 MGMT_OP_READ_EXP_FEATURES_INFO,
190 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
191 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Events that may be delivered to untrusted sockets — index/name/config
 * notifications that expose no pairing or key material.
 */
194 static const u16 mgmt_untrusted_events[] = {
196 MGMT_EV_INDEX_REMOVED,
197 MGMT_EV_NEW_SETTINGS,
198 MGMT_EV_CLASS_OF_DEV_CHANGED,
199 MGMT_EV_LOCAL_NAME_CHANGED,
200 MGMT_EV_UNCONF_INDEX_ADDED,
201 MGMT_EV_UNCONF_INDEX_REMOVED,
202 MGMT_EV_NEW_CONFIG_OPTIONS,
203 MGMT_EV_EXT_INDEX_ADDED,
204 MGMT_EV_EXT_INDEX_REMOVED,
205 MGMT_EV_EXT_INFO_CHANGED,
206 MGMT_EV_EXP_FEATURE_CHANGED,
/* Service-cache validity window (2 s, in jiffies) — see service_cache_off()
 * below.  ZERO_KEY is a 16-byte all-zero key used as a sentinel.
 */
209 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 "\x00\x00\x00\x00\x00\x00\x00\x00"
214 /* HCI to MGMT error code conversion table */
/* Maps HCI core error codes (table index == HCI status byte) to MGMT_STATUS_*
 * values; consumed by mgmt_status() below.  The entry order must follow the
 * HCI status code numbering — do not reorder.
 *
 * NOTE(review): the first visible entry is at embedded line 217; the entry
 * for HCI status 0x00 (Success) appears to have been elided in this
 * extraction — confirm against upstream before editing.
 */
215 static const u8 mgmt_status_table[] = {
217 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
218 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
219 MGMT_STATUS_FAILED, /* Hardware Failure */
220 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
221 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
222 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
223 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
224 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
226 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
227 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
228 MGMT_STATUS_BUSY, /* Command Disallowed */
229 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
230 MGMT_STATUS_REJECTED, /* Rejected Security */
231 MGMT_STATUS_REJECTED, /* Rejected Personal */
232 MGMT_STATUS_TIMEOUT, /* Host Timeout */
233 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
234 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
235 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
236 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
237 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
238 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
239 MGMT_STATUS_BUSY, /* Repeated Attempts */
240 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
241 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
242 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
243 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
244 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
245 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
246 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
247 MGMT_STATUS_FAILED, /* Unspecified Error */
248 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
249 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
250 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
251 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
252 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
253 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
254 MGMT_STATUS_FAILED, /* Unit Link Key Used */
255 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
256 MGMT_STATUS_TIMEOUT, /* Instant Passed */
257 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
258 MGMT_STATUS_FAILED, /* Transaction Collision */
259 MGMT_STATUS_FAILED, /* Reserved for future use */
260 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
261 MGMT_STATUS_REJECTED, /* QoS Rejected */
262 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
263 MGMT_STATUS_REJECTED, /* Insufficient Security */
264 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
265 MGMT_STATUS_FAILED, /* Reserved for future use */
266 MGMT_STATUS_BUSY, /* Role Switch Pending */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_FAILED, /* Slot Violation */
269 MGMT_STATUS_FAILED, /* Role Switch Failed */
270 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
271 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
272 MGMT_STATUS_BUSY, /* Host Busy Pairing */
273 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
274 MGMT_STATUS_BUSY, /* Controller Busy */
275 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
276 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
277 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
278 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
279 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Translate an HCI status byte to its MGMT_STATUS_* equivalent via
 * mgmt_status_table[]; out-of-range codes collapse to MGMT_STATUS_FAILED.
 */
282 static u8 mgmt_status(u8 hci_status)
284 if (hci_status < ARRAY_SIZE(mgmt_status_table))
285 return mgmt_status_table[hci_status];
287 return MGMT_STATUS_FAILED;
/* Broadcast an index-related event on the control channel.
 * NOTE(review): this extraction cuts the definition short — the trailing
 * argument line(s) of the mgmt_send_event() call (flag/skip_sk) and the
 * closing brace were elided; compare with mgmt_event() below for the shape.
 */
290 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
293 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Broadcast an event only to control-channel sockets carrying the given
 * HCI_MGMT_* flag, optionally skipping one socket (typically the sender).
 * NOTE(review): the final argument line and closing brace were elided in
 * this extraction.
 */
297 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
298 u16 len, int flag, struct sock *skip_sk)
300 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Broadcast an event to all trusted control-channel sockets except
 * @skip_sk (usually the socket that triggered the change).
 */
304 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
305 struct sock *skip_sk)
307 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
308 HCI_SOCK_TRUSTED, skip_sk);
/* Map a management-API LE address type (BDADDR_LE_*) to the HCI core
 * ADDR_LE_DEV_* representation; anything not public is treated as random.
 */
311 static u8 le_addr_type(u8 mgmt_addr_type)
313 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
314 return ADDR_LE_DEV_PUBLIC;
316 return ADDR_LE_DEV_RANDOM;
/* Fill a mgmt_rp_read_version reply with MGMT_VERSION/MGMT_REVISION.
 * @ver must point to a buffer at least sizeof(struct mgmt_rp_read_version).
 */
319 void mgmt_fill_version_info(void *ver)
321 struct mgmt_rp_read_version *rp = ver;
323 rp->version = MGMT_VERSION;
324 rp->revision = cpu_to_le16(MGMT_REVISION);
/* MGMT_OP_READ_VERSION handler: replies with the interface version pair.
 * Index-independent (MGMT_INDEX_NONE).
 */
327 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
330 struct mgmt_rp_read_version rp;
332 bt_dev_dbg(hdev, "sock %p", sk);
334 mgmt_fill_version_info(&rp);
336 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: returns the supported command and event
 * opcode lists.  Trusted sockets get the full tables; untrusted sockets get
 * the read-only subsets.  Opcodes are serialized little-endian into the
 * flexible rp->opcodes area (commands first, then events).
 * NOTE(review): several lines (kmalloc NULL check, else keywords, kfree,
 * closing braces) were elided in this extraction.
 */
340 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
343 struct mgmt_rp_read_commands *rp;
344 u16 num_commands, num_events;
348 bt_dev_dbg(hdev, "sock %p", sk);
350 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
351 num_commands = ARRAY_SIZE(mgmt_commands);
352 num_events = ARRAY_SIZE(mgmt_events);
354 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
355 num_events = ARRAY_SIZE(mgmt_untrusted_events);
/* Reply size: fixed header plus one u16 per command and per event. */
358 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
360 rp = kmalloc(rp_size, GFP_KERNEL);
364 rp->num_commands = cpu_to_le16(num_commands);
365 rp->num_events = cpu_to_le16(num_events);
367 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
368 __le16 *opcode = rp->opcodes;
370 for (i = 0; i < num_commands; i++, opcode++)
371 put_unaligned_le16(mgmt_commands[i], opcode)
373 for (i = 0; i < num_events; i++, opcode++)
374 put_unaligned_le16(mgmt_events[i], opcode);
376 __le16 *opcode = rp->opcodes;
378 for (i = 0; i < num_commands; i++, opcode++)
379 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
381 for (i = 0; i < num_events; i++, opcode++)
382 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
385 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: returns the ids of all configured
 * HCI_PRIMARY controllers.  Two passes under hci_dev_list_lock: first count
 * eligible devices to size the reply, then collect ids — skipping devices
 * still in SETUP/CONFIG, those bound to a user channel, and raw-only
 * (HCI_QUIRK_RAW_DEVICE) devices.  GFP_ATOMIC because the read lock is held.
 */
392 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
395 struct mgmt_rp_read_index_list *rp;
401 bt_dev_dbg(hdev, "sock %p", sk);
403 read_lock(&hci_dev_list_lock);
406 list_for_each_entry(d, &hci_dev_list, list) {
407 if (d->dev_type == HCI_PRIMARY &&
408 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
412 rp_len = sizeof(*rp) + (2 * count);
413 rp = kmalloc(rp_len, GFP_ATOMIC);
415 read_unlock(&hci_dev_list_lock);
420 list_for_each_entry(d, &hci_dev_list, list) {
421 if (hci_dev_test_flag(d, HCI_SETUP) ||
422 hci_dev_test_flag(d, HCI_CONFIG) ||
423 hci_dev_test_flag(d, HCI_USER_CHANNEL))
426 /* Devices marked as raw-only are neither configured
427 * nor unconfigured controllers.
429 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
432 if (d->dev_type == HCI_PRIMARY &&
433 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
434 rp->index[count++] = cpu_to_le16(d->id);
435 bt_dev_dbg(hdev, "Added hci%u", d->id);
/* Recompute rp_len from the second-pass count, which may be smaller. */
439 rp->num_controllers = cpu_to_le16(count);
440 rp_len = sizeof(*rp) + (2 * count);
442 read_unlock(&hci_dev_list_lock);
444 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: mirror of read_index_list() but
 * selecting HCI_PRIMARY controllers that still carry HCI_UNCONFIGURED.
 * Same two-pass count/collect structure under hci_dev_list_lock.
 */
452 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
453 void *data, u16 data_len)
455 struct mgmt_rp_read_unconf_index_list *rp;
461 bt_dev_dbg(hdev, "sock %p", sk);
463 read_lock(&hci_dev_list_lock);
466 list_for_each_entry(d, &hci_dev_list, list) {
467 if (d->dev_type == HCI_PRIMARY &&
468 hci_dev_test_flag(d, HCI_UNCONFIGURED))
472 rp_len = sizeof(*rp) + (2 * count);
473 rp = kmalloc(rp_len, GFP_ATOMIC);
475 read_unlock(&hci_dev_list_lock);
480 list_for_each_entry(d, &hci_dev_list, list) {
481 if (hci_dev_test_flag(d, HCI_SETUP) ||
482 hci_dev_test_flag(d, HCI_CONFIG) ||
483 hci_dev_test_flag(d, HCI_USER_CHANNEL))
486 /* Devices marked as raw-only are neither configured
487 * nor unconfigured controllers.
489 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
492 if (d->dev_type == HCI_PRIMARY &&
493 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
494 rp->index[count++] = cpu_to_le16(d->id);
495 bt_dev_dbg(hdev, "Added hci%u", d->id);
499 rp->num_controllers = cpu_to_le16(count);
500 rp_len = sizeof(*rp) + (2 * count);
502 read_unlock(&hci_dev_list_lock);
504 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
505 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: extended index list covering both
 * primary and AMP controllers, with a per-entry type (0x00 configured
 * primary, 0x01 unconfigured primary, 0x02 AMP) and bus.  Calling it once
 * switches the socket over to extended index events only.
 */
512 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
513 void *data, u16 data_len)
515 struct mgmt_rp_read_ext_index_list *rp;
520 bt_dev_dbg(hdev, "sock %p", sk);
522 read_lock(&hci_dev_list_lock);
525 list_for_each_entry(d, &hci_dev_list, list) {
526 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
530 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
532 read_unlock(&hci_dev_list_lock);
537 list_for_each_entry(d, &hci_dev_list, list) {
538 if (hci_dev_test_flag(d, HCI_SETUP) ||
539 hci_dev_test_flag(d, HCI_CONFIG) ||
540 hci_dev_test_flag(d, HCI_USER_CHANNEL))
543 /* Devices marked as raw-only are neither configured
544 * nor unconfigured controllers.
546 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
549 if (d->dev_type == HCI_PRIMARY) {
550 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
551 rp->entry[count].type = 0x01;
553 rp->entry[count].type = 0x00;
554 } else if (d->dev_type == HCI_AMP) {
555 rp->entry[count].type = 0x02;
560 rp->entry[count].bus = d->bus;
561 rp->entry[count++].index = cpu_to_le16(d->id);
562 bt_dev_dbg(hdev, "Added hci%u", d->id);
565 rp->num_controllers = cpu_to_le16(count);
567 read_unlock(&hci_dev_list_lock);
569 /* If this command is called at least once, then all the
570 * default index and unconfigured index events are disabled
571 * and from now on only extended index events are used.
573 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
574 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
575 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
577 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
578 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
579 struct_size(rp, entry, count));
/* A controller counts as configured when neither pending requirement holds:
 * external config is required (HCI_QUIRK_EXTERNAL_CONFIG) but not yet done,
 * or a valid public address is required (invalid BDADDR / bdaddr-property
 * quirks) but public_addr is still BDADDR_ANY.
 */
586 static bool is_configured(struct hci_dev *hdev)
588 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
589 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
592 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
593 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
594 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the MGMT_OPTION_* bitmask of configuration steps still outstanding
 * (same two conditions as is_configured()), returned little-endian for
 * direct use in replies and NEW_CONFIG_OPTIONS events.
 */
600 static __le32 get_missing_options(struct hci_dev *hdev)
604 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
605 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
606 options |= MGMT_OPTION_EXTERNAL_CONFIG;
608 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
609 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
610 !bacmp(&hdev->public_addr, BDADDR_ANY))
611 options |= MGMT_OPTION_PUBLIC_ADDRESS;
613 return cpu_to_le32(options);
/* Emit MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options mask to
 * sockets subscribed via HCI_MGMT_OPTION_EVENTS, skipping @skip.
 */
616 static int new_options(struct hci_dev *hdev, struct sock *skip)
618 __le32 options = get_missing_options(hdev);
620 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
621 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
/* Complete @opcode for @sk with the current missing-options mask as the
 * reply payload.
 */
624 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
626 __le32 options = get_missing_options(hdev);
628 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: reports manufacturer id plus which
 * config options the controller supports (external config quirk, settable
 * public address via set_bdaddr) and which are still missing.
 * NOTE(review): the matching hci_dev_lock(hdev) line was elided in this
 * extraction — only the unlock is visible.
 */
632 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
633 void *data, u16 data_len)
635 struct mgmt_rp_read_config_info rp;
638 bt_dev_dbg(hdev, "sock %p", sk);
642 memset(&rp, 0, sizeof(rp));
643 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
645 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
646 options |= MGMT_OPTION_EXTERNAL_CONFIG;
648 if (hdev->set_bdaddr)
649 options |= MGMT_OPTION_PUBLIC_ADDRESS;
651 rp.supported_options = cpu_to_le32(options);
652 rp.missing_options = get_missing_options(hdev);
654 hci_dev_unlock(hdev);
656 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Compute the MGMT_PHY_* bitmask of PHYs the controller hardware supports:
 * BR 1M always when BR/EDR capable (3/5-slot per LMP feature bits), EDR
 * 2M/3M variants per lmp_edr_* capabilities, and LE 1M always when LE
 * capable with 2M/Coded gated on le_features[1] bits.
 */
660 static u32 get_supported_phys(struct hci_dev *hdev)
662 u32 supported_phys = 0;
664 if (lmp_bredr_capable(hdev)) {
665 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
667 if (hdev->features[0][0] & LMP_3SLOT)
668 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
670 if (hdev->features[0][0] & LMP_5SLOT)
671 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
673 if (lmp_edr_2m_capable(hdev)) {
674 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
676 if (lmp_edr_3slot_capable(hdev))
677 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
679 if (lmp_edr_5slot_capable(hdev))
680 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
682 if (lmp_edr_3m_capable(hdev)) {
683 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
685 if (lmp_edr_3slot_capable(hdev))
686 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
688 if (lmp_edr_5slot_capable(hdev))
689 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
694 if (lmp_le_capable(hdev)) {
695 supported_phys |= MGMT_PHY_LE_1M_TX;
696 supported_phys |= MGMT_PHY_LE_1M_RX;
698 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
699 supported_phys |= MGMT_PHY_LE_2M_TX;
700 supported_phys |= MGMT_PHY_LE_2M_RX;
703 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
704 supported_phys |= MGMT_PHY_LE_CODED_TX;
705 supported_phys |= MGMT_PHY_LE_CODED_RX;
709 return supported_phys;
/* Compute the MGMT_PHY_* bitmask of PHYs currently selected.  For BR/EDR
 * the selection is derived from hdev->pkt_type: note EDR bits (HCI_2DHx /
 * HCI_3DHx) are "shall not use" flags, so a *cleared* bit means the PHY is
 * selected — hence the negated tests.  LE selection mirrors the default
 * TX/RX PHY preference masks.
 */
712 static u32 get_selected_phys(struct hci_dev *hdev)
714 u32 selected_phys = 0;
716 if (lmp_bredr_capable(hdev)) {
717 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
719 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
720 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
722 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
723 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
725 if (lmp_edr_2m_capable(hdev)) {
726 if (!(hdev->pkt_type & HCI_2DH1))
727 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
729 if (lmp_edr_3slot_capable(hdev) &&
730 !(hdev->pkt_type & HCI_2DH3))
731 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
733 if (lmp_edr_5slot_capable(hdev) &&
734 !(hdev->pkt_type & HCI_2DH5))
735 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
737 if (lmp_edr_3m_capable(hdev)) {
738 if (!(hdev->pkt_type & HCI_3DH1))
739 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
741 if (lmp_edr_3slot_capable(hdev) &&
742 !(hdev->pkt_type & HCI_3DH3))
743 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
745 if (lmp_edr_5slot_capable(hdev) &&
746 !(hdev->pkt_type & HCI_3DH5))
747 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
752 if (lmp_le_capable(hdev)) {
753 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
754 selected_phys |= MGMT_PHY_LE_1M_TX;
756 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
757 selected_phys |= MGMT_PHY_LE_1M_RX;
759 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
760 selected_phys |= MGMT_PHY_LE_2M_TX;
762 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
763 selected_phys |= MGMT_PHY_LE_2M_RX;
765 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
766 selected_phys |= MGMT_PHY_LE_CODED_TX;
768 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
769 selected_phys |= MGMT_PHY_LE_CODED_RX;
772 return selected_phys;
/* PHYs user space may toggle: everything supported except the mandatory
 * baseline PHYs (BR 1M 1-slot and LE 1M TX/RX), which are always on.
 */
775 static u32 get_configurable_phys(struct hci_dev *hdev)
777 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
778 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the MGMT_SETTING_* mask of settings this controller can support,
 * derived from LMP/LE capability bits and quirks.  Powered/bondable/
 * debug-keys/connectable/discoverable are unconditional; BR/EDR and LE
 * groups are gated on the respective capabilities.
 * NOTE(review): the condition paired with the elided line before embedded
 * 826 (hdev->set_bdaddr, per context of read_config_info()) is missing in
 * this extraction — confirm upstream.
 */
781 static u32 get_supported_settings(struct hci_dev *hdev)
785 settings |= MGMT_SETTING_POWERED;
786 settings |= MGMT_SETTING_BONDABLE;
787 settings |= MGMT_SETTING_DEBUG_KEYS;
788 settings |= MGMT_SETTING_CONNECTABLE;
789 settings |= MGMT_SETTING_DISCOVERABLE;
791 if (lmp_bredr_capable(hdev)) {
792 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
793 settings |= MGMT_SETTING_FAST_CONNECTABLE;
794 settings |= MGMT_SETTING_BREDR;
795 settings |= MGMT_SETTING_LINK_SECURITY;
797 if (lmp_ssp_capable(hdev)) {
798 settings |= MGMT_SETTING_SSP;
799 if (IS_ENABLED(CONFIG_BT_HS))
800 settings |= MGMT_SETTING_HS;
803 if (lmp_sc_capable(hdev))
804 settings |= MGMT_SETTING_SECURE_CONN;
806 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
808 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
811 if (lmp_le_capable(hdev)) {
812 settings |= MGMT_SETTING_LE;
813 settings |= MGMT_SETTING_SECURE_CONN;
814 settings |= MGMT_SETTING_PRIVACY;
815 settings |= MGMT_SETTING_STATIC_ADDRESS;
817 /* When the experimental feature for LL Privacy support is
818 * enabled, then advertising is no longer supported.
820 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
821 settings |= MGMT_SETTING_ADVERTISING;
824 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
826 settings |= MGMT_SETTING_CONFIGURATION;
828 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the MGMT_SETTING_* mask reflecting the controller's *current*
 * state, mapping each hdev flag to its corresponding setting bit.
 */
833 static u32 get_current_settings(struct hci_dev *hdev)
837 if (hdev_is_powered(hdev))
838 settings |= MGMT_SETTING_POWERED;
840 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
841 settings |= MGMT_SETTING_CONNECTABLE;
843 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
844 settings |= MGMT_SETTING_FAST_CONNECTABLE;
846 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
847 settings |= MGMT_SETTING_DISCOVERABLE;
849 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
850 settings |= MGMT_SETTING_BONDABLE;
852 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
853 settings |= MGMT_SETTING_BREDR;
855 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
856 settings |= MGMT_SETTING_LE;
858 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
859 settings |= MGMT_SETTING_LINK_SECURITY;
861 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
862 settings |= MGMT_SETTING_SSP;
864 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
865 settings |= MGMT_SETTING_HS;
867 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
868 settings |= MGMT_SETTING_ADVERTISING;
870 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
871 settings |= MGMT_SETTING_SECURE_CONN;
873 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
874 settings |= MGMT_SETTING_DEBUG_KEYS;
876 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
877 settings |= MGMT_SETTING_PRIVACY;
879 /* The current setting for static address has two purposes. The
880 * first is to indicate if the static address will be used and
881 * the second is to indicate if it is actually set.
883 * This means if the static address is not configured, this flag
884 * will never be set. If the address is configured, then if the
885 * address is actually used decides if the flag is set or not.
887 * For single mode LE only controllers and dual-mode controllers
888 * with BR/EDR disabled, the existence of the static address will
891 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
892 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
893 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
894 if (bacmp(&hdev->static_addr, BDADDR_ANY))
895 settings |= MGMT_SETTING_STATIC_ADDRESS;
898 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
899 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
/* Look up a pending management command on the control channel by opcode. */
904 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
906 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
/* As pending_find(), but additionally matching on the command's user data
 * pointer.
 */
909 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
910 struct hci_dev *hdev,
913 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Return the LE advertising-data discoverability flags (LE_AD_GENERAL /
 * LE_AD_LIMITED, or 0).  A pending SET_DISCOVERABLE command takes
 * precedence over the current hdev flags, since those have not settled yet.
 * NOTE(review): the condition line between embedded 925 and 927 (testing
 * cp->val for the general case) was elided in this extraction.
 */
916 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
918 struct mgmt_pending_cmd *cmd;
920 /* If there's a pending mgmt command the flags will not yet have
921 * their final values, so check for this first.
923 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
925 struct mgmt_mode *cp = cmd->param;
927 return LE_AD_GENERAL;
928 else if (cp->val == 0x02)
929 return LE_AD_LIMITED;
931 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
932 return LE_AD_LIMITED;
933 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
934 return LE_AD_GENERAL;
/* Return whether the controller should be treated as connectable.  A
 * pending SET_CONNECTABLE command's requested value wins over the current
 * HCI_CONNECTABLE flag.
 * NOTE(review): the `return cp->val;` line inside the pending-command
 * branch was elided in this extraction.
 */
940 bool mgmt_get_connectable(struct hci_dev *hdev)
942 struct mgmt_pending_cmd *cmd;
944 /* If there's a pending mgmt command the flag will not yet have
945 * it's final value, so check for this first.
947 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
949 struct mgmt_mode *cp = cmd->param;
954 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Delayed-work handler: when the service cache window closes (see
 * CACHE_TIMEOUT), clear HCI_SERVICE_CACHE and push updated EIR and class
 * of device to the controller via a single hci_request.
 * NOTE(review): the matching hci_dev_lock(hdev) line was elided — only the
 * unlock is visible.
 */
957 static void service_cache_off(struct work_struct *work)
959 struct hci_dev *hdev = container_of(work, struct hci_dev,
961 struct hci_request req;
963 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
966 hci_req_init(&req, hdev);
970 __hci_req_update_eir(&req);
971 __hci_req_update_class(&req);
973 hci_dev_unlock(hdev);
975 hci_req_run(&req, NULL);
/* Delayed-work handler for resolvable-private-address rotation: mark the
 * RPA expired and, if advertising is active, re-arm advertising so a fresh
 * RPA gets generated and programmed (extended or legacy path).
 */
978 static void rpa_expired(struct work_struct *work)
980 struct hci_dev *hdev = container_of(work, struct hci_dev,
982 struct hci_request req;
984 bt_dev_dbg(hdev, "");
986 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
988 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
991 /* The generation of a new RPA and programming it into the
992 * controller happens in the hci_req_enable_advertising()
995 hci_req_init(&req, hdev);
996 if (ext_adv_capable(hdev))
997 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance)
999 __hci_req_enable_advertising(&req);
1000 hci_req_run(&req, NULL);
/* First-use initialization when a socket takes management control of a
 * device: runs once per hdev (guarded by test-and-set of HCI_MGMT), wires
 * up the service-cache and RPA-expiry delayed work, and clears BONDABLE so
 * user space must opt in explicitly.
 */
1003 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1005 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1008 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1009 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1011 /* Non-mgmt controlled devices get this bit set
1012 * implicitly so that pairing works for them, however
1013 * for mgmt we require user-space to explicitly enable
1016 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: snapshot of address, HCI version,
 * manufacturer, supported/current settings, class of device and names.
 * NOTE(review): the matching hci_dev_lock(hdev) line was elided — only the
 * unlock is visible.
 */
1019 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1020 void *data, u16 data_len)
1022 struct mgmt_rp_read_info rp;
1024 bt_dev_dbg(hdev, "sock %p", sk);
1028 memset(&rp, 0, sizeof(rp));
1030 bacpy(&rp.bdaddr, &hdev->bdaddr);
1032 rp.version = hdev->hci_ver;
1033 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1035 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1036 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1038 memcpy(rp.dev_class, hdev->dev_class, 3);
1040 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1041 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1043 hci_dev_unlock(hdev);
1045 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Serialize the controller's EIR-style data into @eir: class of device
 * (when BR/EDR enabled), appearance (when LE enabled), then complete and
 * short local names.  Returns the number of bytes written.
 */
1049 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1054 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1055 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1056 hdev->dev_class, 3);
1058 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1059 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1062 name_len = strlen(hdev->dev_name);
1063 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1064 hdev->dev_name, name_len);
1066 name_len = strlen(hdev->short_name);
1067 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1068 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with the
 * variable-length EIR blob appended.  Calling it once switches the socket
 * from the legacy class/name change events to EXT_INFO_CHANGED only.
 */
1073 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1074 void *data, u16 data_len)
1077 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1080 bt_dev_dbg(hdev, "sock %p", sk);
1082 memset(&buf, 0, sizeof(buf));
1086 bacpy(&rp->bdaddr, &hdev->bdaddr);
1088 rp->version = hdev->hci_ver;
1089 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1091 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1092 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1095 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1096 rp->eir_len = cpu_to_le16(eir_len);
1098 hci_dev_unlock(hdev);
1100 /* If this command is called at least once, then the events
1101 * for class of device and local name changes are disabled
1102 * and only the new extended controller information event
1105 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1106 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1107 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1109 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1110 sizeof(*rp) + eir_len);
/* Emit MGMT_EV_EXT_INFO_CHANGED (current EIR blob) to sockets that opted
 * into extended info events, skipping @skip.
 */
1113 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1116 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1119 memset(buf, 0, sizeof(buf));
1121 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1122 ev->eir_len = cpu_to_le16(eir_len);
1124 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1125 sizeof(*ev) + eir_len,
1126 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Complete @opcode for @sk with the current settings mask as the payload —
 * the standard success reply for all SET_* mode commands.
 */
1129 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1131 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1133 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, fast-forward the pending power_off delayed work.
 */
1137 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1139 bt_dev_dbg(hdev, "status 0x%02x", status);
1141 if (hci_conn_count(hdev) == 0) {
1142 cancel_delayed_work(&hdev->power_off);
1143 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Broadcast MGMT_EV_ADVERTISING_ADDED for @instance, skipping the socket
 * that added it.
 */
1147 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1149 struct mgmt_ev_advertising_added ev;
1151 ev.instance = instance;
1153 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast MGMT_EV_ADVERTISING_REMOVED for @instance, skipping the socket
 * that removed it.
 */
1156 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1159 struct mgmt_ev_advertising_removed ev;
1161 ev.instance = instance;
1163 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Cancel a pending advertising-instance expiry, if one is armed, and clear
 * the recorded timeout.
 */
1166 static void cancel_adv_timeout(struct hci_dev *hdev)
1168 if (hdev->adv_instance_timeout) {
1169 hdev->adv_instance_timeout = 0;
1170 cancel_delayed_work(&hdev->adv_instance_expire);
/* Tear down runtime HCI state ahead of power-off: disable page/inquiry
 * scan, clear advertising instances, stop discovery, and abort every
 * connection with reason 0x15 (Terminated due to Power Off), all batched
 * into one hci_request completed by clean_up_hci_complete().
 * NOTE(review): the `u8 scan = 0x00;` declaration line appears elided in
 * this extraction (scan is used at embedded line 1186).
 */
1174 static int clean_up_hci_state(struct hci_dev *hdev)
1176 struct hci_request req;
1177 struct hci_conn *conn;
1178 bool discov_stopped;
1181 hci_req_init(&req, hdev);
1183 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1184 test_bit(HCI_PSCAN, &hdev->flags)) {
1186 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1189 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1191 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1192 __hci_req_disable_advertising(&req);
1194 discov_stopped = hci_req_stop_discovery(&req);
1196 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1197 /* 0x15 == Terminated due to Power Off */
1198 __hci_abort_conn(&req, conn, 0x15);
1201 err = hci_req_run(&req, clean_up_hci_complete);
1202 if (!err && discov_stopped)
1203 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler.  Validates val is 0/1, rejects if a powered
 * change is already pending (BUSY), short-circuits a no-op request with a
 * settings reply, otherwise registers a pending command and either queues
 * power_on work or runs clean_up_hci_state() followed by a delayed
 * power_off (fast-forwarded when no HCI commands were queued, -ENODATA).
 * NOTE(review): the hci_dev_lock() line and the failed/else branch lines
 * between embedded 1234 and 1241 were elided in this extraction.
 */
1208 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1211 struct mgmt_mode *cp = data;
1212 struct mgmt_pending_cmd *cmd;
1215 bt_dev_dbg(hdev, "sock %p", sk);
1217 if (cp->val != 0x00 && cp->val != 0x01)
1218 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1219 MGMT_STATUS_INVALID_PARAMS);
1223 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1224 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1229 if (!!cp->val == hdev_is_powered(hdev)) {
1230 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1234 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1241 queue_work(hdev->req_workqueue, &hdev->power_on);
1244 /* Disconnect connections, stop scans, etc */
1245 err = clean_up_hci_state(hdev);
1247 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1248 HCI_POWER_OFF_TIMEOUT);
1250 /* ENODATA means there were no HCI commands queued */
1251 if (err == -ENODATA) {
1252 cancel_delayed_work(&hdev->power_off);
1253 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1259 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * every mgmt socket that opted into setting events, except @skip.
 */
1263 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1265 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1267 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1268 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast the new settings to all listeners. */
1271 int mgmt_new_settings(struct hci_dev *hdev)
1273 return new_settings(hdev, NULL);
1278 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings, detach it from the pending list, and remember the
 * first responder's socket (with a reference) in the cmd_lookup match
 * so the caller can skip it when broadcasting NEW_SETTINGS.
 */
1282 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1284 struct cmd_lookup *match = data;
1286 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1288 list_del(&cmd->list);
1290 if (match->sk == NULL) {
1291 match->sk = cmd->sk;
1292 sock_hold(match->sk);
1295 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and remove it from the pending list.
 */
1298 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1302 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1303 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: complete a pending command via its
 * cmd_complete hook when one is set, otherwise fall back to a plain
 * status response (cmd_status_rsp).
 */
1306 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1308 if (cmd->cmd_complete) {
1311 cmd->cmd_complete(cmd, *status);
1312 mgmt_pending_remove(cmd);
1317 cmd_status_rsp(cmd, data);
/* Generic cmd_complete hook: echo the command's original parameters
 * back in the completion payload.
 */
1320 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1322 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1323 cmd->param, cmd->param_len);
/* cmd_complete hook for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1326 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1328 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1329 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, SUCCESS
 * otherwise.
 */
1332 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1334 if (!lmp_bredr_capable(hdev))
1335 return MGMT_STATUS_NOT_SUPPORTED;
1336 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1337 return MGMT_STATUS_REJECTED;
1339 return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled, SUCCESS otherwise.
 */
1342 static u8 mgmt_le_support(struct hci_dev *hdev)
1344 if (!lmp_le_capable(hdev))
1345 return MGMT_STATUS_NOT_SUPPORTED;
1346 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1347 return MGMT_STATUS_REJECTED;
1349 return MGMT_STATUS_SUCCESS;
/* Completion handler for the asynchronous SET_DISCOVERABLE work.
 * On failure: report the error and drop HCI_LIMITED_DISCOVERABLE.
 * On success: arm the discoverable timeout (if configured), answer the
 * pending command with the new settings and broadcast NEW_SETTINGS.
 */
1352 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1354 struct mgmt_pending_cmd *cmd;
1356 bt_dev_dbg(hdev, "status 0x%02x", status);
1360 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1365 u8 mgmt_err = mgmt_status(status);
1366 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1367 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm the discov_off timer only now that discoverable is active */
1371 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1372 hdev->discov_timeout > 0) {
1373 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1374 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1377 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1378 new_settings(hdev, cmd->sk);
1381 mgmt_pending_remove(cmd);
1384 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.
 * Accepted modes: 0x00 off, 0x01 general discoverable, 0x02 limited
 * discoverable. A timeout must not be given when disabling and is
 * mandatory for limited mode. When powered off only the flag is
 * toggled; when powered on and only the timeout changes, the timer is
 * re-armed without any HCI traffic; otherwise the change is handed to
 * the discoverable_update work and completed asynchronously in
 * mgmt_set_discoverable_complete().
 */
1387 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1390 struct mgmt_cp_set_discoverable *cp = data;
1391 struct mgmt_pending_cmd *cmd;
1395 bt_dev_dbg(hdev, "sock %p", sk);
1397 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1398 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1400 MGMT_STATUS_REJECTED);
1402 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1403 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1404 MGMT_STATUS_INVALID_PARAMS);
1406 timeout = __le16_to_cpu(cp->timeout);
1408 /* Disabling discoverable requires that no timeout is set,
1409 * and enabling limited discoverable requires a timeout.
1411 if ((cp->val == 0x00 && timeout > 0) ||
1412 (cp->val == 0x02 && timeout == 0))
1413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1414 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while powered off */
1418 if (!hdev_is_powered(hdev) && timeout > 0) {
1419 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1420 MGMT_STATUS_NOT_POWERED);
/* Discoverable and connectable changes must not run concurrently */
1424 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1425 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be enabled first */
1431 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1432 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1433 MGMT_STATUS_REJECTED);
1437 if (hdev->advertising_paused) {
1438 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Powered off: just toggle the flag and report the new settings */
1443 if (!hdev_is_powered(hdev)) {
1444 bool changed = false;
1446 /* Setting limited discoverable when powered off is
1447 * not a valid operation since it requires a timeout
1448 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1450 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1451 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1455 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1460 err = new_settings(hdev, sk);
1465 /* If the current mode is the same, then just update the timeout
1466 * value with the new value. And if only the timeout gets updated,
1467 * then no need for any HCI transactions.
1469 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1470 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1471 HCI_LIMITED_DISCOVERABLE)) {
1472 cancel_delayed_work(&hdev->discov_off);
1473 hdev->discov_timeout = timeout;
1475 if (cp->val && hdev->discov_timeout > 0) {
1476 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1477 queue_delayed_work(hdev->req_workqueue,
1478 &hdev->discov_off, to);
1481 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1485 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1491 /* Cancel any potential discoverable timeout that might be
1492 * still active and store new timeout value. The arming of
1493 * the timeout happens in the complete handler.
1495 cancel_delayed_work(&hdev->discov_off);
1496 hdev->discov_timeout = timeout;
1499 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1501 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1503 /* Limited discoverable mode */
1504 if (cp->val == 0x02)
1505 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1507 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Hand off to the async worker; completion replies to the socket */
1509 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1513 hci_dev_unlock(hdev);
/* Completion handler for the asynchronous SET_CONNECTABLE work: report
 * the error on failure, otherwise send the updated settings to the
 * originating socket and broadcast NEW_SETTINGS to everyone else.
 */
1517 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1519 struct mgmt_pending_cmd *cmd;
1521 bt_dev_dbg(hdev, "status 0x%02x", status);
1525 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1530 u8 mgmt_err = mgmt_status(status);
1531 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1535 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1536 new_settings(hdev, cmd->sk);
1539 mgmt_pending_remove(cmd);
1542 hci_dev_unlock(hdev);
/* Flag-only path for SET_CONNECTABLE (used when no HCI traffic is
 * needed): toggle HCI_CONNECTABLE, drop HCI_DISCOVERABLE when turning
 * connectable off, answer with the settings and refresh scan state.
 */
1545 static int set_connectable_update_settings(struct hci_dev *hdev,
1546 struct sock *sk, u8 val)
1548 bool changed = false;
1551 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1555 hci_dev_set_flag(hdev, HCI_CONNECTABLE)
;
1557 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Not connectable implies not discoverable */
1558 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1561 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1566 hci_req_update_scan(hdev);
1567 hci_update_background_scan(hdev);
1568 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler. When powered off only the flags are
 * updated (set_connectable_update_settings); when powered on the flag
 * changes are applied here and the HCI work is delegated to the
 * connectable_update worker, completed in
 * mgmt_set_connectable_complete().
 */
1574 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1577 struct mgmt_mode *cp = data;
1578 struct mgmt_pending_cmd *cmd;
1581 bt_dev_dbg(hdev, "sock %p", sk);
1583 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1584 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1586 MGMT_STATUS_REJECTED);
1588 if (cp->val != 0x00 && cp->val != 0x01)
1589 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1590 MGMT_STATUS_INVALID_PARAMS);
1594 if (!hdev_is_powered(hdev)) {
1595 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Serialize against other discoverable/connectable changes */
1599 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1600 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1601 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1606 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1613 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off also cancels any discoverable state */
1615 if (hdev->discov_timeout > 0)
1616 cancel_delayed_work(&hdev->discov_off);
1618 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1619 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1620 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1623 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1627 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. No HCI
 * traffic is required, but in limited-privacy mode a bondable change
 * can alter the advertised address, so the discoverable_update work is
 * kicked when advertising is active.
 */
1631 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1634 struct mgmt_mode *cp = data;
1638 bt_dev_dbg(hdev, "sock %p", sk);
1640 if (cp->val != 0x00 && cp->val != 0x01)
1641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1642 MGMT_STATUS_INVALID_PARAMS);
1647 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1649 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1651 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1656 /* In limited privacy mode the change of bondable mode
1657 * may affect the local advertising address.
1659 if (hdev_is_powered(hdev) &&
1660 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1661 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1662 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1663 queue_work(hdev->req_workqueue,
1664 &hdev->discoverable_update);
1666 err = new_settings(hdev, sk);
1670 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: enable/disable BR/EDR link-level
 * security (authentication). Powered off: flag change only. Powered
 * on: sends HCI Write Authentication Enable and replies from the
 * command-complete path via the pending command.
 */
1674 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1677 struct mgmt_mode *cp = data;
1678 struct mgmt_pending_cmd *cmd;
1682 bt_dev_dbg(hdev, "sock %p", sk);
1684 status = mgmt_bredr_support(hdev);
1686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1689 if (cp->val != 0x00 && cp->val != 0x01)
1690 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1691 MGMT_STATUS_INVALID_PARAMS);
1695 if (!hdev_is_powered(hdev)) {
1696 bool changed = false;
1698 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1699 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1703 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1708 err = new_settings(hdev, sk);
1713 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1714 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: no HCI needed */
1721 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1722 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1726 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1732 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1734 mgmt_pending_remove(cmd);
1739 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * Powered off: flag change only (disabling SSP also clears HS).
 * Powered on: sends HCI Write Simple Pairing Mode; when disabling SSP
 * while debug keys are in use, debug mode is switched off first.
 */
1743 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1745 struct mgmt_mode *cp = data;
1746 struct mgmt_pending_cmd *cmd;
1750 bt_dev_dbg(hdev, "sock %p", sk);
1752 status = mgmt_bredr_support(hdev);
1754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1756 if (!lmp_ssp_capable(hdev))
1757 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1758 MGMT_STATUS_NOT_SUPPORTED);
1760 if (cp->val != 0x00 && cp->val != 0x01)
1761 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1762 MGMT_STATUS_INVALID_PARAMS);
1766 if (!hdev_is_powered(hdev)) {
1770 changed = !hci_dev_test_and_set_flag(hdev,
1773 changed = hci_dev_test_and_clear_flag(hdev,
1776 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so it is force-disabled too */
1779 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1782 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1787 err = new_settings(hdev, sk);
1792 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1793 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1798 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1799 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1803 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Leaving SSP while debug keys are active: disable debug mode first */
1809 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1810 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1811 sizeof(cp->val), &cp->val);
1813 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1815 mgmt_pending_remove(cmd);
1820 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggle the High Speed (AMP) setting.
 * Requires CONFIG_BT_HS, BR/EDR, SSP capability and SSP enabled.
 * Pure flag change — disabling HS while powered is rejected.
 */
1824 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1826 struct mgmt_mode *cp = data;
1831 bt_dev_dbg(hdev, "sock %p", sk);
1833 if (!IS_ENABLED(CONFIG_BT_HS))
1834 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1835 MGMT_STATUS_NOT_SUPPORTED);
1837 status = mgmt_bredr_support(hdev);
1839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1841 if (!lmp_ssp_capable(hdev))
1842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1843 MGMT_STATUS_NOT_SUPPORTED);
1845 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1847 MGMT_STATUS_REJECTED);
1849 if (cp->val != 0x00 && cp->val != 0x01)
1850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1851 MGMT_STATUS_INVALID_PARAMS);
/* An in-flight SET_SSP could invalidate the checks above */
1855 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1862 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1864 if (hdev_is_powered(hdev)) {
1865 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1866 MGMT_STATUS_REJECTED);
1870 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1873 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1878 err = new_settings(hdev, sk);
1881 hci_dev_unlock(hdev);
/* HCI request completion for set_le(). On error, fail every pending
 * SET_LE command; on success, answer them with the new settings,
 * broadcast NEW_SETTINGS, and — when LE was just enabled — refresh the
 * default advertising/scan-response data and background scanning.
 */
1885 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1887 struct cmd_lookup match = { NULL, hdev };
1892 u8 mgmt_err = mgmt_status(status);
1894 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1899 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1901 new_settings(hdev, match.sk);
1906 /* Make sure the controller has a good default for
1907 * advertising data. Restrict the update to when LE
1908 * has actually been enabled. During power on, the
1909 * update in powered_update_hci will take care of it.
1911 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1912 struct hci_request req;
1913 hci_req_init(&req, hdev);
1914 if (ext_adv_capable(hdev)) {
/* Extended advertising path: (re)configure adv set 0x00 */
1917 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1919 __hci_req_update_scan_rsp_data(&req, 0x00);
/* Legacy advertising path */
1921 __hci_req_update_adv_data(&req, 0x00);
1922 __hci_req_update_scan_rsp_data(&req, 0x00);
1924 hci_req_run(&req, NULL);
1925 hci_update_background_scan(hdev);
1929 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: enable/disable LE support on a dual-mode
 * controller. LE-only controllers cannot switch LE off (re-enabling is
 * answered gracefully, disabling is rejected). When powered off or the
 * host LE state already matches, only flags change; otherwise an HCI
 * Write LE Host Supported request is issued and the reply is sent from
 * le_enable_complete().
 */
1932 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1934 struct mgmt_mode *cp = data;
1935 struct hci_cp_write_le_host_supported hci_cp;
1936 struct mgmt_pending_cmd *cmd;
1937 struct hci_request req;
1941 bt_dev_dbg(hdev, "sock %p", sk);
1943 if (!lmp_le_capable(hdev))
1944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1945 MGMT_STATUS_NOT_SUPPORTED);
1947 if (cp->val != 0x00 && cp->val != 0x01)
1948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1949 MGMT_STATUS_INVALID_PARAMS);
1951 /* Bluetooth single mode LE only controllers or dual-mode
1952 * controllers configured as LE only devices, do not allow
1953 * switching LE off. These have either LE enabled explicitly
1954 * or BR/EDR has been previously switched off.
1956 * When trying to enable an already enabled LE, then gracefully
1957 * send a positive response. Trying to disable it however will
1958 * result into rejection.
1960 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1961 if (cp->val == 0x01)
1962 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1964 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1965 MGMT_STATUS_REJECTED);
1971 enabled = lmp_host_le_capable(hdev);
/* Disabling LE tears down all advertising instances */
1974 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1976 if (!hdev_is_powered(hdev) || val == enabled) {
1977 bool changed = false;
1979 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1980 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1984 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1985 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1989 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1994 err = new_settings(hdev, sk);
/* SET_LE and SET_ADVERTISING must not run concurrently */
1999 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2000 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2006 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2012 hci_req_init(&req, hdev);
2014 memset(&hci_cp, 0, sizeof(hci_cp));
2018 hci_cp.simul = 0x00;
2020 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2021 __hci_req_disable_advertising(&req);
2023 if (ext_adv_capable(hdev))
2024 __hci_req_clear_ext_adv_sets(&req);
2027 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2030 err = hci_req_run(&req, le_enable_complete);
2032 mgmt_pending_remove(cmd);
2035 hci_dev_unlock(hdev);
2039 /* This is a helper function to test for pending mgmt commands that can
2040 * cause CoD or EIR HCI commands. We can only allow one such pending
2041 * mgmt command at a time since otherwise we cannot easily track what
2042 * the current values are, will be, and based on that calculate if a new
2043 * HCI command needs to be sent and if yes with what value.
2045 static bool pending_eir_or_class(struct hci_dev *hdev)
2047 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any command that may touch CoD/EIR */
2049 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2050 switch (cmd->opcode) {
2051 case MGMT_OP_ADD_UUID:
2052 case MGMT_OP_REMOVE_UUID:
2053 case MGMT_OP_SET_DEV_CLASS:
2054 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short-form UUIDs.
 */
2062 static const u8 bluetooth_base_uuid[] = {
2063 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2064 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID: if its low 12 bytes do not
 * match the Bluetooth Base UUID it is a full 128-bit UUID; otherwise
 * the 32-bit value at offset 12 decides between short 16/32-bit forms
 * (the return statements are on lines elided from this view).
 */
2067 static u8 get_uuid_size(const u8 *uuid)
2071 if (memcmp(uuid, bluetooth_base_uuid, 12))
2074 val = get_unaligned_le32(&uuid[12]);
/* Common completion for CoD/EIR-affecting commands (@mgmt_op): answer
 * the pending command with the translated status and the current
 * 3-byte device class, then drop it from the pending list.
 */
2081 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2083 struct mgmt_pending_cmd *cmd;
2087 cmd = pending_find(mgmt_op, hdev);
2091 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2092 mgmt_status(status), hdev->dev_class, 3);
2094 mgmt_pending_remove(cmd);
2097 hci_dev_unlock(hdev);
/* HCI request completion for add_uuid(): delegate the mgmt reply. */
2100 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2102 bt_dev_dbg(hdev, "status 0x%02x", status);
2104 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: register a service UUID, then refresh the
 * Class of Device and EIR data. If the request produces no HCI
 * commands (-ENODATA) the command completes immediately; otherwise a
 * pending command is added and answered from add_uuid_complete().
 */
2107 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2109 struct mgmt_cp_add_uuid *cp = data;
2110 struct mgmt_pending_cmd *cmd;
2111 struct hci_request req;
2112 struct bt_uuid *uuid;
2115 bt_dev_dbg(hdev, "sock %p", sk);
/* Only one CoD/EIR-affecting command may be pending at a time */
2119 if (pending_eir_or_class(hdev)) {
2120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2125 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2131 memcpy(uuid->uuid, cp->uuid, 16);
2132 uuid->svc_hint = cp->svc_hint;
2133 uuid->size = get_uuid_size(cp->uuid);
2135 list_add_tail(&uuid->list, &hdev->uuids);
2137 hci_req_init(&req, hdev);
2139 __hci_req_update_class(&req);
2140 __hci_req_update_eir(&req);
2142 err = hci_req_run(&req, add_uuid_complete);
2144 if (err != -ENODATA)
/* Nothing to send: reply right away with the current dev_class */
2147 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2148 hdev->dev_class, 3);
2152 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2161 hci_dev_unlock(hdev);
/* Turn on the service cache (deferring EIR/CoD updates) when powered.
 * Queues the delayed service_cache flush only on a 0->1 transition.
 */
2165 static bool enable_service_cache(struct hci_dev *hdev)
2167 if (!hdev_is_powered(hdev))
2170 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2171 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for remove_uuid(): delegate the mgmt reply. */
2179 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2181 bt_dev_dbg(hdev, "status 0x%02x", status);
2183 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID, or — when the
 * all-zero wildcard UUID is given — clear them all (enabling the
 * service cache instead when powered). Refreshes CoD and EIR; replies
 * immediately on -ENODATA, otherwise from remove_uuid_complete().
 */
2186 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2189 struct mgmt_cp_remove_uuid *cp = data;
2190 struct mgmt_pending_cmd *cmd;
2191 struct bt_uuid *match, *tmp;
2192 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2193 struct hci_request req;
2196 bt_dev_dbg(hdev, "sock %p", sk);
2200 if (pending_eir_or_class(hdev)) {
2201 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: drop every registered UUID */
2206 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2207 hci_uuids_clear(hdev);
2209 if (enable_service_cache(hdev)) {
2210 err = mgmt_cmd_complete(sk, hdev->id,
2211 MGMT_OP_REMOVE_UUID,
2212 0, hdev->dev_class, 3);
/* Otherwise remove only entries matching the given UUID */
2221 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2222 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2225 list_del(&match->list);
/* No entry matched: the UUID was never registered */
2231 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2232 MGMT_STATUS_INVALID_PARAMS);
2237 hci_req_init(&req, hdev);
2239 __hci_req_update_class(&req);
2240 __hci_req_update_eir(&req);
2242 err = hci_req_run(&req, remove_uuid_complete);
2244 if (err != -ENODATA)
2247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2248 hdev->dev_class, 3);
2252 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2261 hci_dev_unlock(hdev);
/* HCI request completion for set_dev_class(): delegate the mgmt reply. */
2265 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2267 bt_dev_dbg(hdev, "status 0x%02x", status);
2269 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor Class of Device.
 * Minor's low 2 bits and major's high 3 bits must be zero. When a
 * service-cache flush is due, it is performed synchronously (dropping
 * the dev lock around cancel_delayed_work_sync) before writing the
 * class; replies immediately on -ENODATA, else from
 * set_class_complete().
 */
2272 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2275 struct mgmt_cp_set_dev_class *cp = data;
2276 struct mgmt_pending_cmd *cmd;
2277 struct hci_request req;
2280 bt_dev_dbg(hdev, "sock %p", sk);
2282 if (!lmp_bredr_capable(hdev))
2283 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2284 MGMT_STATUS_NOT_SUPPORTED);
2288 if (pending_eir_or_class(hdev)) {
2289 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved bits: minor[1:0] and major[7:5] must be zero */
2294 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2295 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2296 MGMT_STATUS_INVALID_PARAMS);
2300 hdev->major_class = cp->major;
2301 hdev->minor_class = cp->minor;
/* Powered off: just record the class, it is written at power on */
2303 if (!hdev_is_powered(hdev)) {
2304 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2305 hdev->dev_class, 3);
2309 hci_req_init(&req, hdev);
2311 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
/* Drop the lock so the pending cache flush can finish safely */
2312 hci_dev_unlock(hdev);
2313 cancel_delayed_work_sync(&hdev->service_cache);
2315 __hci_req_update_eir(&req);
2318 __hci_req_update_class(&req);
2320 err = hci_req_run(&req, set_class_complete);
2322 if (err != -ENODATA)
2325 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2326 hdev->dev_class, 3);
2330 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2339 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list. Validates key_count against both the u16
 * overflow bound and the exact struct_size() of the payload, checks
 * every entry's address type and key type, updates the
 * HCI_KEEP_DEBUG_KEYS policy, then clears and repopulates the key
 * store — skipping blocked keys and debug-combination keys.
 */
2343 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2346 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound that keeps struct_size() within a u16 length field */
2347 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2348 sizeof(struct mgmt_link_key_info));
2349 u16 key_count, expected_len;
2353 bt_dev_dbg(hdev, "sock %p", sk);
2355 if (!lmp_bredr_capable(hdev))
2356 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2357 MGMT_STATUS_NOT_SUPPORTED);
2359 key_count = __le16_to_cpu(cp->key_count);
2360 if (key_count > max_key_count) {
2361 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2364 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared key count exactly */
2367 expected_len = struct_size(cp, keys, key_count);
2368 if (expected_len != len) {
2369 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2372 MGMT_STATUS_INVALID_PARAMS);
2375 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2376 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2377 MGMT_STATUS_INVALID_PARAMS);
2379 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* Validate all entries before touching the key store */
2382 for (i = 0; i < key_count; i++) {
2383 struct mgmt_link_key_info *key = &cp->keys[i];
2385 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2386 return mgmt_cmd_status(sk, hdev->id,
2387 MGMT_OP_LOAD_LINK_KEYS,
2388 MGMT_STATUS_INVALID_PARAMS);
2393 hci_link_keys_clear(hdev);
2396 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2398 changed = hci_dev_test_and_clear_flag(hdev,
2399 HCI_KEEP_DEBUG_KEYS);
2402 new_settings(hdev, NULL);
2404 for (i = 0; i < key_count; i++) {
2405 struct mgmt_link_key_info *key = &cp->keys[i];
/* Refuse keys the administrator has block-listed */
2407 if (hci_is_blocked_key(hdev,
2408 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2410 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2415 /* Always ignore debug keys and require a new pairing if
2416 * the user wants to use them.
2418 if (key->type == HCI_LK_DEBUG_COMBINATION)
2421 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2422 key->type, key->pin_len, NULL);
2425 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2427 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk.
 */
2432 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2433 u8 addr_type, struct sock *skip_sk)
2435 struct mgmt_ev_device_unpaired ev;
2437 bacpy(&ev.addr.bdaddr, bdaddr);
2438 ev.addr.type = addr_type;
2440 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the pairing material for one
 * device. BR/EDR: remove the link key. LE: abort any SMP pairing
 * (which removes LTK/IRK), delete or disable the connection
 * parameters. Optionally also disconnects the device; in that case the
 * mgmt reply is deferred until the link termination completes.
 */
2444 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2447 struct mgmt_cp_unpair_device *cp = data;
2448 struct mgmt_rp_unpair_device rp;
2449 struct hci_conn_params *params;
2450 struct mgmt_pending_cmd *cmd;
2451 struct hci_conn *conn;
2455 memset(&rp, 0, sizeof(rp));
2456 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2457 rp.addr.type = cp->addr.type;
2459 if (!bdaddr_type_is_valid(cp->addr.type))
2460 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2461 MGMT_STATUS_INVALID_PARAMS,
2464 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2465 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2466 MGMT_STATUS_INVALID_PARAMS,
2471 if (!hdev_is_powered(hdev)) {
2472 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2473 MGMT_STATUS_NOT_POWERED, &rp,
2478 if (cp->addr.type == BDADDR_BREDR) {
2479 /* If disconnection is requested, then look up the
2480 * connection. If the remote device is connected, it
2481 * will be later used to terminate the link.
2483 * Setting it to NULL explicitly will cause no
2484 * termination of the link.
2487 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2492 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2494 err = mgmt_cmd_complete(sk, hdev->id,
2495 MGMT_OP_UNPAIR_DEVICE,
2496 MGMT_STATUS_NOT_PAIRED, &rp,
2504 /* LE address type */
2505 addr_type = le_addr_type(cp->addr.type);
2507 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2508 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2510 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2511 MGMT_STATUS_NOT_PAIRED, &rp,
2516 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2518 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2523 /* Defer clearing up the connection parameters until closing to
2524 * give a chance of keeping them if a repairing happens.
2526 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2528 /* Disable auto-connection parameters if present */
2529 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2531 if (params->explicit_connect)
2532 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2534 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2537 /* If disconnection is not requested, then clear the connection
2538 * variable so that the link is not terminated.
2540 if (!cp->disconnect)
2544 /* If the connection variable is set, then termination of the
2545 * link is requested.
2548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2550 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Disconnect requested: defer the reply until abort completes */
2554 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2561 cmd->cmd_complete = addr_cmd_complete;
2563 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2565 mgmt_pending_remove(cmd);
2568 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given address. The reply is deferred via a pending command and
 * sent (with the address echoed back) once the disconnect completes.
 */
2572 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2575 struct mgmt_cp_disconnect *cp = data;
2576 struct mgmt_rp_disconnect rp;
2577 struct mgmt_pending_cmd *cmd;
2578 struct hci_conn *conn;
2581 bt_dev_dbg(hdev, "sock %p", sk);
2583 memset(&rp, 0, sizeof(rp));
2584 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2585 rp.addr.type = cp->addr.type;
2587 if (!bdaddr_type_is_valid(cp->addr.type))
2588 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2589 MGMT_STATUS_INVALID_PARAMS,
2594 if (!test_bit(HCI_UP, &hdev->flags)) {
2595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2596 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one disconnect may be outstanding at a time */
2601 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2602 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2603 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2607 if (cp->addr.type == BDADDR_BREDR)
2608 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2611 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2612 le_addr_type(cp->addr.type));
2614 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2615 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2616 MGMT_STATUS_NOT_CONNECTED, &rp,
2621 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2627 cmd->cmd_complete = generic_cmd_complete;
2629 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2631 mgmt_pending_remove(cmd);
2634 hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into the mgmt BDADDR_*
 * address type. LE links map public addresses to BDADDR_LE_PUBLIC and
 * everything else to BDADDR_LE_RANDOM; all other link types fall back
 * to BDADDR_BREDR.
 */
2638 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2640 switch (link_type) {
2642 switch (addr_type) {
2643 case ADDR_LE_DEV_PUBLIC:
2644 return BDADDR_LE_PUBLIC;
2647 /* Fallback to LE Random address type */
2648 return BDADDR_LE_RANDOM;
2652 /* Fallback to BR/EDR type */
2653 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report every mgmt-visible
 * connection. Counts eligible entries first to size the reply, then
 * fills it, skipping SCO/eSCO links, and sends the (possibly smaller)
 * recomputed length.
 */
2657 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2660 struct mgmt_rp_get_connections *rp;
2665 bt_dev_dbg(hdev, "sock %p", sk);
2669 if (!hdev_is_powered(hdev)) {
2670 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2671 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected */
2676 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2677 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2681 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links */
2688 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2689 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2691 bacpy(&rp->addr[i].bdaddr, &c->dst);
2692 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2693 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2698 rp->conn_count = cpu_to_le16(i);
2700 /* Recalculate length in case of filtered SCO connections, etc */
2701 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2702 struct_size(rp, addr, i));
2707 hci_dev_unlock(hdev);
/* Issue HCI_OP_PIN_CODE_NEG_REPLY for a PIN_CODE_NEG_REPLY mgmt
 * command, tracking it as a pending command answered from the HCI
 * completion path.
 */
2711 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2712 struct mgmt_cp_pin_code_neg_reply *cp)
2714 struct mgmt_pending_cmd *cmd;
2717 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2722 cmd->cmd_complete = addr_cmd_complete;
2724 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2725 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2727 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward the user-supplied PIN to the
 * controller. A high-security pairing demands a full 16-byte PIN; a
 * shorter one is converted into a negative reply. The mgmt response is
 * sent from the HCI completion via the pending command.
 */
2732 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2735 struct hci_conn *conn;
2736 struct mgmt_cp_pin_code_reply *cp = data;
2737 struct hci_cp_pin_code_reply reply;
2738 struct mgmt_pending_cmd *cmd;
2741 bt_dev_dbg(hdev, "sock %p", sk);
2745 if (!hdev_is_powered(hdev)) {
2746 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2747 MGMT_STATUS_NOT_POWERED);
2751 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2754 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a 16-digit PIN; otherwise reject pairing */
2758 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2759 struct mgmt_cp_pin_code_neg_reply ncp;
2761 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2763 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2765 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2767 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2768 MGMT_STATUS_INVALID_PARAMS);
2773 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2779 cmd->cmd_complete = addr_cmd_complete;
2781 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2782 reply.pin_len = cp->pin_len;
2783 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2785 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2787 mgmt_pending_remove(cmd);
2790 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairings.  Values above SMP_IO_KEYBOARD_DISPLAY are rejected as
 * invalid; the update itself is done under the hdev lock.
 */
2794 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2797 struct mgmt_cp_set_io_capability *cp = data;
2799 bt_dev_dbg(hdev, "sock %p", sk);
2801 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2803 MGMT_STATUS_INVALID_PARAMS);
2807 hdev->io_capability = cp->io_capability;
2809 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2811 hci_dev_unlock(hdev);
2813 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Look up the pending MGMT_OP_PAIR_DEVICE command whose user_data points at
 * @conn, or NULL if none (NULL return implied by the fragmentary excerpt).
 */
2817 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2819 struct hci_dev *hdev = conn->hdev;
2820 struct mgmt_pending_cmd *cmd;
2822 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2823 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2826 if (cmd->user_data != conn)
/* Finish a Pair Device request: send the command-complete with @status,
 * detach all pairing callbacks from the connection (so no further events
 * fire for it) and drop the reference taken when pairing started.
 */
2835 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2837 struct mgmt_rp_pair_device rp;
2838 struct hci_conn *conn = cmd->user_data;
2841 bacpy(&rp.addr.bdaddr, &conn->dst);
2842 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2844 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2845 status, &rp, sizeof(rp));
2847 /* So we don't get further callbacks for this connection */
2848 conn->connect_cfm_cb = NULL;
2849 conn->security_cfm_cb = NULL;
2850 conn->disconn_cfm_cb = NULL;
2852 hci_conn_drop(conn);
2854 /* The device is paired so there is no need to remove
2855 * its connection parameters anymore.
2857 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing finishes; completes and removes any
 * matching pending Pair Device command with SUCCESS or FAILED.
 */
2864 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2866 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2867 struct mgmt_pending_cmd *cmd;
2869 cmd = find_pairing(conn);
2871 cmd->cmd_complete(cmd, status);
2872 mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect callback during pairing: translate
 * the HCI status to a mgmt status and complete the pending command.
 */
2876 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2878 struct mgmt_pending_cmd *cmd;
2880 BT_DBG("status %u", status);
2882 cmd = find_pairing(conn);
2884 BT_DBG("Unable to find a pending command");
2888 cmd->cmd_complete(cmd, mgmt_status(status));
2889 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb().  NOTE(review): the visible lines
 * are identical to the BR/EDR version; the LE-specific early-out (ignoring
 * status == 0, since a mere connection is not proof of pairing) is among
 * the lines missing from this excerpt.
 */
2892 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2894 struct mgmt_pending_cmd *cmd;
2896 BT_DBG("status %u", status);
2901 cmd = find_pairing(conn);
2903 BT_DBG("Unable to find a pending command");
2907 cmd->cmd_complete(cmd, mgmt_status(status));
2908 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: establish an ACL (BR/EDR) or LE connection
 * to the target and start dedicated bonding over it.  The reply is sent
 * asynchronously via pairing_complete() once the per-transport callbacks
 * fire.  NOTE(review): excerpt is fragmentary — goto labels, NULL checks
 * and some braces are not visible.
 */
2911 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2914 struct mgmt_cp_pair_device *cp = data;
2915 struct mgmt_rp_pair_device rp;
2916 struct mgmt_pending_cmd *cmd;
2917 u8 sec_level, auth_type;
2918 struct hci_conn *conn;
2921 bt_dev_dbg(hdev, "sock %p", sk);
/* Reply always echoes the requested address, even on failure */
2923 memset(&rp, 0, sizeof(rp));
2924 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2925 rp.addr.type = cp->addr.type;
2927 if (!bdaddr_type_is_valid(cp->addr.type))
2928 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2929 MGMT_STATUS_INVALID_PARAMS,
2932 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2933 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2934 MGMT_STATUS_INVALID_PARAMS,
2939 if (!hdev_is_powered(hdev)) {
2940 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2941 MGMT_STATUS_NOT_POWERED, &rp,
2946 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2947 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2948 MGMT_STATUS_ALREADY_PAIRED, &rp,
2953 sec_level = BT_SECURITY_MEDIUM;
2954 auth_type = HCI_AT_DEDICATED_BONDING;
2956 if (cp->addr.type == BDADDR_BREDR) {
2957 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2958 auth_type, CONN_REASON_PAIR_DEVICE);
2960 u8 addr_type = le_addr_type(cp->addr.type);
2961 struct hci_conn_params *p;
2963 /* When pairing a new device, it is expected to remember
2964 * this device for future connections. Adding the connection
2965 * parameter information ahead of time allows tracking
2966 * of the peripheral preferred values and will speed up any
2967 * further connection establishment.
2969 * If connection parameters already exist, then they
2970 * will be kept and this function does nothing.
2972 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
/* An explicit-connect entry would auto-trigger; neutralize it */
2974 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2975 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2977 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2978 sec_level, HCI_LE_CONN_TIMEOUT,
2979 CONN_REASON_PAIR_DEVICE);
/* Map connection-attempt errno onto a mgmt status code */
2985 if (PTR_ERR(conn) == -EBUSY)
2986 status = MGMT_STATUS_BUSY;
2987 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2988 status = MGMT_STATUS_NOT_SUPPORTED;
2989 else if (PTR_ERR(conn) == -ECONNREFUSED)
2990 status = MGMT_STATUS_REJECTED;
2992 status = MGMT_STATUS_CONNECT_FAILED;
2994 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2995 status, &rp, sizeof(rp));
/* A connection that already carries pairing callbacks is busy */
2999 if (conn->connect_cfm_cb) {
3000 hci_conn_drop(conn);
3001 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3002 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3006 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3009 hci_conn_drop(conn);
3013 cmd->cmd_complete = pairing_complete;
3015 /* For LE, just connecting isn't a proof that the pairing finished */
3016 if (cp->addr.type == BDADDR_BREDR) {
3017 conn->connect_cfm_cb = pairing_complete_cb;
3018 conn->security_cfm_cb = pairing_complete_cb;
3019 conn->disconn_cfm_cb = pairing_complete_cb;
3021 conn->connect_cfm_cb = le_pairing_complete_cb;
3022 conn->security_cfm_cb = le_pairing_complete_cb;
3023 conn->disconn_cfm_cb = le_pairing_complete_cb;
3026 conn->io_capability = cp->io_cap;
/* Reference held until pairing_complete() drops it */
3027 cmd->user_data = hci_conn_get(conn);
3029 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3030 hci_conn_security(conn, sec_level, auth_type, true)) {
3031 cmd->cmd_complete(cmd, 0);
3032 mgmt_pending_remove(cmd);
3038 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-flight Pair Device
 * command for the given address, complete it with CANCELLED, and tear down
 * any keys/pairing state plus the link created for the pairing attempt.
 */
3042 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3045 struct mgmt_addr_info *addr = data;
3046 struct mgmt_pending_cmd *cmd;
3047 struct hci_conn *conn;
3050 bt_dev_dbg(hdev, "sock %p", sk);
3054 if (!hdev_is_powered(hdev)) {
3055 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3056 MGMT_STATUS_NOT_POWERED);
/* There must be a pending pair command to cancel ... */
3060 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3062 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3063 MGMT_STATUS_INVALID_PARAMS);
3067 conn = cmd->user_data;
/* ... and it must target the same peer address */
3069 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3070 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3071 MGMT_STATUS_INVALID_PARAMS);
3075 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3076 mgmt_pending_remove(cmd);
3078 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3079 addr, sizeof(*addr));
3081 /* Since user doesn't want to proceed with the connection, abort any
3082 * ongoing pairing and then terminate the link if it was created
3083 * because of the pair device action.
3085 if (addr->type == BDADDR_BREDR)
3086 hci_remove_link_key(hdev, &addr->bdaddr);
3088 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3089 le_addr_type(addr->type));
3091 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3092 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3095 hci_dev_unlock(hdev);
/* Common backend for the user-confirm / user-passkey / PIN reply family of
 * mgmt commands.  For LE addresses the answer is routed through SMP; for
 * BR/EDR it is sent to the controller as HCI command @hci_op, tracked as a
 * pending mgmt command keyed on @mgmt_op.  @passkey is only used for the
 * HCI_OP_USER_PASSKEY_REPLY case.
 */
3099 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3100 struct mgmt_addr_info *addr, u16 mgmt_op,
3101 u16 hci_op, __le32 passkey)
3103 struct mgmt_pending_cmd *cmd;
3104 struct hci_conn *conn;
3109 if (!hdev_is_powered(hdev)) {
3110 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3111 MGMT_STATUS_NOT_POWERED, addr,
3116 if (addr->type == BDADDR_BREDR)
3117 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3119 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3120 le_addr_type(addr->type));
3123 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3124 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing answers never touch HCI directly — SMP handles them */
3129 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3130 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3132 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3133 MGMT_STATUS_SUCCESS, addr,
3136 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3137 MGMT_STATUS_FAILED, addr,
3143 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3149 cmd->cmd_complete = addr_cmd_complete;
3151 /* Continue with pairing via HCI */
3152 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3153 struct hci_cp_user_passkey_reply cp;
3155 bacpy(&cp.bdaddr, &addr->bdaddr);
3156 cp.passkey = passkey;
3157 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* All other replies only carry the bdaddr as parameter */
3159 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3163 mgmt_pending_remove(cmd);
3166 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler — thin wrapper over
 * user_pairing_resp() with a zero passkey.
 */
3170 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3171 void *data, u16 len)
3173 struct mgmt_cp_pin_code_neg_reply *cp = data;
3175 bt_dev_dbg(hdev, "sock %p", sk);
3177 return user_pairing_resp(sk, hdev, &cp->addr,
3178 MGMT_OP_PIN_CODE_NEG_REPLY,
3179 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler.  Unlike its siblings this one
 * validates @len explicitly (the command carries no extra payload).
 */
3182 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3185 struct mgmt_cp_user_confirm_reply *cp = data;
3187 bt_dev_dbg(hdev, "sock %p", sk);
3189 if (len != sizeof(*cp))
3190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3191 MGMT_STATUS_INVALID_PARAMS);
3193 return user_pairing_resp(sk, hdev, &cp->addr,
3194 MGMT_OP_USER_CONFIRM_REPLY,
3195 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler — wrapper over
 * user_pairing_resp() with a zero passkey.
 */
3198 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3199 void *data, u16 len)
3201 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3203 bt_dev_dbg(hdev, "sock %p", sk);
3205 return user_pairing_resp(sk, hdev, &cp->addr,
3206 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3207 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler — forwards the user's passkey through
 * user_pairing_resp().
 */
3210 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3213 struct mgmt_cp_user_passkey_reply *cp = data;
3215 bt_dev_dbg(hdev, "sock %p", sk);
3217 return user_pairing_resp(sk, hdev, &cp->addr,
3218 MGMT_OP_USER_PASSKEY_REPLY,
3219 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler — wrapper over
 * user_pairing_resp() with a zero passkey.
 */
3222 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3223 void *data, u16 len)
3225 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3227 bt_dev_dbg(hdev, "sock %p", sk);
3229 return user_pairing_resp(sk, hdev, &cp->addr,
3230 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3231 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Restart the current advertising instance when data carried in it (local
 * name, appearance — selected via @flags) has changed: cancel the running
 * timeout and reschedule the instance so fresh data is advertised.
 */
3234 static void adv_expire(struct hci_dev *hdev, u32 flags)
3236 struct adv_info *adv_instance;
3237 struct hci_request req;
3240 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3244 /* stop if current instance doesn't need to be changed */
3245 if (!(adv_instance->flags & flags))
3248 cancel_adv_timeout(hdev);
3250 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3254 hci_req_init(&req, hdev);
3255 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3260 hci_req_run(&req, NULL);
/* HCI request completion for Set Local Name: report status (or success with
 * the name data) to the requester and, if advertising is active, refresh
 * instances that embed the local name.
 */
3263 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3265 struct mgmt_cp_set_local_name *cp;
3266 struct mgmt_pending_cmd *cmd;
3268 bt_dev_dbg(hdev, "status 0x%02x", status);
3272 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3279 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3280 mgmt_status(status));
3282 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Name may be carried in adv data — force a refresh while advertising */
3285 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3286 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3289 mgmt_pending_remove(cmd);
3292 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name and short name.
 * Unchanged names short-circuit with an immediate command complete; on an
 * unpowered controller only the stored copy and the LOCAL_NAME_CHANGED
 * event are updated.  Otherwise an HCI request pushes the new name / EIR /
 * scan response data and completion is deferred to set_name_complete().
 */
3295 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3298 struct mgmt_cp_set_local_name *cp = data;
3299 struct mgmt_pending_cmd *cmd;
3300 struct hci_request req;
3303 bt_dev_dbg(hdev, "sock %p", sk);
3307 /* If the old values are the same as the new ones just return a
3308 * direct command complete event.
3310 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3311 !memcmp(hdev->short_name, cp->short_name,
3312 sizeof(hdev->short_name))) {
3313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name has no HCI representation — always just stored */
3318 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3320 if (!hdev_is_powered(hdev)) {
3321 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3323 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3328 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3329 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3330 ext_info_changed(hdev, sk);
3335 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3341 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3343 hci_req_init(&req, hdev);
3345 if (lmp_bredr_capable(hdev)) {
3346 __hci_req_update_name(&req);
3347 __hci_req_update_eir(&req);
3350 /* The name is stored in the scan response data and so
3351 * no need to update the advertising data here.
3353 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3354 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3356 err = hci_req_run(&req, set_name_complete);
3358 mgmt_pending_remove(cmd);
3361 hci_dev_unlock(hdev);
/* MGMT_OP_SET_APPEARANCE handler (LE only): store the new appearance value
 * and, when advertising with the appearance flag, refresh the instance.
 */
3365 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3368 struct mgmt_cp_set_appearance *cp = data;
3372 bt_dev_dbg(hdev, "sock %p", sk);
3374 if (!lmp_le_capable(hdev))
3375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3376 MGMT_STATUS_NOT_SUPPORTED);
3378 appearance = le16_to_cpu(cp->appearance);
3382 if (hdev->appearance != appearance) {
3383 hdev->appearance = appearance;
3385 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3386 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3388 ext_info_changed(hdev, sk);
3391 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3394 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report supported, selected and
 * configurable PHYs for this controller.
 */
3399 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3400 void *data, u16 len)
3402 struct mgmt_rp_get_phy_configuration rp;
3404 bt_dev_dbg(hdev, "sock %p", sk);
3408 memset(&rp, 0, sizeof(rp));
3410 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3411 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3412 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3414 hci_dev_unlock(hdev);
3416 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event (current selected PHYs) to
 * all mgmt sockets except @skip.
 */
3420 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3422 struct mgmt_ev_phy_configuration_changed ev;
3424 memset(&ev, 0, sizeof(ev));
3426 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3428 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion handler for the LE Set Default PHY request issued by
 * set_phy_configuration(): report status to the requester and, on success,
 * broadcast the configuration change to everyone else.
 */
3432 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3433 u16 opcode, struct sk_buff *skb)
3435 struct mgmt_pending_cmd *cmd;
3437 bt_dev_dbg(hdev, "status 0x%02x", status);
3441 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3446 mgmt_cmd_status(cmd->sk, hdev->id,
3447 MGMT_OP_SET_PHY_CONFIGURATION,
3448 mgmt_status(status));
3450 mgmt_cmd_complete(cmd->sk, hdev->id,
3451 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3454 mgmt_phy_configuration_changed(hdev, cmd->sk);
3457 mgmt_pending_remove(cmd);
3460 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PHY_CONFIGURATION handler.  Validates the selected PHY mask
 * against supported/configurable PHYs, translates the BR/EDR selections
 * into an ACL packet-type mask applied directly to hdev->pkt_type, and —
 * if the LE part changed — issues HCI LE Set Default PHY, completing
 * asynchronously in set_default_phy_complete().
 */
3463 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3464 void *data, u16 len)
3466 struct mgmt_cp_set_phy_configuration *cp = data;
3467 struct hci_cp_le_set_default_phy cp_phy;
3468 struct mgmt_pending_cmd *cmd;
3469 struct hci_request req;
3470 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3471 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3472 bool changed = false;
3475 bt_dev_dbg(hdev, "sock %p", sk);
3477 configurable_phys = get_configurable_phys(hdev);
3478 supported_phys = get_supported_phys(hdev);
3479 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Selection must be a subset of what the controller supports */
3481 if (selected_phys & ~supported_phys)
3482 return mgmt_cmd_status(sk, hdev->id,
3483 MGMT_OP_SET_PHY_CONFIGURATION,
3484 MGMT_STATUS_INVALID_PARAMS);
3486 unconfigure_phys = supported_phys & ~configurable_phys;
/* Non-configurable PHYs must all remain selected */
3488 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3489 return mgmt_cmd_status(sk, hdev->id,
3490 MGMT_OP_SET_PHY_CONFIGURATION,
3491 MGMT_STATUS_INVALID_PARAMS);
3493 if (selected_phys == get_selected_phys(hdev))
3494 return mgmt_cmd_complete(sk, hdev->id,
3495 MGMT_OP_SET_PHY_CONFIGURATION,
3500 if (!hdev_is_powered(hdev)) {
3501 err = mgmt_cmd_status(sk, hdev->id,
3502 MGMT_OP_SET_PHY_CONFIGURATION,
3503 MGMT_STATUS_REJECTED);
3507 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3508 err = mgmt_cmd_status(sk, hdev->id,
3509 MGMT_OP_SET_PHY_CONFIGURATION,
/* Build the BR/EDR ACL packet-type mask.  Note: EDR bits in
 * hdev->pkt_type are "do not use" bits, hence the inverted logic
 * for the 2M/3M entries below.
 */
3514 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3515 pkt_type |= (HCI_DH3 | HCI_DM3);
3517 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3519 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3520 pkt_type |= (HCI_DH5 | HCI_DM5);
3522 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3524 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3525 pkt_type &= ~HCI_2DH1;
3527 pkt_type |= HCI_2DH1;
3529 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3530 pkt_type &= ~HCI_2DH3;
3532 pkt_type |= HCI_2DH3;
3534 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3535 pkt_type &= ~HCI_2DH5;
3537 pkt_type |= HCI_2DH5;
3539 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3540 pkt_type &= ~HCI_3DH1;
3542 pkt_type |= HCI_3DH1;
3544 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3545 pkt_type &= ~HCI_3DH3;
3547 pkt_type |= HCI_3DH3;
3549 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3550 pkt_type &= ~HCI_3DH5;
3552 pkt_type |= HCI_3DH5;
3554 if (pkt_type != hdev->pkt_type) {
3555 hdev->pkt_type = pkt_type;
/* If only BR/EDR changed, no HCI round-trip needed — complete now */
3559 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3560 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3562 mgmt_phy_configuration_changed(hdev, sk);
3564 err = mgmt_cmd_complete(sk, hdev->id,
3565 MGMT_OP_SET_PHY_CONFIGURATION,
3571 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3578 hci_req_init(&req, hdev);
3580 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no TX/RX preference" */
3582 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3583 cp_phy.all_phys |= 0x01;
3585 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3586 cp_phy.all_phys |= 0x02;
3588 if (selected_phys & MGMT_PHY_LE_1M_TX)
3589 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3591 if (selected_phys & MGMT_PHY_LE_2M_TX)
3592 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3594 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3595 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3597 if (selected_phys & MGMT_PHY_LE_1M_RX)
3598 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3600 if (selected_phys & MGMT_PHY_LE_2M_RX)
3601 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3603 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3604 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3606 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3608 err = hci_req_run_skb(&req, set_default_phy_complete);
3610 mgmt_pending_remove(cmd);
3613 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the controller's blocked-key
 * list with the one supplied by user space.  The key count is validated
 * both against the u16 payload limit and the exact expected length before
 * the old list is cleared and rebuilt under the hdev lock.
 */
3618 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3621 int err = MGMT_STATUS_SUCCESS;
3622 struct mgmt_cp_set_blocked_keys *keys = data;
/* Maximum number of key entries that fit in a u16-sized payload */
3623 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3624 sizeof(struct mgmt_blocked_key_info));
3625 u16 key_count, expected_len;
3628 bt_dev_dbg(hdev, "sock %p", sk);
3630 key_count = __le16_to_cpu(keys->key_count);
3631 if (key_count > max_key_count) {
3632 bt_dev_err(hdev, "too big key_count value %u", key_count);
3633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3634 MGMT_STATUS_INVALID_PARAMS);
3637 expected_len = struct_size(keys, keys, key_count);
3638 if (expected_len != len) {
3639 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3642 MGMT_STATUS_INVALID_PARAMS);
/* Replace the whole list: drop existing entries, then add new ones */
3647 hci_blocked_keys_clear(hdev);
3649 for (i = 0; i < keys->key_count; ++i) {
3650 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3653 err = MGMT_STATUS_NO_RESOURCES;
3657 b->type = keys->keys[i].type;
3658 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3659 list_add_rcu(&b->list, &hdev->blocked_keys);
3661 hci_dev_unlock(hdev);
3663 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband-speech setting.
 * Requires controller support (quirk flag); while powered, only a request
 * matching the current state is accepted (REJECTED otherwise).  Emits New
 * Settings when the flag actually changed.
 */
3667 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3668 void *data, u16 len)
3670 struct mgmt_mode *cp = data;
3672 bool changed = false;
3674 bt_dev_dbg(hdev, "sock %p", sk);
3676 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3677 return mgmt_cmd_status(sk, hdev->id,
3678 MGMT_OP_SET_WIDEBAND_SPEECH,
3679 MGMT_STATUS_NOT_SUPPORTED);
3681 if (cp->val != 0x00 && cp->val != 0x01)
3682 return mgmt_cmd_status(sk, hdev->id,
3683 MGMT_OP_SET_WIDEBAND_SPEECH,
3684 MGMT_STATUS_INVALID_PARAMS);
3688 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3689 err = mgmt_cmd_status(sk, hdev->id,
3690 MGMT_OP_SET_WIDEBAND_SPEECH,
/* Changing the value while powered is not supported — only a
 * request matching the current state gets through.
 */
3695 if (hdev_is_powered(hdev) &&
3696 !!cp->val != hci_dev_test_flag(hdev,
3697 HCI_WIDEBAND_SPEECH_ENABLED)) {
3698 err = mgmt_cmd_status(sk, hdev->id,
3699 MGMT_OP_SET_WIDEBAND_SPEECH,
3700 MGMT_STATUS_REJECTED);
3705 changed = !hci_dev_test_and_set_flag(hdev,
3706 HCI_WIDEBAND_SPEECH_ENABLED);
3708 changed = hci_dev_test_and_clear_flag(hdev,
3709 HCI_WIDEBAND_SPEECH_ENABLED);
3711 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3716 err = new_settings(hdev, sk);
3719 hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP handler: build an EIR-style capability list
 * (security flags, max encryption key sizes, LE TX power range) from the
 * controller's supported-commands bitmap and return it to user space.
 */
3723 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3724 void *data, u16 data_len)
3727 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3730 u8 tx_power_range[2];
3732 bt_dev_dbg(hdev, "sock %p", sk);
3734 memset(&buf, 0, sizeof(buf));
3738 /* When the Read Simple Pairing Options command is supported, then
3739 * the remote public key validation is supported.
3741 * Alternatively, when Microsoft extensions are available, they can
3742 * indicate support for public key validation as well.
3744 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3745 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3747 flags |= 0x02; /* Remote public key validation (LE) */
3749 /* When the Read Encryption Key Size command is supported, then the
3750 * encryption key size is enforced.
3752 if (hdev->commands[20] & 0x10)
3753 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3755 flags |= 0x08; /* Encryption key size enforcement (LE) */
3757 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3760 /* When the Read Simple Pairing Options command is supported, then
3761 * also max encryption key size information is provided.
3763 if (hdev->commands[41] & 0x08)
3764 cap_len = eir_append_le16(rp->cap, cap_len,
3765 MGMT_CAP_MAX_ENC_KEY_SIZE,
3766 hdev->max_enc_key_size);
3768 cap_len = eir_append_le16(rp->cap, cap_len,
3769 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3770 SMP_MAX_ENC_KEY_SIZE);
3772 /* Append the min/max LE tx power parameters if we were able to fetch
3773 * it from the controller
3775 if (hdev->commands[38] & 0x80) {
3776 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3777 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3778 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3782 rp->cap_len = cpu_to_le16(cap_len);
3784 hci_dev_unlock(hdev);
3786 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3787 rp, sizeof(*rp) + cap_len);
/* Experimental-feature identifiers.  Each 128-bit UUID is stored in
 * little-endian byte order (reversed relative to the textual form given
 * in the comments above each array).
 */
3790 #ifdef CONFIG_BT_FEATURE_DEBUG
3791 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3792 static const u8 debug_uuid[16] = {
3793 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3794 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3798 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3799 static const u8 simult_central_periph_uuid[16] = {
3800 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3801 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3804 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3805 static const u8 rpa_resolution_uuid[16] = {
3806 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3807 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: enumerate the experimental
 * features applicable to this controller (debug, simultaneous
 * central/peripheral, LL privacy / RPA resolution) with their current
 * flags, and subscribe the socket to future feature-changed events.
 * Works with hdev == NULL for the non-controller index.
 */
3810 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3811 void *data, u16 data_len)
3813 char buf[62]; /* Enough space for 3 features */
3814 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3818 bt_dev_dbg(hdev, "sock %p", sk);
3820 memset(&buf, 0, sizeof(buf));
3822 #ifdef CONFIG_BT_FEATURE_DEBUG
/* BIT(0) == feature currently enabled */
3824 flags = bt_dbg_get() ? BIT(0) : 0;
3826 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3827 rp->features[idx].flags = cpu_to_le32(flags);
/* Advertise simultaneous central+peripheral only when the LE states
 * mask reports all three required roles.
 */
3833 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3834 (hdev->le_states[4] & 0x08) && /* Central */
3835 (hdev->le_states[4] & 0x40) && /* Peripheral */
3836 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3841 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3842 rp->features[idx].flags = cpu_to_le32(flags);
3846 if (hdev && use_ll_privacy(hdev)) {
3847 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3848 flags = BIT(0) | BIT(1);
3852 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3853 rp->features[idx].flags = cpu_to_le32(flags);
3857 rp->feature_count = cpu_to_le16(idx);
3859 /* After reading the experimental features information, enable
3860 * the events to update client on any future change.
3862 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3864 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3865 MGMT_OP_READ_EXP_FEATURES_INFO,
3866 0, rp, sizeof(*rp) + (20 * idx));
/* Notify subscribed sockets (except @skip) that the LL-privacy
 * experimental feature changed; BIT(1) marks "changing this also changes
 * the supported settings".
 */
3869 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3872 struct mgmt_ev_exp_feature_changed ev;
3874 memset(&ev, 0, sizeof(ev));
3875 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3876 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3878 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3880 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Notify subscribed sockets (except @skip) that the debug experimental
 * feature changed.  Sent on the non-controller index (hdev == NULL).
 */
3884 #ifdef CONFIG_BT_FEATURE_DEBUG
3885 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3887 struct mgmt_ev_exp_feature_changed ev;
3889 memset(&ev, 0, sizeof(ev));
3890 memcpy(ev.uuid, debug_uuid, 16);
3891 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3893 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3895 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Initializer helper for entries of the exp_features[] table below
 * (pairs a feature UUID with its set-handler).
 */
3899 #define EXP_FEAT(_uuid, _set_func) \
3902 .set_func = _set_func, \
3905 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zero UUID disables every experimental feature that is
 * currently on: the debug feature (if compiled in) and, when the
 * controller is powered down, LL privacy.  Change events are emitted for
 * each feature that was actually cleared.
 */
3906 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
3907 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3909 struct mgmt_rp_set_exp_feature rp;
3911 memset(rp.uuid, 0, 16);
3912 rp.flags = cpu_to_le32(0);
3914 #ifdef CONFIG_BT_FEATURE_DEBUG
3916 bool changed = bt_dbg_get();
3921 exp_debug_feature_changed(false, sk);
/* LL privacy can only be toggled while powered off */
3925 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3928 changed = hci_dev_test_and_clear_flag(hdev,
3929 HCI_ENABLE_LL_PRIVACY);
3931 exp_ll_privacy_feature_changed(false, hdev, sk);
3934 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3936 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3937 MGMT_OP_SET_EXP_FEATURE, 0,
/* Set-handler for the debug experimental feature.  Only valid on the
 * non-controller index, with exactly one boolean parameter octet.  On an
 * actual state change the feature-changed event is broadcast.
 */
3941 #ifdef CONFIG_BT_FEATURE_DEBUG
3942 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
3943 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3945 struct mgmt_rp_set_exp_feature rp;
3950 /* Command requires to use the non-controller index */
3952 return mgmt_cmd_status(sk, hdev->id,
3953 MGMT_OP_SET_EXP_FEATURE,
3954 MGMT_STATUS_INVALID_INDEX);
3956 /* Parameters are limited to a single octet */
3957 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3958 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3959 MGMT_OP_SET_EXP_FEATURE,
3960 MGMT_STATUS_INVALID_PARAMS);
3962 /* Only boolean on/off is supported */
3963 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3964 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3965 MGMT_OP_SET_EXP_FEATURE,
3966 MGMT_STATUS_INVALID_PARAMS);
3968 val = !!cp->param[0];
3969 changed = val ? !bt_dbg_get() : bt_dbg_get();
3972 memcpy(rp.uuid, debug_uuid, 16);
3973 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3975 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3977 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3978 MGMT_OP_SET_EXP_FEATURE, 0,
/* presumably guarded by "if (changed)" — line not visible here */
3982 exp_debug_feature_changed(val, sk);
/* Set-handler for the LL-privacy / RPA-resolution experimental feature.
 * Requires a controller index, a powered-down controller and a single
 * boolean parameter.  Enabling also clears HCI_ADVERTISING since legacy
 * advertising is not compatible with LL privacy.
 */
3988 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
3989 struct mgmt_cp_set_exp_feature *cp,
3992 struct mgmt_rp_set_exp_feature rp;
3997 /* Command requires to use the controller index */
3999 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4000 MGMT_OP_SET_EXP_FEATURE,
4001 MGMT_STATUS_INVALID_INDEX);
4003 /* Changes can only be made when controller is powered down */
4004 if (hdev_is_powered(hdev))
4005 return mgmt_cmd_status(sk, hdev->id,
4006 MGMT_OP_SET_EXP_FEATURE,
4007 MGMT_STATUS_REJECTED);
4009 /* Parameters are limited to a single octet */
4010 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4011 return mgmt_cmd_status(sk, hdev->id,
4012 MGMT_OP_SET_EXP_FEATURE,
4013 MGMT_STATUS_INVALID_PARAMS);
4015 /* Only boolean on/off is supported */
4016 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4017 return mgmt_cmd_status(sk, hdev->id,
4018 MGMT_OP_SET_EXP_FEATURE,
4019 MGMT_STATUS_INVALID_PARAMS);
4021 val = !!cp->param[0];
4024 changed = !hci_dev_test_and_set_flag(hdev,
4025 HCI_ENABLE_LL_PRIVACY);
4026 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4028 /* Enable LL privacy + supported settings changed */
4029 flags = BIT(0) | BIT(1);
4031 changed = hci_dev_test_and_clear_flag(hdev,
4032 HCI_ENABLE_LL_PRIVACY);
4034 /* Disable LL privacy + supported settings changed */
4038 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4039 rp.flags = cpu_to_le32(flags);
4041 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4043 err = mgmt_cmd_complete(sk, hdev->id,
4044 MGMT_OP_SET_EXP_FEATURE, 0,
/* presumably guarded by "if (changed)" — line not visible here */
4048 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Dispatch table mapping experimental-feature UUIDs to their set-handlers;
 * terminated by a NULL entry (see set_exp_feature() lookup loop).
 */
4053 static const struct mgmt_exp_feature {
4055 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4056 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4057 } exp_features[] = {
4058 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4059 #ifdef CONFIG_BT_FEATURE_DEBUG
4060 EXP_FEAT(debug_uuid, set_debug_func),
4062 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4064 /* end with a null feature */
4065 EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE handler: match the request UUID against the
 * exp_features[] table and dispatch to its set-handler; unknown UUIDs get
 * NOT_SUPPORTED.  Works with hdev == NULL for the non-controller index.
 */
4068 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4069 void *data, u16 data_len)
4071 struct mgmt_cp_set_exp_feature *cp = data;
4074 bt_dev_dbg(hdev, "sock %p", sk);
4076 for (i = 0; exp_features[i].uuid; i++) {
4077 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4078 return exp_features[i].set_func(sk, hdev, cp, data_len);
4081 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4082 MGMT_OP_SET_EXP_FEATURE,
4083 MGMT_STATUS_NOT_SUPPORTED);
/* Bitmask with every defined per-device connection flag set */
4086 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* MGMT_OP_GET_DEVICE_FLAGS handler: look up the per-device flags either in
 * the BR/EDR accept list or in the LE connection parameters, and return
 * supported + current flags.  Status stays INVALID_PARAMS if the device is
 * not found in the relevant list.
 */
4088 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4091 struct mgmt_cp_get_device_flags *cp = data;
4092 struct mgmt_rp_get_device_flags rp;
4093 struct bdaddr_list_with_flags *br_params;
4094 struct hci_conn_params *params;
4095 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4096 u32 current_flags = 0;
4097 u8 status = MGMT_STATUS_INVALID_PARAMS;
4099 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4100 &cp->addr.bdaddr, cp->addr.type);
4104 memset(&rp, 0, sizeof(rp));
4106 if (cp->addr.type == BDADDR_BREDR) {
4107 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4113 current_flags = br_params->current_flags;
4115 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4116 le_addr_type(cp->addr.type));
4121 current_flags = params->current_flags;
4124 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4125 rp.addr.type = cp->addr.type;
4126 rp.supported_flags = cpu_to_le32(supported_flags);
4127 rp.current_flags = cpu_to_le32(current_flags);
4129 status = MGMT_STATUS_SUCCESS;
4132 hci_dev_unlock(hdev);
4134 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4138 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4139 bdaddr_t *bdaddr, u8 bdaddr_type,
4140 u32 supported_flags, u32 current_flags)
4142 struct mgmt_ev_device_flags_changed ev;
4144 bacpy(&ev.addr.bdaddr, bdaddr);
4145 ev.addr.type = bdaddr_type;
4146 ev.supported_flags = cpu_to_le32(supported_flags);
4147 ev.current_flags = cpu_to_le32(current_flags);
4149 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4152 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4155 struct mgmt_cp_set_device_flags *cp = data;
4156 struct bdaddr_list_with_flags *br_params;
4157 struct hci_conn_params *params;
4158 u8 status = MGMT_STATUS_INVALID_PARAMS;
4159 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4160 u32 current_flags = __le32_to_cpu(cp->current_flags);
4162 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4163 &cp->addr.bdaddr, cp->addr.type,
4164 __le32_to_cpu(current_flags));
4166 if ((supported_flags | current_flags) != supported_flags) {
4167 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4168 current_flags, supported_flags);
4174 if (cp->addr.type == BDADDR_BREDR) {
4175 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4180 br_params->current_flags = current_flags;
4181 status = MGMT_STATUS_SUCCESS;
4183 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4184 &cp->addr.bdaddr, cp->addr.type);
4187 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4188 le_addr_type(cp->addr.type));
4190 params->current_flags = current_flags;
4191 status = MGMT_STATUS_SUCCESS;
4193 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4195 le_addr_type(cp->addr.type));
4200 hci_dev_unlock(hdev);
4202 if (status == MGMT_STATUS_SUCCESS)
4203 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4204 supported_flags, current_flags);
4206 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4207 &cp->addr, sizeof(cp->addr));
/* Emit MGMT_EV_ADV_MONITOR_ADDED for the given monitor handle, skipping
 * the socket (sk) that issued the add command.
 */
4210 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4213 struct mgmt_ev_adv_monitor_added ev;
4215 ev.monitor_handle = cpu_to_le16(handle);
4217 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADV_MONITOR_REMOVED. If a Remove Adv Monitor command for a
 * specific (non-zero) handle is pending, its issuing socket is skipped
 * (sk_skip) since it gets a command reply instead of the event.
 * NOTE(review): elided listing -- the assignment of cp/sk_skip from the
 * pending cmd is not visible; code left byte-identical.
 */
4220 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4222 struct mgmt_ev_adv_monitor_removed ev;
4223 struct mgmt_pending_cmd *cmd;
4224 struct sock *sk_skip = NULL;
4225 struct mgmt_cp_remove_adv_monitor *cp;
4227 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4231 if (cp->monitor_handle)
4235 ev.monitor_handle = cpu_to_le16(handle);
4237 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* Handler for MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled
 * monitor features (MSFT OR-patterns if the controller supports them),
 * the handle/pattern limits, and the handles of all registered monitors
 * collected from the adv_monitors_idr under the dev lock.
 * NOTE(review): elided listing -- kmalloc NULL check, kfree and return are
 * not visible here.
 */
4240 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4241 void *data, u16 len)
4243 struct adv_monitor *monitor = NULL;
4244 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4247 __u32 supported = 0;
4249 __u16 num_handles = 0;
4250 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4252 BT_DBG("request for %s", hdev->name);
4256 if (msft_monitor_supported(hdev))
4257 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4259 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4260 handles[num_handles++] = monitor->handle;
4262 hci_dev_unlock(hdev);
/* Reply is variable-length: fixed header plus one u16 per handle. */
4264 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4265 rp = kmalloc(rp_size, GFP_KERNEL);
4269 /* All supported features are currently enabled */
4270 enabled = supported;
4272 rp->supported_features = cpu_to_le32(supported);
4273 rp->enabled_features = cpu_to_le32(enabled);
4274 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4275 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4276 rp->num_handles = cpu_to_le16(num_handles);
4278 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4280 err = mgmt_cmd_complete(sk, hdev->id,
4281 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4282 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for an asynchronous Add Adv Patterns Monitor
 * (with or without RSSI): find the pending command, and on controller
 * success broadcast the ADDED event, bump adv_monitors_cnt, mark the
 * monitor REGISTERED and refresh background scanning, then complete the
 * pending mgmt command with the monitor handle.
 * NOTE(review): elided listing -- lock, NULL-cmd bail-out and the
 * status!=0 branch are not visible; code left byte-identical.
 */
4289 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4291 struct mgmt_rp_add_adv_patterns_monitor rp;
4292 struct mgmt_pending_cmd *cmd;
4293 struct adv_monitor *monitor;
/* Either opcode variant may be pending; try RSSI first. */
4298 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4300 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4305 monitor = cmd->user_data;
4306 rp.monitor_handle = cpu_to_le16(monitor->handle);
4309 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4310 hdev->adv_monitors_cnt++;
4311 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4312 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4313 hci_update_background_scan(hdev);
4316 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4317 mgmt_status(status), &rp, sizeof(rp));
4318 mgmt_pending_remove(cmd);
4321 hci_dev_unlock(hdev);
4322 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4323 rp.monitor_handle, status);
/* Common helper for both Add Adv Patterns Monitor opcodes. Rejects if a
 * conflicting command is pending (BUSY), registers a pending cmd, then
 * hands the monitor to hci_add_adv_monitor(). If the add completes
 * synchronously (not pending), the success reply / ADDED event is sent
 * here; otherwise mgmt_add_adv_patterns_monitor_complete() finishes it.
 * On any failure path the monitor is freed before replying with status.
 * NOTE(review): elided listing -- goto labels and several branches are not
 * visible; statement order preserved byte-identical.
 */
4328 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4329 struct adv_monitor *m, u8 status,
4330 void *data, u16 len, u16 op)
4332 struct mgmt_rp_add_adv_patterns_monitor rp;
4333 struct mgmt_pending_cmd *cmd;
4342 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4343 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4344 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4345 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4346 status = MGMT_STATUS_BUSY;
4350 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4352 status = MGMT_STATUS_NO_RESOURCES;
4357 pending = hci_add_adv_monitor(hdev, m, &err);
/* Map hci_add_adv_monitor() errno to a mgmt status code. */
4359 if (err == -ENOSPC || err == -ENOMEM)
4360 status = MGMT_STATUS_NO_RESOURCES;
4361 else if (err == -EINVAL)
4362 status = MGMT_STATUS_INVALID_PARAMS;
4364 status = MGMT_STATUS_FAILED;
4366 mgmt_pending_remove(cmd);
/* Synchronous completion: no controller round-trip was needed. */
4371 mgmt_pending_remove(cmd);
4372 rp.monitor_handle = cpu_to_le16(m->handle);
4373 mgmt_adv_monitor_added(sk, hdev, m->handle);
4374 m->state = ADV_MONITOR_STATE_REGISTERED;
4375 hdev->adv_monitors_cnt++;
4377 hci_dev_unlock(hdev);
4378 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4382 hci_dev_unlock(hdev);
4387 hci_free_adv_monitor(hdev, m);
4388 hci_dev_unlock(hdev);
4389 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy RSSI thresholds/timeouts from the command (converting le16 fields)
 * into the monitor, or -- when rssi is NULL (plain, non-RSSI opcode) --
 * fill in permissive defaults so the MSFT API behaves as if no RSSI
 * constraints were given.
 * NOTE(review): elided listing -- the if/else around the two halves is not
 * visible; code left byte-identical.
 */
4392 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4393 struct mgmt_adv_rssi_thresholds *rssi)
4396 m->rssi.low_threshold = rssi->low_threshold;
4397 m->rssi.low_threshold_timeout =
4398 __le16_to_cpu(rssi->low_threshold_timeout);
4399 m->rssi.high_threshold = rssi->high_threshold;
4400 m->rssi.high_threshold_timeout =
4401 __le16_to_cpu(rssi->high_threshold_timeout);
4402 m->rssi.sampling_period = rssi->sampling_period;
4404 /* Default values. These numbers are the least constricting
4405 * parameters for MSFT API to work, so it behaves as if there
4406 * are no rssi parameter to consider. May need to be changed
4407 * if other API are to be supported.
4409 m->rssi.low_threshold = -127;
4410 m->rssi.low_threshold_timeout = 60;
4411 m->rssi.high_threshold = -127;
4412 m->rssi.high_threshold_timeout = 0;
4413 m->rssi.sampling_period = 0;
/* Validate each pattern (offset/length must fit in HCI_MAX_AD_LENGTH),
 * allocate an adv_pattern copy and link it into m->patterns. Returns a
 * MGMT_STATUS_* code; caller frees the monitor on failure.
 */
4417 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4418 struct mgmt_adv_pattern *patterns)
4420 u8 offset = 0, length = 0;
4421 struct adv_pattern *p = NULL;
4424 for (i = 0; i < pattern_count; i++) {
4425 offset = patterns[i].offset;
4426 length = patterns[i].length;
4427 if (offset >= HCI_MAX_AD_LENGTH ||
4428 length > HCI_MAX_AD_LENGTH ||
4429 (offset + length) > HCI_MAX_AD_LENGTH)
4430 return MGMT_STATUS_INVALID_PARAMS;
4432 p = kmalloc(sizeof(*p), GFP_KERNEL);
4434 return MGMT_STATUS_NO_RESOURCES;
4436 p->ad_type = patterns[i].ad_type;
4437 p->offset = patterns[i].offset;
4438 p->length = patterns[i].length;
4439 memcpy(p->value, patterns[i].value, p->length);
4441 INIT_LIST_HEAD(&p->list);
4442 list_add(&p->list, &m->patterns);
4445 return MGMT_STATUS_SUCCESS;
/* Handler for MGMT_OP_ADD_ADV_PATTERNS_MONITOR (no RSSI thresholds):
 * validate the variable-length command (header + pattern_count patterns),
 * build an adv_monitor with default RSSI parameters and delegate to
 * __add_adv_patterns_monitor(), which also handles the error statuses
 * accumulated here.
 * NOTE(review): elided listing -- the goto done labels are not visible.
 */
4448 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4449 void *data, u16 len)
4451 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4452 struct adv_monitor *m = NULL;
4453 u8 status = MGMT_STATUS_SUCCESS;
4454 size_t expected_size = sizeof(*cp);
4456 BT_DBG("request for %s", hdev->name);
4458 if (len <= sizeof(*cp)) {
4459 status = MGMT_STATUS_INVALID_PARAMS;
4463 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4464 if (len != expected_size) {
4465 status = MGMT_STATUS_INVALID_PARAMS;
4469 m = kzalloc(sizeof(*m), GFP_KERNEL);
4471 status = MGMT_STATUS_NO_RESOURCES;
4475 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi => parse_adv_monitor_rssi() installs the defaults. */
4477 parse_adv_monitor_rssi(m, NULL);
4478 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4481 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4482 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* Handler for MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: identical to
 * add_adv_patterns_monitor() except the command carries explicit RSSI
 * thresholds which are copied into the monitor.
 */
4485 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4486 void *data, u16 len)
4488 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4489 struct adv_monitor *m = NULL;
4490 u8 status = MGMT_STATUS_SUCCESS;
4491 size_t expected_size = sizeof(*cp);
4493 BT_DBG("request for %s", hdev->name);
4495 if (len <= sizeof(*cp)) {
4496 status = MGMT_STATUS_INVALID_PARAMS;
4500 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4501 if (len != expected_size) {
4502 status = MGMT_STATUS_INVALID_PARAMS;
4506 m = kzalloc(sizeof(*m), GFP_KERNEL);
4508 status = MGMT_STATUS_NO_RESOURCES;
4512 INIT_LIST_HEAD(&m->patterns);
4514 parse_adv_monitor_rssi(m, &cp->rssi);
4515 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4518 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4519 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for an asynchronous Remove Adv Monitor: echo the
 * requested handle back in the reply, refresh background scanning and
 * complete/remove the pending mgmt command.
 * NOTE(review): elided listing -- lock, NULL-cmd bail-out and the cp
 * assignment from cmd->param are not visible; code left byte-identical.
 */
4522 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4524 struct mgmt_rp_remove_adv_monitor rp;
4525 struct mgmt_cp_remove_adv_monitor *cp;
4526 struct mgmt_pending_cmd *cmd;
4531 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4536 rp.monitor_handle = cp->monitor_handle;
4539 hci_update_background_scan(hdev);
4541 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4542 mgmt_status(status), &rp, sizeof(rp));
4543 mgmt_pending_remove(cmd);
4546 hci_dev_unlock(hdev);
4547 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4548 rp.monitor_handle, status);
/* Handler for MGMT_OP_REMOVE_ADV_MONITOR: handle 0 means "remove all
 * monitors", otherwise remove the single named handle. Rejects with BUSY
 * if a conflicting monitor/LE command is pending. If removal completes
 * without a controller round-trip (not pending), reply immediately;
 * otherwise mgmt_remove_adv_monitor_complete() finishes it.
 * NOTE(review): elided listing -- goto labels and some branches missing
 * from view; statement order preserved byte-identical.
 */
4553 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4554 void *data, u16 len)
4556 struct mgmt_cp_remove_adv_monitor *cp = data;
4557 struct mgmt_rp_remove_adv_monitor rp;
4558 struct mgmt_pending_cmd *cmd;
4559 u16 handle = __le16_to_cpu(cp->monitor_handle);
4563 BT_DBG("request for %s", hdev->name);
4564 rp.monitor_handle = cp->monitor_handle;
4568 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4569 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4570 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4571 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4572 status = MGMT_STATUS_BUSY;
4576 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4578 status = MGMT_STATUS_NO_RESOURCES;
4583 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4585 pending = hci_remove_all_adv_monitor(hdev, &err);
4588 mgmt_pending_remove(cmd);
4591 status = MGMT_STATUS_INVALID_INDEX;
4593 status = MGMT_STATUS_FAILED;
4598 /* monitor can be removed without forwarding request to controller */
4600 mgmt_pending_remove(cmd);
4601 hci_dev_unlock(hdev);
4603 return mgmt_cmd_complete(sk, hdev->id,
4604 MGMT_OP_REMOVE_ADV_MONITOR,
4605 MGMT_STATUS_SUCCESS,
4609 hci_dev_unlock(hdev);
4613 hci_dev_unlock(hdev);
4614 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* HCI request callback for Read Local OOB Data: translate the controller
 * reply (legacy P-192-only or extended P-192+P-256 variant, selected by
 * opcode) into a mgmt_rp_read_local_oob_data and complete the pending
 * MGMT_OP_READ_LOCAL_OOB_DATA command. Short/failed replies turn into a
 * FAILED command status. For the legacy variant the P-256 fields are
 * trimmed off the reply (rp_size shrunk at line 4652).
 * NOTE(review): elided listing -- NULL-cmd bail-out, else keyword and goto
 * labels are not visible; code left byte-identical.
 */
4618 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4619 u16 opcode, struct sk_buff *skb)
4621 struct mgmt_rp_read_local_oob_data mgmt_rp;
4622 size_t rp_size = sizeof(mgmt_rp);
4623 struct mgmt_pending_cmd *cmd;
4625 bt_dev_dbg(hdev, "status %u", status);
4627 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4631 if (status || !skb) {
4632 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4633 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4637 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4639 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4640 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
4642 if (skb->len < sizeof(*rp)) {
4643 mgmt_cmd_status(cmd->sk, hdev->id,
4644 MGMT_OP_READ_LOCAL_OOB_DATA,
4645 MGMT_STATUS_FAILED);
4649 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4650 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
4652 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4654 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4656 if (skb->len < sizeof(*rp)) {
4657 mgmt_cmd_status(cmd->sk, hdev->id,
4658 MGMT_OP_READ_LOCAL_OOB_DATA,
4659 MGMT_STATUS_FAILED);
4663 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4664 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4666 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4667 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4670 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4671 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4674 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: requires a powered, SSP-capable
 * controller and no command already pending; queues the legacy or extended
 * (if Secure Connections is enabled on BR/EDR) HCI read, completed above.
 * NOTE(review): elided listing -- lock, goto labels and the NULL-cmd
 * branch are not visible here.
 */
4677 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4678 void *data, u16 data_len)
4680 struct mgmt_pending_cmd *cmd;
4681 struct hci_request req;
4684 bt_dev_dbg(hdev, "sock %p", sk);
4688 if (!hdev_is_powered(hdev)) {
4689 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4690 MGMT_STATUS_NOT_POWERED);
4694 if (!lmp_ssp_capable(hdev)) {
4695 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4696 MGMT_STATUS_NOT_SUPPORTED);
4700 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4701 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4706 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4712 hci_req_init(&req, hdev);
4714 if (bredr_sc_enabled(hdev))
4715 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4717 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4719 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4721 mgmt_pending_remove(cmd);
4724 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA. Two wire formats are accepted,
 * distinguished by length: the short form carries P-192 hash/rand only
 * (BR/EDR addresses only), the extended form carries P-192 + P-256 values.
 * For LE addresses the P-192 values must be all-zero (legacy SMP OOB is
 * not implemented); all-zero P-192 or P-256 pairs disable OOB data for
 * that curve by passing NULL pointers to hci_add_remote_oob_data().
 * Any other length is rejected with INVALID_PARAMS.
 * NOTE(review): elided listing -- lock, else branches and the NULL
 * assignments for the disabled-curve cases are not visible; code left
 * byte-identical.
 */
4728 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4729 void *data, u16 len)
4731 struct mgmt_addr_info *addr = data;
4734 bt_dev_dbg(hdev, "sock %p", sk);
4736 if (!bdaddr_type_is_valid(addr->type))
4737 return mgmt_cmd_complete(sk, hdev->id,
4738 MGMT_OP_ADD_REMOTE_OOB_DATA,
4739 MGMT_STATUS_INVALID_PARAMS,
4740 addr, sizeof(*addr));
4744 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4745 struct mgmt_cp_add_remote_oob_data *cp = data;
4748 if (cp->addr.type != BDADDR_BREDR) {
4749 err = mgmt_cmd_complete(sk, hdev->id,
4750 MGMT_OP_ADD_REMOTE_OOB_DATA,
4751 MGMT_STATUS_INVALID_PARAMS,
4752 &cp->addr, sizeof(cp->addr));
4756 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4757 cp->addr.type, cp->hash,
4758 cp->rand, NULL, NULL);
4760 status = MGMT_STATUS_FAILED;
4762 status = MGMT_STATUS_SUCCESS;
4764 err = mgmt_cmd_complete(sk, hdev->id,
4765 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4766 &cp->addr, sizeof(cp->addr));
4767 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4768 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4769 u8 *rand192, *hash192, *rand256, *hash256;
4772 if (bdaddr_type_is_le(cp->addr.type)) {
4773 /* Enforce zero-valued 192-bit parameters as
4774 * long as legacy SMP OOB isn't implemented.
4776 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4777 memcmp(cp->hash192, ZERO_KEY, 16)) {
4778 err = mgmt_cmd_complete(sk, hdev->id,
4779 MGMT_OP_ADD_REMOTE_OOB_DATA,
4780 MGMT_STATUS_INVALID_PARAMS,
4781 addr, sizeof(*addr));
4788 /* In case one of the P-192 values is set to zero,
4789 * then just disable OOB data for P-192.
4791 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4792 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4796 rand192 = cp->rand192;
4797 hash192 = cp->hash192;
4801 /* In case one of the P-256 values is set to zero, then just
4802 * disable OOB data for P-256.
4804 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4805 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4809 rand256 = cp->rand256;
4810 hash256 = cp->hash256;
4813 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4814 cp->addr.type, hash192, rand192,
4817 status = MGMT_STATUS_FAILED;
4819 status = MGMT_STATUS_SUCCESS;
4821 err = mgmt_cmd_complete(sk, hdev->id,
4822 MGMT_OP_ADD_REMOTE_OOB_DATA,
4823 status, &cp->addr, sizeof(cp->addr));
4825 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4827 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4828 MGMT_STATUS_INVALID_PARAMS);
4832 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: BR/EDR addresses only.
 * BDADDR_ANY clears all stored remote OOB data; otherwise a single
 * entry is removed (unknown entry -> INVALID_PARAMS).
 */
4836 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4837 void *data, u16 len)
4839 struct mgmt_cp_remove_remote_oob_data *cp = data;
4843 bt_dev_dbg(hdev, "sock %p", sk);
4845 if (cp->addr.type != BDADDR_BREDR)
4846 return mgmt_cmd_complete(sk, hdev->id,
4847 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4848 MGMT_STATUS_INVALID_PARAMS,
4849 &cp->addr, sizeof(cp->addr));
4853 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4854 hci_remote_oob_data_clear(hdev);
4855 status = MGMT_STATUS_SUCCESS;
4859 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4861 status = MGMT_STATUS_INVALID_PARAMS;
4863 status = MGMT_STATUS_SUCCESS;
4866 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4867 status, &cp->addr, sizeof(cp->addr));
4869 hci_dev_unlock(hdev);
/* Completion callback for any of the three start-discovery opcodes:
 * complete and remove the pending command, then wake the suspend wait
 * queue if an "unpause discovery" suspend task was outstanding.
 * NOTE(review): elided listing -- lock and the found/not-found branching
 * between the three pending_find() calls are not visible.
 */
4873 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4875 struct mgmt_pending_cmd *cmd;
4877 bt_dev_dbg(hdev, "status %u", status);
4881 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4883 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4886 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4889 cmd->cmd_complete(cmd, mgmt_status(status));
4890 mgmt_pending_remove(cmd);
4893 hci_dev_unlock(hdev);
4895 /* Handle suspend notifier */
4896 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4897 hdev->suspend_tasks)) {
4898 bt_dev_dbg(hdev, "Unpaused discovery");
4899 wake_up(&hdev->suspend_wait_q);
/* Validate a DISCOV_TYPE_* value against the controller's capabilities;
 * on rejection *mgmt_status carries the reason (LE/BR-EDR support check
 * result, or INVALID_PARAMS for an unknown type).
 * NOTE(review): elided listing -- the switch statement header and return
 * statements are not visible here.
 */
4903 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4904 uint8_t *mgmt_status)
4907 case DISCOV_TYPE_LE:
4908 *mgmt_status = mgmt_le_support(hdev);
4912 case DISCOV_TYPE_INTERLEAVED:
4913 *mgmt_status = mgmt_le_support(hdev);
4917 case DISCOV_TYPE_BREDR:
4918 *mgmt_status = mgmt_bredr_support(hdev);
4923 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: reject if unpowered, already
 * discovering, invalid type, or discovery is paused for suspend; then
 * reset the discovery filter/state and queue discov_update work, to be
 * finished by mgmt_start_discovery_complete().
 */
4930 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4931 u16 op, void *data, u16 len)
4933 struct mgmt_cp_start_discovery *cp = data;
4934 struct mgmt_pending_cmd *cmd;
4938 bt_dev_dbg(hdev, "sock %p", sk);
4942 if (!hdev_is_powered(hdev)) {
4943 err = mgmt_cmd_complete(sk, hdev->id, op,
4944 MGMT_STATUS_NOT_POWERED,
4945 &cp->type, sizeof(cp->type));
4949 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4950 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4951 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4952 &cp->type, sizeof(cp->type));
4956 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4957 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4958 &cp->type, sizeof(cp->type));
4962 /* Can't start discovery when it is paused */
4963 if (hdev->discovery_paused) {
4964 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4965 &cp->type, sizeof(cp->type));
4969 /* Clear the discovery filter first to free any previously
4970 * allocated memory for the UUID list.
4972 hci_discovery_filter_clear(hdev);
4974 hdev->discovery.type = cp->type;
4975 hdev->discovery.report_invalid_rssi = false;
4976 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4977 hdev->discovery.limited = true;
4979 hdev->discovery.limited = false;
4981 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4987 cmd->cmd_complete = generic_cmd_complete;
4989 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4990 queue_work(hdev->req_workqueue, &hdev->discov_update);
4994 hci_dev_unlock(hdev);
/* Thin wrapper: MGMT_OP_START_DISCOVERY. */
4998 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4999 void *data, u16 len)
5001 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* Thin wrapper: MGMT_OP_START_LIMITED_DISCOVERY. */
5005 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5006 void *data, u16 len)
5008 return start_discovery_internal(sk, hdev,
5009 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete hook for service discovery replies. */
5013 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
5016 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery but
 * with an RSSI threshold and a variable-length list of 128-bit service
 * UUIDs used as a result filter. Validates power state, busy/paused
 * discovery, uuid_count bounds (max_uuid_count keeps the u16 length math
 * from overflowing) and the exact command length, copies the UUID list
 * with kmemdup, then queues discov_update work.
 * NOTE(review): elided listing -- goto labels, NULL-cmd branch and the
 * kmemdup GFP flag are not visible; code left byte-identical.
 */
5020 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5021 void *data, u16 len)
5023 struct mgmt_cp_start_service_discovery *cp = data;
5024 struct mgmt_pending_cmd *cmd;
5025 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5026 u16 uuid_count, expected_len;
5030 bt_dev_dbg(hdev, "sock %p", sk);
5034 if (!hdev_is_powered(hdev)) {
5035 err = mgmt_cmd_complete(sk, hdev->id,
5036 MGMT_OP_START_SERVICE_DISCOVERY,
5037 MGMT_STATUS_NOT_POWERED,
5038 &cp->type, sizeof(cp->type));
5042 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5043 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5044 err = mgmt_cmd_complete(sk, hdev->id,
5045 MGMT_OP_START_SERVICE_DISCOVERY,
5046 MGMT_STATUS_BUSY, &cp->type,
5051 if (hdev->discovery_paused) {
5052 err = mgmt_cmd_complete(sk, hdev->id,
5053 MGMT_OP_START_SERVICE_DISCOVERY,
5054 MGMT_STATUS_BUSY, &cp->type,
5059 uuid_count = __le16_to_cpu(cp->uuid_count);
5060 if (uuid_count > max_uuid_count) {
5061 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5063 err = mgmt_cmd_complete(sk, hdev->id,
5064 MGMT_OP_START_SERVICE_DISCOVERY,
5065 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5070 expected_len = sizeof(*cp) + uuid_count * 16;
5071 if (expected_len != len) {
5072 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5074 err = mgmt_cmd_complete(sk, hdev->id,
5075 MGMT_OP_START_SERVICE_DISCOVERY,
5076 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5081 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5082 err = mgmt_cmd_complete(sk, hdev->id,
5083 MGMT_OP_START_SERVICE_DISCOVERY,
5084 status, &cp->type, sizeof(cp->type));
5088 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5095 cmd->cmd_complete = service_discovery_cmd_complete;
5097 /* Clear the discovery filter first to free any previously
5098 * allocated memory for the UUID list.
5100 hci_discovery_filter_clear(hdev);
5102 hdev->discovery.result_filtering = true;
5103 hdev->discovery.type = cp->type;
5104 hdev->discovery.rssi = cp->rssi;
5105 hdev->discovery.uuid_count = uuid_count;
5107 if (uuid_count > 0) {
5108 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5110 if (!hdev->discovery.uuids) {
5111 err = mgmt_cmd_complete(sk, hdev->id,
5112 MGMT_OP_START_SERVICE_DISCOVERY,
5114 &cp->type, sizeof(cp->type));
5115 mgmt_pending_remove(cmd);
5120 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5121 queue_work(hdev->req_workqueue, &hdev->discov_update);
5125 hci_dev_unlock(hdev);
/* Completion callback for Stop Discovery: complete/remove the pending
 * command, then wake the suspend wait queue if a "pause discovery"
 * suspend task was outstanding.
 * NOTE(review): elided listing -- lock and NULL-cmd bail-out not visible.
 */
5129 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5131 struct mgmt_pending_cmd *cmd;
5133 bt_dev_dbg(hdev, "status %u", status);
5137 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5139 cmd->cmd_complete(cmd, mgmt_status(status));
5140 mgmt_pending_remove(cmd);
5143 hci_dev_unlock(hdev);
5145 /* Handle suspend notifier */
5146 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5147 bt_dev_dbg(hdev, "Paused discovery");
5148 wake_up(&hdev->suspend_wait_q);
/* Handler for MGMT_OP_STOP_DISCOVERY: rejected if discovery is not
 * active (REJECTED) or the requested type does not match the running one
 * (INVALID_PARAMS); otherwise set DISCOVERY_STOPPING and queue the
 * discov_update work, finished by mgmt_stop_discovery_complete().
 */
5152 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5155 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5156 struct mgmt_pending_cmd *cmd;
5159 bt_dev_dbg(hdev, "sock %p", sk);
5163 if (!hci_discovery_active(hdev)) {
5164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5165 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5166 sizeof(mgmt_cp->type));
5170 if (hdev->discovery.type != mgmt_cp->type) {
5171 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5172 MGMT_STATUS_INVALID_PARAMS,
5173 &mgmt_cp->type, sizeof(mgmt_cp->type));
5177 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5183 cmd->cmd_complete = generic_cmd_complete;
5185 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5186 queue_work(hdev->req_workqueue, &hdev->discov_update);
5190 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CONFIRM_NAME: mark an inquiry-cache entry's name as
 * known or still needed (triggering name resolution re-ordering via
 * hci_inquiry_cache_update_resolve). Only valid while discovery is
 * active and the address is present in the unknown-name cache.
 */
5194 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5197 struct mgmt_cp_confirm_name *cp = data;
5198 struct inquiry_entry *e;
5201 bt_dev_dbg(hdev, "sock %p", sk);
5205 if (!hci_discovery_active(hdev)) {
5206 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5207 MGMT_STATUS_FAILED, &cp->addr,
5212 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5214 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5215 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5220 if (cp->name_known) {
5221 e->name_state = NAME_KNOWN;
5224 e->name_state = NAME_NEEDED;
5225 hci_inquiry_cache_update_resolve(hdev, e);
5228 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5229 &cp->addr, sizeof(cp->addr));
5232 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_BLOCK_DEVICE: add the address to the reject list
 * and broadcast MGMT_EV_DEVICE_BLOCKED to other sockets on success.
 * NOTE(review): elided listing -- the err check between the list add and
 * the event emit is not visible; code left byte-identical.
 */
5236 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5239 struct mgmt_cp_block_device *cp = data;
5243 bt_dev_dbg(hdev, "sock %p", sk);
5245 if (!bdaddr_type_is_valid(cp->addr.type))
5246 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5247 MGMT_STATUS_INVALID_PARAMS,
5248 &cp->addr, sizeof(cp->addr));
5252 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5255 status = MGMT_STATUS_FAILED;
5259 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5261 status = MGMT_STATUS_SUCCESS;
5264 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5265 &cp->addr, sizeof(cp->addr));
5267 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_UNBLOCK_DEVICE: remove the address from the reject
 * list (absent entry -> INVALID_PARAMS) and broadcast
 * MGMT_EV_DEVICE_UNBLOCKED on success.
 */
5272 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5275 struct mgmt_cp_unblock_device *cp = data;
5279 bt_dev_dbg(hdev, "sock %p", sk);
5281 if (!bdaddr_type_is_valid(cp->addr.type))
5282 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5283 MGMT_STATUS_INVALID_PARAMS,
5284 &cp->addr, sizeof(cp->addr));
5288 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5291 status = MGMT_STATUS_INVALID_PARAMS;
5295 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5297 status = MGMT_STATUS_SUCCESS;
5300 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5301 &cp->addr, sizeof(cp->addr));
5303 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEVICE_ID: store DI source/vendor/product/
 * version (source limited to 0x0000-0x0002), reply immediately, then
 * refresh the EIR data so the Device ID record is advertised.
 */
5308 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5311 struct mgmt_cp_set_device_id *cp = data;
5312 struct hci_request req;
5316 bt_dev_dbg(hdev, "sock %p", sk);
5318 source = __le16_to_cpu(cp->source);
5320 if (source > 0x0002)
5321 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5322 MGMT_STATUS_INVALID_PARAMS);
5326 hdev->devid_source = source;
5327 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5328 hdev->devid_product = __le16_to_cpu(cp->product);
5329 hdev->devid_version = __le16_to_cpu(cp->version);
5331 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5334 hci_req_init(&req, hdev);
5335 __hci_req_update_eir(&req);
5336 hci_req_run(&req, NULL);
5338 hci_dev_unlock(hdev);
/* hci_req callback used when re-enabling instance advertising; only logs. */
5343 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5346 bt_dev_dbg(hdev, "status %u", status);
/* HCI completion for Set Advertising: on error, fail all pending
 * SET_ADVERTISING commands; on success, sync the HCI_ADVERTISING flag
 * with the controller state, answer pending commands, emit New Settings,
 * wake any suspend pause/unpause waiter, and -- if Set Advertising was
 * just disabled while advertising instances exist -- re-schedule
 * multi-instance advertising.
 * NOTE(review): elided listing -- several declarations, the unlock/goto
 * structure and the instance NULL check are not visible.
 */
5349 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5352 struct cmd_lookup match = { NULL, hdev };
5353 struct hci_request req;
5355 struct adv_info *adv_instance;
5361 u8 mgmt_err = mgmt_status(status);
5363 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5364 cmd_status_rsp, &mgmt_err);
5368 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5369 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5371 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5373 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5376 new_settings(hdev, match.sk);
5381 /* Handle suspend notifier */
5382 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5383 hdev->suspend_tasks)) {
5384 bt_dev_dbg(hdev, "Paused advertising");
5385 wake_up(&hdev->suspend_wait_q);
5386 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5387 hdev->suspend_tasks)) {
5388 bt_dev_dbg(hdev, "Unpaused advertising");
5389 wake_up(&hdev->suspend_wait_q);
5392 /* If "Set Advertising" was just disabled and instance advertising was
5393 * set up earlier, then re-enable multi-instance advertising.
5395 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5396 list_empty(&hdev->adv_instances))
5399 instance = hdev->cur_adv_instance;
5401 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5402 struct adv_info, list);
5406 instance = adv_instance->instance;
5409 hci_req_init(&req, hdev);
5411 err = __hci_req_schedule_adv_instance(&req, instance, true);
5414 err = hci_req_run(&req, enable_advertising_instance);
5417 bt_dev_err(hdev, "failed to re-configure advertising");
5420 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING. val 0x00 disables, 0x01 enables,
 * 0x02 enables "connectable" advertising. Rejected when LE is off, LL
 * Privacy is enabled, or advertising is paused for suspend. When no HCI
 * traffic is needed (unpowered, unchanged, LE links up, or active LE
 * scan) only the flags are toggled and settings replied directly;
 * otherwise an HCI request is queued and finished by
 * set_advertising_complete().
 * NOTE(review): elided listing -- declarations of val/err, goto labels
 * and some unlock paths are not visible; code left byte-identical.
 */
5423 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5426 struct mgmt_mode *cp = data;
5427 struct mgmt_pending_cmd *cmd;
5428 struct hci_request req;
5432 bt_dev_dbg(hdev, "sock %p", sk);
5434 status = mgmt_le_support(hdev);
5436 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5439 /* Enabling the experimental LL Privay support disables support for
5442 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5444 MGMT_STATUS_NOT_SUPPORTED);
5446 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5448 MGMT_STATUS_INVALID_PARAMS);
5450 if (hdev->advertising_paused)
5451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5458 /* The following conditions are ones which mean that we should
5459 * not do any HCI communication but directly send a mgmt
5460 * response to user space (after toggling the flag if
5463 if (!hdev_is_powered(hdev) ||
5464 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5465 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5466 hci_conn_num(hdev, LE_LINK) > 0 ||
5467 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5468 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5472 hdev->cur_adv_instance = 0x00;
5473 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5474 if (cp->val == 0x02)
5475 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5477 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5479 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5480 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5483 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5488 err = new_settings(hdev, sk);
5493 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5494 pending_find(MGMT_OP_SET_LE, hdev)) {
5495 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5500 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5506 hci_req_init(&req, hdev);
5508 if (cp->val == 0x02)
5509 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5511 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5513 cancel_adv_timeout(hdev);
5516 /* Switch to instance "0" for the Set Advertising setting.
5517 * We cannot use update_[adv|scan_rsp]_data() here as the
5518 * HCI_ADVERTISING flag is not yet set.
5520 hdev->cur_adv_instance = 0x00;
5522 if (ext_adv_capable(hdev)) {
5523 __hci_req_start_ext_adv(&req, 0x00);
5525 __hci_req_update_adv_data(&req, 0x00);
5526 __hci_req_update_scan_rsp_data(&req, 0x00);
5527 __hci_req_enable_advertising(&req);
5530 __hci_req_disable_advertising(&req);
5533 err = hci_req_run(&req, set_advertising_complete);
5535 mgmt_pending_remove(cmd);
5538 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: only allowed on an LE-capable,
 * powered-off controller. A non-ANY address must not be BDADDR_NONE and
 * must have the two most significant bits set (static random address
 * format per the LE spec); BDADDR_ANY clears the static address.
 * NOTE(review): elided listing -- lock and the changed/new_settings
 * condition are not visible; code left byte-identical.
 */
5542 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5543 void *data, u16 len)
5545 struct mgmt_cp_set_static_address *cp = data;
5548 bt_dev_dbg(hdev, "sock %p", sk);
5550 if (!lmp_le_capable(hdev))
5551 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5552 MGMT_STATUS_NOT_SUPPORTED);
5554 if (hdev_is_powered(hdev))
5555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5556 MGMT_STATUS_REJECTED);
5558 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5559 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5560 return mgmt_cmd_status(sk, hdev->id,
5561 MGMT_OP_SET_STATIC_ADDRESS,
5562 MGMT_STATUS_INVALID_PARAMS);
5564 /* Two most significant bits shall be set */
5565 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5566 return mgmt_cmd_status(sk, hdev->id,
5567 MGMT_OP_SET_STATIC_ADDRESS,
5568 MGMT_STATUS_INVALID_PARAMS);
5573 bacpy(&hdev->static_addr, &cp->bdaddr);
5575 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5579 err = new_settings(hdev, sk);
5582 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: validate and store the LE scan
 * interval and window (both in the 0x0004..0x4000 range, window must
 * not exceed the interval), then restart passive background scanning
 * if it is currently active so the new parameters take effect.
 */
5586 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5587 void *data, u16 len)
5589 struct mgmt_cp_set_scan_params *cp = data;
5590 __u16 interval, window;
5593 bt_dev_dbg(hdev, "sock %p", sk);
5595 if (!lmp_le_capable(hdev))
5596 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5597 MGMT_STATUS_NOT_SUPPORTED);
5599 interval = __le16_to_cpu(cp->interval);
5601 if (interval < 0x0004 || interval > 0x4000)
5602 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5603 MGMT_STATUS_INVALID_PARAMS);
5605 window = __le16_to_cpu(cp->window);
5607 if (window < 0x0004 || window > 0x4000)
5608 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5609 MGMT_STATUS_INVALID_PARAMS);
/* The scan window is the active portion of each interval, so it can
 * never be larger than the interval itself.
 */
5611 if (window > interval)
5612 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5613 MGMT_STATUS_INVALID_PARAMS);
5617 hdev->le_scan_interval = interval;
5618 hdev->le_scan_window = window;
5620 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5623 /* If background scan is running, restart it so new parameters are
5626 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5627 hdev->discovery.state == DISCOVERY_STOPPED) {
5628 struct hci_request req;
5630 hci_req_init(&req, hdev);
5632 hci_req_add_le_scan_disable(&req, false);
5633 hci_req_add_le_passive_scan(&req);
5635 hci_req_run(&req, NULL);
5638 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_FAST_CONNECTABLE.
 * On failure report the mapped status to the requester; on success
 * sync the HCI_FAST_CONNECTABLE flag with the requested mode and
 * notify listeners of the new settings. Always removes the pending
 * command.
 */
5643 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5646 struct mgmt_pending_cmd *cmd;
5648 bt_dev_dbg(hdev, "status 0x%02x", status);
5652 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5657 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5658 mgmt_status(status));
5660 struct mgmt_mode *cp = cmd->param;
5663 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5665 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5667 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5668 new_settings(hdev, cmd->sk);
5671 mgmt_pending_remove(cmd);
5674 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: toggle the fast-connectable
 * page scan parameters. Requires BR/EDR to be enabled and HCI version
 * 1.2 or later; val must be 0x00 or 0x01. When powered off only the
 * flag is flipped; when powered an HCI request is issued and completed
 * asynchronously via fast_connectable_complete().
 */
5677 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5678 void *data, u16 len)
5680 struct mgmt_mode *cp = data;
5681 struct mgmt_pending_cmd *cmd;
5682 struct hci_request req;
5685 bt_dev_dbg(hdev, "sock %p", sk);
5687 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5688 hdev->hci_ver < BLUETOOTH_VER_1_2)
5689 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5690 MGMT_STATUS_NOT_SUPPORTED);
5692 if (cp->val != 0x00 && cp->val != 0x01)
5693 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5694 MGMT_STATUS_INVALID_PARAMS);
/* Only one SET_FAST_CONNECTABLE may be in flight at a time. */
5698 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5699 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested mode already matches the current flag. */
5704 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5705 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5710 if (!hdev_is_powered(hdev)) {
5711 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5712 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5714 new_settings(hdev, sk);
5718 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5725 hci_req_init(&req, hdev);
5727 __hci_req_write_fast_connectable(&req, cp->val);
5729 err = hci_req_run(&req, fast_connectable_complete);
5731 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5732 MGMT_STATUS_FAILED);
5733 mgmt_pending_remove(cmd);
5737 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_BREDR. On failure
 * the HCI_BREDR_ENABLED flag (optimistically set before the request
 * was issued) is restored and the error is reported; on success the
 * new settings are sent to the requester and broadcast.
 */
5742 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5744 struct mgmt_pending_cmd *cmd;
5746 bt_dev_dbg(hdev, "status 0x%02x", status);
5750 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5755 u8 mgmt_err = mgmt_status(status);
5757 /* We need to restore the flag if related HCI commands
5760 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5762 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5764 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5765 new_settings(hdev, cmd->sk);
5768 mgmt_pending_remove(cmd);
5771 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BREDR: enable or disable BR/EDR on a
 * dual-mode controller. Requires both BR/EDR and LE support, with LE
 * currently enabled. When powered off the flag is flipped directly
 * (and, on disable, BR/EDR-only settings are cleared); when powered,
 * disabling is rejected and re-enabling is rejected if a static
 * address or secure connections is in use. Otherwise an HCI request
 * updates scan/advertising state, completing via set_bredr_complete().
 */
5774 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5776 struct mgmt_mode *cp = data;
5777 struct mgmt_pending_cmd *cmd;
5778 struct hci_request req;
5781 bt_dev_dbg(hdev, "sock %p", sk);
5783 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5785 MGMT_STATUS_NOT_SUPPORTED);
5787 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5789 MGMT_STATUS_REJECTED);
5791 if (cp->val != 0x00 && cp->val != 0x01)
5792 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5793 MGMT_STATUS_INVALID_PARAMS);
5797 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5798 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5802 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR also clears every setting that only makes
 * sense with BR/EDR enabled.
 */
5804 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5805 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5806 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5807 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5808 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5811 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5813 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5817 err = new_settings(hdev, sk);
5821 /* Reject disabling when powered on */
5823 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5824 MGMT_STATUS_REJECTED);
5827 /* When configuring a dual-mode controller to operate
5828 * with LE only and using a static address, then switching
5829 * BR/EDR back on is not allowed.
5831 * Dual-mode controllers shall operate with the public
5832 * address as its identity address for BR/EDR and LE. So
5833 * reject the attempt to create an invalid configuration.
5835 * The same restrictions applies when secure connections
5836 * has been enabled. For BR/EDR this is a controller feature
5837 * while for LE it is a host stack feature. This means that
5838 * switching BR/EDR back on when secure connections has been
5839 * enabled is not a supported transaction.
5841 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5842 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5843 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5844 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5845 MGMT_STATUS_REJECTED);
5850 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5851 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5856 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5862 /* We need to flip the bit already here so that
5863 * hci_req_update_adv_data generates the correct flags.
5865 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5867 hci_req_init(&req, hdev);
5869 __hci_req_write_fast_connectable(&req, false);
5870 __hci_req_update_scan(&req);
5872 /* Since only the advertising data flags will change, there
5873 * is no need to update the scan response data.
5875 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5877 err = hci_req_run(&req, set_bredr_complete);
5879 mgmt_pending_remove(cmd);
5882 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_SECURE_CONN. On
 * success the SC flags are set according to the requested mode:
 * clear both for 0x00, SC_ENABLED only for 0x01, and both SC_ENABLED
 * and SC_ONLY for 0x02 (SC-only mode).
 */
5886 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5888 struct mgmt_pending_cmd *cmd;
5889 struct mgmt_mode *cp;
5891 bt_dev_dbg(hdev, "status %u", status);
5895 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5900 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5901 mgmt_status(status));
5909 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5910 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5913 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5914 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5917 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5918 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5922 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5923 new_settings(hdev, cmd->sk);
5926 mgmt_pending_remove(cmd);
5928 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SECURE_CONN: configure Secure Connections
 * (val: 0x00 off, 0x01 on, 0x02 SC-only). Needs either controller SC
 * support or LE enabled; with BR/EDR active, SSP must be enabled
 * first. When the write cannot reach the controller (not powered, no
 * SC support, or BR/EDR disabled) only the host flags are updated;
 * otherwise HCI_OP_WRITE_SC_SUPPORT is issued and completed via
 * sc_enable_complete().
 */
5931 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5932 void *data, u16 len)
5934 struct mgmt_mode *cp = data;
5935 struct mgmt_pending_cmd *cmd;
5936 struct hci_request req;
5940 bt_dev_dbg(hdev, "sock %p", sk);
5942 if (!lmp_sc_capable(hdev) &&
5943 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5945 MGMT_STATUS_NOT_SUPPORTED);
5947 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5948 lmp_sc_capable(hdev) &&
5949 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5950 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5951 MGMT_STATUS_REJECTED);
5953 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5954 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5955 MGMT_STATUS_INVALID_PARAMS);
/* Host-flags-only path: the controller either can't be reached or
 * doesn't need to be told.
 */
5959 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5960 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5964 changed = !hci_dev_test_and_set_flag(hdev,
5966 if (cp->val == 0x02)
5967 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5969 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5971 changed = hci_dev_test_and_clear_flag(hdev,
5973 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5976 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5981 err = new_settings(hdev, sk);
5986 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5987 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No-op if both the enable state and the SC-only state already
 * match the request.
 */
5994 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5995 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5996 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6000 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6006 hci_req_init(&req, hdev);
6007 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
6008 err = hci_req_run(&req, sc_enable_complete);
6010 mgmt_pending_remove(cmd);
6015 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS (val: 0x00 off, 0x01 keep debug
 * keys, 0x02 also use/generate them). HCI_KEEP_DEBUG_KEYS tracks
 * whether debug link keys are retained; HCI_USE_DEBUG_KEYS (0x02 only)
 * additionally enables SSP debug mode on the controller when powered
 * and SSP is active.
 */
6019 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6020 void *data, u16 len)
6022 struct mgmt_mode *cp = data;
6023 bool changed, use_changed;
6026 bt_dev_dbg(hdev, "sock %p", sk);
6028 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6030 MGMT_STATUS_INVALID_PARAMS);
6035 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6037 changed = hci_dev_test_and_clear_flag(hdev,
6038 HCI_KEEP_DEBUG_KEYS);
6040 if (cp->val == 0x02)
6041 use_changed = !hci_dev_test_and_set_flag(hdev,
6042 HCI_USE_DEBUG_KEYS);
6044 use_changed = hci_dev_test_and_clear_flag(hdev,
6045 HCI_USE_DEBUG_KEYS);
/* Push the new debug mode to the controller only when it changed
 * and the controller can accept it right now.
 */
6047 if (hdev_is_powered(hdev) && use_changed &&
6048 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6049 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6050 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6051 sizeof(mode), &mode);
6054 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6059 err = new_settings(hdev, sk);
6062 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY (privacy: 0x00 off, 0x01 on, 0x02
 * limited privacy). Enabling stores the supplied IRK, marks the RPA
 * as expired so a fresh one is generated, and flags RPA resolving
 * support; disabling wipes the IRK and clears the related flags.
 * NOTE(review): the upstream "reject while powered" check is
 * deliberately commented out in this (Tizen) tree — confirm that
 * changing privacy while powered is intended here.
 */
6066 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6069 struct mgmt_cp_set_privacy *cp = cp_data;
6073 bt_dev_dbg(hdev, "sock %p", sk);
6075 if (!lmp_le_capable(hdev))
6076 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6077 MGMT_STATUS_NOT_SUPPORTED);
6079 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6080 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6081 MGMT_STATUS_INVALID_PARAMS);
6084 /* commenting out since set privacy command is always rejected
6085 * if this condition is enabled.
6087 if (hdev_is_powered(hdev))
6088 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6089 MGMT_STATUS_REJECTED);
6094 /* If user space supports this command it is also expected to
6095 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6097 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6100 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6101 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a fresh RPA under the new IRK. */
6102 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6103 hci_adv_instances_set_rpa_expired(hdev, true);
6104 if (cp->privacy == 0x02)
6105 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6107 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6109 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6110 memset(hdev->irk, 0, sizeof(hdev->irk));
6111 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6112 hci_adv_instances_set_rpa_expired(hdev, false);
6113 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6116 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6121 err = new_settings(hdev, sk);
6124 hci_dev_unlock(hdev);
/* Validate the address type of an IRK entry: public LE addresses are
 * always acceptable; random LE addresses must be static random
 * addresses (two most significant bits set).
 */
6128 static bool irk_is_valid(struct mgmt_irk_info *irk)
6130 switch (irk->addr.type) {
6131 case BDADDR_LE_PUBLIC:
6134 case BDADDR_LE_RANDOM:
6135 /* Two most significant bits shall be set */
6136 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS: replace the kernel's Identity
 * Resolving Key store with the list supplied by user space. The count
 * is bounded so struct_size() cannot overflow, the payload length must
 * match exactly, and every entry is validated before the existing
 * store is cleared. Blocked keys are skipped with a warning. Loading
 * IRKs implies user space handles resolving, so HCI_RPA_RESOLVING is
 * set.
 */
6144 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6147 struct mgmt_cp_load_irks *cp = cp_data;
6148 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6149 sizeof(struct mgmt_irk_info));
6150 u16 irk_count, expected_len;
6153 bt_dev_dbg(hdev, "sock %p", sk);
6155 if (!lmp_le_capable(hdev))
6156 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6157 MGMT_STATUS_NOT_SUPPORTED);
6159 irk_count = __le16_to_cpu(cp->irk_count);
6160 if (irk_count > max_irk_count) {
6161 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6163 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6164 MGMT_STATUS_INVALID_PARAMS);
6167 expected_len = struct_size(cp, irks, irk_count);
6168 if (expected_len != len) {
6169 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6171 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6172 MGMT_STATUS_INVALID_PARAMS);
6175 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate every entry up front so the existing store is never
 * cleared for a request that will be rejected.
 */
6177 for (i = 0; i < irk_count; i++) {
6178 struct mgmt_irk_info *key = &cp->irks[i];
6180 if (!irk_is_valid(key))
6181 return mgmt_cmd_status(sk, hdev->id,
6183 MGMT_STATUS_INVALID_PARAMS);
6188 hci_smp_irks_clear(hdev);
6190 for (i = 0; i < irk_count; i++) {
6191 struct mgmt_irk_info *irk = &cp->irks[i];
6193 if (hci_is_blocked_key(hdev,
6194 HCI_BLOCKED_KEY_TYPE_IRK,
6196 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6201 hci_add_irk(hdev, &irk->addr.bdaddr,
6202 le_addr_type(irk->addr.type), irk->val,
6206 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6208 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6210 hci_dev_unlock(hdev);
/* Tizen extension MGMT_OP_SET_ADVERTISING_PARAMS: store the LE
 * advertising interval range (0x0020..0x4000, min <= max), filter
 * policy and advertising type for later use. Rejected while the
 * HCI_ADVERTISING setting is active, since the parameters would not
 * take effect on the running instance.
 */
6216 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
6217 void *data, u16 len)
6219 struct mgmt_cp_set_advertising_params *cp = data;
6224 BT_DBG("%s", hdev->name);
6226 if (!lmp_le_capable(hdev))
6227 return mgmt_cmd_status(sk, hdev->id,
6228 MGMT_OP_SET_ADVERTISING_PARAMS,
6229 MGMT_STATUS_NOT_SUPPORTED);
6231 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6232 return mgmt_cmd_status(sk, hdev->id,
6233 MGMT_OP_SET_ADVERTISING_PARAMS,
6236 min_interval = __le16_to_cpu(cp->interval_min);
6237 max_interval = __le16_to_cpu(cp->interval_max);
6239 if (min_interval > max_interval ||
6240 min_interval < 0x0020 || max_interval > 0x4000)
6241 return mgmt_cmd_status(sk, hdev->id,
6242 MGMT_OP_SET_ADVERTISING_PARAMS,
6243 MGMT_STATUS_INVALID_PARAMS);
6247 hdev->le_adv_min_interval = min_interval;
6248 hdev->le_adv_max_interval = max_interval;
6249 hdev->adv_filter_policy = cp->filter_policy;
6250 hdev->adv_type = cp->type;
6252 err = mgmt_cmd_complete(sk, hdev->id,
6253 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
6255 hci_dev_unlock(hdev);
/* HCI request completion callback for the Tizen
 * MGMT_OP_SET_ADVERTISING_DATA command: relay the controller status
 * to the waiting socket and drop the pending command.
 */
6260 static void set_advertising_data_complete(struct hci_dev *hdev,
6261 u8 status, u16 opcode)
6263 struct mgmt_cp_set_advertising_data *cp;
6264 struct mgmt_pending_cmd *cmd;
6266 BT_DBG("status 0x%02x", status);
6270 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
6277 mgmt_cmd_status(cmd->sk, hdev->id,
6278 MGMT_OP_SET_ADVERTISING_DATA,
6279 mgmt_status(status));
6281 mgmt_cmd_complete(cmd->sk, hdev->id,
6282 MGMT_OP_SET_ADVERTISING_DATA, 0,
6285 mgmt_pending_remove(cmd);
6288 hci_dev_unlock(hdev);
/* Tizen extension MGMT_OP_SET_ADVERTISING_DATA: push raw advertising
 * data (at most HCI_MAX_AD_LENGTH bytes) straight to the controller
 * via HCI_OP_LE_SET_ADV_DATA. Only one request may be pending at a
 * time; completion is reported via set_advertising_data_complete().
 */
6291 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6292 void *data, u16 len)
6294 struct mgmt_pending_cmd *cmd;
6295 struct hci_request req;
6296 struct mgmt_cp_set_advertising_data *cp = data;
6297 struct hci_cp_le_set_adv_data adv;
6300 BT_DBG("%s", hdev->name);
6302 if (!lmp_le_capable(hdev)) {
6303 return mgmt_cmd_status(sk, hdev->id,
6304 MGMT_OP_SET_ADVERTISING_DATA,
6305 MGMT_STATUS_NOT_SUPPORTED);
6310 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6311 err = mgmt_cmd_status(sk, hdev->id,
6312 MGMT_OP_SET_ADVERTISING_DATA,
6317 if (len > HCI_MAX_AD_LENGTH) {
6318 err = mgmt_cmd_status(sk, hdev->id,
6319 MGMT_OP_SET_ADVERTISING_DATA,
6320 MGMT_STATUS_INVALID_PARAMS);
6324 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6331 hci_req_init(&req, hdev);
/* Zero the full HCI parameter block so unused trailing bytes of the
 * 31-byte advertising buffer are well-defined.
 */
6333 memset(&adv, 0, sizeof(adv));
6334 memcpy(adv.data, cp->data, len);
6337 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6339 err = hci_req_run(&req, set_advertising_data_complete);
6341 mgmt_pending_remove(cmd);
6344 hci_dev_unlock(hdev);
/* HCI request completion callback for the Tizen
 * MGMT_OP_SET_SCAN_RSP_DATA command: relay the controller status to
 * the waiting socket and drop the pending command.
 */
6349 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6352 struct mgmt_cp_set_scan_rsp_data *cp;
6353 struct mgmt_pending_cmd *cmd;
6355 BT_DBG("status 0x%02x", status);
6359 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6366 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6367 mgmt_status(status));
6369 mgmt_cmd_complete(cmd->sk, hdev->id,
6370 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6373 mgmt_pending_remove(cmd);
6376 hci_dev_unlock(hdev);
/* Tizen extension MGMT_OP_SET_SCAN_RSP_DATA: push raw scan response
 * data (at most HCI_MAX_AD_LENGTH bytes) directly to the controller
 * via HCI_OP_LE_SET_SCAN_RSP_DATA. Mirrors set_advertising_data();
 * completion is reported via set_scan_rsp_data_complete().
 */
6379 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6382 struct mgmt_pending_cmd *cmd;
6383 struct hci_request req;
6384 struct mgmt_cp_set_scan_rsp_data *cp = data;
6385 struct hci_cp_le_set_scan_rsp_data rsp;
6388 BT_DBG("%s", hdev->name);
6390 if (!lmp_le_capable(hdev))
6391 return mgmt_cmd_status(sk, hdev->id,
6392 MGMT_OP_SET_SCAN_RSP_DATA,
6393 MGMT_STATUS_NOT_SUPPORTED);
6397 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6398 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6403 if (len > HCI_MAX_AD_LENGTH) {
6404 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6405 MGMT_STATUS_INVALID_PARAMS);
6409 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6415 hci_req_init(&req, hdev);
6417 memset(&rsp, 0, sizeof(rsp));
6418 memcpy(rsp.data, cp->data, len);
6421 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6423 err = hci_req_run(&req, set_scan_rsp_data_complete);
6425 mgmt_pending_remove(cmd);
6428 hci_dev_unlock(hdev);
6433 /* Adv White List feature */
/* HCI request completion callback for the Tizen
 * MGMT_OP_ADD_DEV_WHITE_LIST command: on success the original request
 * parameters are echoed back in the complete event.
 */
6434 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6436 struct mgmt_cp_add_dev_white_list *cp;
6437 struct mgmt_pending_cmd *cmd;
6439 BT_DBG("status 0x%02x", status);
6443 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
6450 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6451 mgmt_status(status));
6453 mgmt_cmd_complete(cmd->sk, hdev->id,
6454 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
6456 mgmt_pending_remove(cmd);
6459 hci_dev_unlock(hdev);
/* Tizen extension MGMT_OP_ADD_DEV_WHITE_LIST: forward the request
 * payload as HCI_OP_LE_ADD_TO_WHITE_LIST to add a device to the LE
 * accept (white) list. Requires LE support and a powered controller;
 * only one request may be pending at a time.
 */
6462 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
6463 void *data, u16 len)
6465 struct mgmt_pending_cmd *cmd;
6466 struct mgmt_cp_add_dev_white_list *cp = data;
6467 struct hci_request req;
6470 BT_DBG("%s", hdev->name);
6472 if (!lmp_le_capable(hdev))
6473 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6474 MGMT_STATUS_NOT_SUPPORTED);
6476 if (!hdev_is_powered(hdev))
6477 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6478 MGMT_STATUS_REJECTED);
6482 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
6483 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6488 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
6494 hci_req_init(&req, hdev);
/* The mgmt payload is passed through to HCI unchanged. */
6496 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
6498 err = hci_req_run(&req, add_white_list_complete);
6500 mgmt_pending_remove(cmd);
6505 hci_dev_unlock(hdev);
/* HCI request completion callback for the Tizen
 * MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST command: relay controller status
 * and drop the pending command.
 */
6510 static void remove_from_white_list_complete(struct hci_dev *hdev,
6511 u8 status, u16 opcode)
6513 struct mgmt_cp_remove_dev_from_white_list *cp;
6514 struct mgmt_pending_cmd *cmd;
6516 BT_DBG("status 0x%02x", status);
6520 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
6527 mgmt_cmd_status(cmd->sk, hdev->id,
6528 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6529 mgmt_status(status));
6531 mgmt_cmd_complete(cmd->sk, hdev->id,
6532 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
6535 mgmt_pending_remove(cmd);
6538 hci_dev_unlock(hdev);
/* Tizen extension MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST: forward the
 * request payload as HCI_OP_LE_DEL_FROM_WHITE_LIST to remove a device
 * from the LE accept (white) list. Same preconditions as
 * add_white_list(): LE support, powered controller, no request
 * already pending.
 */
6541 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
6542 void *data, u16 len)
6544 struct mgmt_pending_cmd *cmd;
6545 struct mgmt_cp_remove_dev_from_white_list *cp = data;
6546 struct hci_request req;
6549 BT_DBG("%s", hdev->name);
6551 if (!lmp_le_capable(hdev))
6552 return mgmt_cmd_status(sk, hdev->id,
6553 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6554 MGMT_STATUS_NOT_SUPPORTED);
6556 if (!hdev_is_powered(hdev))
6557 return mgmt_cmd_status(sk, hdev->id,
6558 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6559 MGMT_STATUS_REJECTED);
6563 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
6564 err = mgmt_cmd_status(sk, hdev->id,
6565 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6570 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6577 hci_req_init(&req, hdev);
6579 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
6581 err = hci_req_run(&req, remove_from_white_list_complete);
6583 mgmt_pending_remove(cmd);
6588 hci_dev_unlock(hdev);
/* HCI request completion callback for the Tizen
 * MGMT_OP_CLEAR_DEV_WHITE_LIST command: relay controller status and
 * drop the pending command.
 */
6593 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
6596 struct mgmt_pending_cmd *cmd;
6598 BT_DBG("status 0x%02x", status);
6602 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
6607 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6608 mgmt_status(status));
6610 mgmt_cmd_complete(cmd->sk, hdev->id,
6611 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6614 mgmt_pending_remove(cmd);
6617 hci_dev_unlock(hdev);
/* Tizen extension MGMT_OP_CLEAR_DEV_WHITE_LIST: issue
 * HCI_OP_LE_CLEAR_WHITE_LIST (no parameters) to empty the LE accept
 * (white) list. Same preconditions as the other white list commands.
 */
6620 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
6621 void *data, u16 len)
6623 struct mgmt_pending_cmd *cmd;
6624 struct hci_request req;
6627 BT_DBG("%s", hdev->name);
6629 if (!lmp_le_capable(hdev))
6630 return mgmt_cmd_status(sk, hdev->id,
6631 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6632 MGMT_STATUS_NOT_SUPPORTED);
6634 if (!hdev_is_powered(hdev))
6635 return mgmt_cmd_status(sk, hdev->id,
6636 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6637 MGMT_STATUS_REJECTED);
6641 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
6642 err = mgmt_cmd_status(sk, hdev->id,
6643 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6648 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6655 hci_req_init(&req, hdev);
6657 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
6659 err = hci_req_run(&req, clear_white_list_complete);
6661 mgmt_pending_remove(cmd);
6666 hci_dev_unlock(hdev);
/* HCI request completion callback for the RSSI threshold request
 * issued by mgmt_set_rssi_threshold() (tracked under
 * MGMT_OP_SET_RSSI_ENABLE): relay controller status and drop the
 * pending command.
 */
6671 static void set_rssi_threshold_complete(struct hci_dev *hdev,
6672 u8 status, u16 opcode)
6674 struct mgmt_pending_cmd *cmd;
6676 BT_DBG("status 0x%02x", status);
6680 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6685 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6686 mgmt_status(status));
6688 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6691 mgmt_pending_remove(cmd);
6694 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_RSSI_DISABLE:
 * relay controller status and drop the pending command.
 */
6697 static void set_rssi_disable_complete(struct hci_dev *hdev,
6698 u8 status, u16 opcode)
6700 struct mgmt_pending_cmd *cmd;
6702 BT_DBG("status 0x%02x", status);
6706 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6711 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6712 mgmt_status(status));
6714 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6717 mgmt_pending_remove(cmd);
6720 hci_dev_unlock(hdev);
/* Tizen RSSI monitoring: program alert thresholds (low / in-range /
 * high) for the connection identified by bdaddr + link_type via a
 * vendor HCI command (HCI_OP_ENABLE_RSSI, LE extended sub-opcode
 * 0x0B). Expects a pending MGMT_OP_SET_RSSI_ENABLE command to already
 * exist (created by set_enable_rssi() or the enable completion path);
 * completion is reported via set_rssi_threshold_complete().
 */
6723 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6724 void *data, u16 len)
6727 struct hci_cp_set_rssi_threshold th = { 0, };
6728 struct mgmt_cp_set_enable_rssi *cp = data;
6729 struct hci_conn *conn;
6730 struct mgmt_pending_cmd *cmd;
6731 struct hci_request req;
6736 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6738 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6739 MGMT_STATUS_FAILED);
6743 if (!lmp_le_capable(hdev)) {
6744 mgmt_pending_remove(cmd);
6745 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6746 MGMT_STATUS_NOT_SUPPORTED);
6750 if (!hdev_is_powered(hdev)) {
6751 BT_DBG("%s", hdev->name);
6752 mgmt_pending_remove(cmd);
6753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6754 MGMT_STATUS_NOT_POWERED);
/* link_type 0x01 selects an LE connection, anything else BR/EDR. */
6758 if (cp->link_type == 0x01)
6759 dest_type = LE_LINK;
6761 dest_type = ACL_LINK;
6763 /* Get LE/ACL link handle info */
6764 conn = hci_conn_hash_lookup_ba(hdev,
6765 dest_type, &cp->bdaddr);
6768 err = mgmt_cmd_complete(sk, hdev->id,
6769 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
6770 mgmt_pending_remove(cmd);
6774 hci_req_init(&req, hdev);
/* Vendor LE extended sub-opcode for "set RSSI threshold";
 * alert_mask 0x07 enables all three threshold alerts.
 */
6776 th.hci_le_ext_opcode = 0x0B;
6778 th.conn_handle = conn->handle;
6779 th.alert_mask = 0x07;
6780 th.low_th = cp->low_th;
6781 th.in_range_th = cp->in_range_th;
6782 th.high_th = cp->high_th;
6784 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6785 err = hci_req_run(&req, set_rssi_threshold_complete);
6788 mgmt_pending_remove(cmd);
6789 BT_ERR("Error in requesting hci_req_run");
6794 hci_dev_unlock(hdev);
/* Report a successful RSSI-enable to user space: send the mgmt
 * command complete plus an MGMT_EV_RSSI_ENABLED event, reset any
 * previous per-link RSSI state for this link type and mark the target
 * link as monitored, then clear the pending MGMT_OP_SET_RSSI_ENABLE
 * command.
 */
6798 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6799 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6801 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6802 struct mgmt_cp_set_enable_rssi *cp = data;
6803 struct mgmt_pending_cmd *cmd;
6808 mgmt_rp.status = rp->status;
6809 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6810 mgmt_rp.bt_address = cp->bdaddr;
6811 mgmt_rp.link_type = cp->link_type;
6813 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6814 MGMT_STATUS_SUCCESS, &mgmt_rp,
6815 sizeof(struct mgmt_cc_rsp_enable_rssi));
6817 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6818 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
/* Only one link per link type is monitored at a time: unset all
 * others before marking this one.
 */
6820 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6821 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6822 &mgmt_rp.bt_address, true);
6826 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6828 mgmt_pending_remove(cmd);
6830 hci_dev_unlock(hdev);
/* Report a successful RSSI-disable to user space: send the mgmt
 * command complete plus an MGMT_EV_RSSI_DISABLED event, clear the
 * per-link monitoring state, then remove the pending
 * MGMT_OP_SET_RSSI_DISABLE command.
 *
 * NOTE(review): mgmt_rp is a struct mgmt_cc_rp_disable_rssi, but both
 * the command complete and the event are sized with
 * sizeof(struct mgmt_cc_rsp_enable_rssi) — looks like a copy/paste
 * from the enable path. Harmless only if the two structs have the
 * same size; verify and use sizeof(mgmt_rp) instead.
 */
6833 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6834 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6836 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6837 struct mgmt_cp_disable_rssi *cp = data;
6838 struct mgmt_pending_cmd *cmd;
6843 mgmt_rp.status = rp->status;
6844 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6845 mgmt_rp.bt_address = cp->bdaddr;
6846 mgmt_rp.link_type = cp->link_type;
6848 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6849 MGMT_STATUS_SUCCESS, &mgmt_rp,
6850 sizeof(struct mgmt_cc_rsp_enable_rssi));
6852 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6853 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6855 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6856 &mgmt_rp.bt_address, false);
6860 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6862 mgmt_pending_remove(cmd);
6864 hci_dev_unlock(hdev);
/* Tizen RSSI monitoring: send the vendor HCI enable command
 * (HCI_OP_ENABLE_RSSI, sub-opcode 0x01) with the CS-features byte set
 * to 0x00, i.e. turning RSSI monitoring OFF. Expects a pending
 * MGMT_OP_SET_RSSI_DISABLE command to already exist; completion is
 * reported via set_rssi_disable_complete().
 */
6867 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6868 void *data, u16 len)
6870 struct mgmt_pending_cmd *cmd;
6871 struct hci_request req;
6872 struct hci_cp_set_enable_rssi cp_en = { 0, };
6875 BT_DBG("Set Disable RSSI.");
/* Sub-opcode 0x01 = enable/disable; 0x00 features disables RSSI. */
6877 cp_en.hci_le_ext_opcode = 0x01;
6878 cp_en.le_enable_cs_Features = 0x00;
6879 cp_en.data[0] = 0x00;
6880 cp_en.data[1] = 0x00;
6881 cp_en.data[2] = 0x00;
6885 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6887 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6888 MGMT_STATUS_FAILED);
6892 if (!lmp_le_capable(hdev)) {
6893 mgmt_pending_remove(cmd);
6894 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6895 MGMT_STATUS_NOT_SUPPORTED);
6899 if (!hdev_is_powered(hdev)) {
6900 BT_DBG("%s", hdev->name);
6901 mgmt_pending_remove(cmd);
6902 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6903 MGMT_STATUS_NOT_POWERED);
6907 hci_req_init(&req, hdev);
6909 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6910 sizeof(struct hci_cp_set_enable_rssi),
6911 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6912 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6914 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6915 err = hci_req_run(&req, set_rssi_disable_complete);
6918 mgmt_pending_remove(cmd);
6919 BT_ERR("Error in requesting hci_req_run");
6924 hci_dev_unlock(hdev);
/* Command-complete dispatcher for the vendor HCI_OP_ENABLE_RSSI
 * command. Routes the response by which mgmt command is pending:
 * for an enable request, an enable response (sub-opcode) triggers the
 * follow-up threshold command while a threshold response reports
 * success; for a disable request, it either reports success directly
 * or — when other links are still monitored — skips the hardware
 * disable and only clears this link's state.
 *
 * NOTE(review): cmd_enable/cmd_disable are looked up under the lock
 * but dereferenced after hci_dev_unlock() — confirm the pending
 * commands cannot be freed concurrently in this window.
 */
6928 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6930 struct hci_cc_rsp_enable_rssi *rp = response;
6931 struct mgmt_pending_cmd *cmd_enable = NULL;
6932 struct mgmt_pending_cmd *cmd_disable = NULL;
6933 struct mgmt_cp_set_enable_rssi *cp_en;
6934 struct mgmt_cp_disable_rssi *cp_dis;
6937 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6938 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6939 hci_dev_unlock(hdev);
6942 BT_DBG("Enable Request");
6945 BT_DBG("Disable Request");
6948 cp_en = cmd_enable->param;
6953 switch (rp->le_ext_opcode) {
6955 BT_DBG("RSSI enabled.. Setting Threshold...");
6956 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6957 cp_en, sizeof(*cp_en));
6961 BT_DBG("Sending RSSI enable success");
6962 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6963 cp_en, rp, rp->status);
6967 } else if (cmd_disable) {
6968 cp_dis = cmd_disable->param;
6973 switch (rp->le_ext_opcode) {
6975 BT_DBG("Sending RSSI disable success");
6976 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6977 cp_dis, rp, rp->status);
6982 * Only unset RSSI Threshold values for the Link if
6983 * RSSI is monitored for other BREDR or LE Links
6985 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6986 BT_DBG("Unset Threshold. Other links being monitored");
6987 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6988 cp_dis, rp, rp->status);
6990 BT_DBG("Unset Threshold. Disabling...");
6991 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6992 cp_dis, sizeof(*cp_dis));
/* HCI request completion callback for the RSSI-enable request issued
 * by set_enable_rssi(): relay controller status and drop the pending
 * MGMT_OP_SET_RSSI_ENABLE command.
 */
6999 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7002 struct mgmt_pending_cmd *cmd;
7004 BT_DBG("status 0x%02x", status);
7008 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7013 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7014 mgmt_status(status));
7016 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7019 mgmt_pending_remove(cmd);
7022 hci_dev_unlock(hdev);
/* Handler for the Tizen MGMT_OP_SET_RSSI_ENABLE command: turn RSSI
 * monitoring on via the vendor HCI_OP_ENABLE_RSSI command (sub-opcode
 * 0x01, CS-features 0x04 = RSSI monitoring). If the controller
 * already monitors RSSI on some link, the hardware enable is skipped
 * and the threshold values are programmed directly via
 * mgmt_set_rssi_threshold(). Completion is reported via
 * set_rssi_enable_complete().
 */
7025 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7026 void *data, u16 len)
7028 struct mgmt_pending_cmd *cmd;
7029 struct hci_request req;
7030 struct mgmt_cp_set_enable_rssi *cp = data;
7031 struct hci_cp_set_enable_rssi cp_en = { 0, };
7034 BT_DBG("Set Enable RSSI.");
/* Sub-opcode 0x01 = enable/disable; 0x04 features enables RSSI. */
7036 cp_en.hci_le_ext_opcode = 0x01;
7037 cp_en.le_enable_cs_Features = 0x04;
7038 cp_en.data[0] = 0x00;
7039 cp_en.data[1] = 0x00;
7040 cp_en.data[2] = 0x00;
7044 if (!lmp_le_capable(hdev)) {
7045 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7046 MGMT_STATUS_NOT_SUPPORTED);
7050 if (!hdev_is_powered(hdev)) {
7051 BT_DBG("%s", hdev->name);
7052 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7053 MGMT_STATUS_NOT_POWERED);
7057 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
7058 BT_DBG("%s", hdev->name);
7059 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7064 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
7067 BT_DBG("%s", hdev->name);
7072 /* If RSSI is already enabled directly set Threshold values */
7073 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
7074 hci_dev_unlock(hdev);
7075 BT_DBG("RSSI Enabled. Directly set Threshold");
7076 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
7080 hci_req_init(&req, hdev);
7082 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7083 sizeof(struct hci_cp_set_enable_rssi),
7084 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7085 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7087 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7088 err = hci_req_run(&req, set_rssi_enable_complete);
7091 mgmt_pending_remove(cmd);
7092 BT_ERR("Error in requesting hci_req_run");
7097 hci_dev_unlock(hdev);
/* Completion callback for the vendor "get raw RSSI" HCI request.
 * Completes the pending MGMT_OP_GET_RAW_RSSI command, returning the HCI
 * status byte as the one-byte response payload. The actual RSSI value is
 * delivered separately via mgmt_raw_rssi_response(). */
7102 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7104 	struct mgmt_pending_cmd *cmd;
7106 	BT_DBG("status 0x%02x", status);
7110 	cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
7114 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7115 			  MGMT_STATUS_SUCCESS, &status, 1);
7117 	mgmt_pending_remove(cmd);
7120 	hci_dev_unlock(hdev);
/* MGMT_OP_GET_RAW_RSSI handler (Tizen vendor extension).
 * Resolves the peer address to a connection handle and issues the vendor
 * HCI_OP_GET_RAW_RSSI command for that link.
 *
 * cp->link_type: 0x01 selects the LE link, anything else the BR/EDR (ACL)
 * link to the same address. */
7123 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
7126 	struct mgmt_pending_cmd *cmd;
7127 	struct hci_request req;
7128 	struct mgmt_cp_get_raw_rssi *cp = data;
7129 	struct hci_cp_get_raw_rssi hci_cp;
7131 	struct hci_conn *conn;
7135 	BT_DBG("Get Raw RSSI.");
7139 	if (!lmp_le_capable(hdev)) {
7140 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7141 				      MGMT_STATUS_NOT_SUPPORTED);
7145 	if (cp->link_type == 0x01)
7146 		dest_type = LE_LINK;
7148 		dest_type = ACL_LINK;
7150 	/* Get LE/BREDR link handle info */
7151 	conn = hci_conn_hash_lookup_ba(hdev,
7152 			dest_type, &cp->bt_address);
/* No live connection to the requested peer. */
7154 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7155 				      MGMT_STATUS_NOT_CONNECTED);
7158 	hci_cp.conn_handle = conn->handle;
7160 	if (!hdev_is_powered(hdev)) {
7161 		BT_DBG("%s", hdev->name);
7162 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7163 				      MGMT_STATUS_NOT_POWERED);
/* Reject a second request while one is still in flight. */
7167 	if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
7168 		BT_DBG("%s", hdev->name);
7169 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7174 	cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
7176 		BT_DBG("%s", hdev->name);
/* Queue the vendor command; get_raw_rssi_complete() finishes the mgmt
 * command when the controller responds. */
7181 	hci_req_init(&req, hdev);
7183 	BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
7184 	hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
7185 	err = hci_req_run(&req, get_raw_rssi_complete);
7188 		mgmt_pending_remove(cmd);
7189 		BT_ERR("Error in requesting hci_req_run");
7193 	hci_dev_unlock(hdev);
/* Translates a vendor raw-RSSI command-complete event into the
 * MGMT_EV_RAW_RSSI event broadcast to mgmt listeners. Maps the connection
 * handle back to a peer address and link type (0x01 = LE, 0x00 = BR/EDR). */
7198 void mgmt_raw_rssi_response(struct hci_dev *hdev,
7199 			struct hci_cc_rp_get_raw_rssi *rp, int success)
7201 	struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
7202 	struct hci_conn *conn;
7204 	mgmt_rp.status = rp->status;
7205 	mgmt_rp.rssi_dbm = rp->rssi_dbm;
/* Resolve handle -> connection to recover the peer address. */
7207 	conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
7211 	bacpy(&mgmt_rp.bt_address, &conn->dst);
7212 	if (conn->type == LE_LINK)
7213 		mgmt_rp.link_type = 0x01;
7215 		mgmt_rp.link_type = 0x00;
7217 	mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
7218 		   sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* Completion callback for the per-link RSSI threshold disable request.
 * Completes the pending MGMT_OP_SET_RSSI_DISABLE command with the HCI
 * status byte as its one-byte payload. */
7221 static void set_disable_threshold_complete(struct hci_dev *hdev,
7222 					u8 status, u16 opcode)
7224 	struct mgmt_pending_cmd *cmd;
7226 	BT_DBG("status 0x%02x", status);
7230 	cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7234 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7235 			  MGMT_STATUS_SUCCESS, &status, 1);
7237 	mgmt_pending_remove(cmd);
7240 	hci_dev_unlock(hdev);
/* Removes RSSI monitoring for a single link (Tizen vendor extension).
 * Clears the threshold/alert configuration for the connection identified by
 * cp->bdaddr + cp->link_type by sending the vendor threshold sub-command
 * with all-zero threshold values. */
7244 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
7245 			void *data, u16 len)
7248 	struct hci_cp_set_rssi_threshold th = { 0, };
7249 	struct mgmt_cp_disable_rssi *cp = data;
7250 	struct hci_conn *conn;
7251 	struct mgmt_pending_cmd *cmd;
7252 	struct hci_request req;
7255 	BT_DBG("Set Disable RSSI.");
7259 	if (!lmp_le_capable(hdev)) {
7260 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7261 				      MGMT_STATUS_NOT_SUPPORTED);
7265 	/* Get LE/ACL link handle info*/
7266 	if (cp->link_type == 0x01)
7267 		dest_type = LE_LINK;
7269 		dest_type = ACL_LINK;
7271 	conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
/* No connection: reply with status 1. NOTE(review): a raw literal is used
 * here instead of a MGMT_STATUS_* constant — presumably "not connected";
 * confirm against userspace expectations. */
7273 		err = mgmt_cmd_complete(sk, hdev->id,
7274 				MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
/* Vendor sub-opcode 0x0B sets the RSSI threshold; zeroed mask/threshold
 * values disable alerts for this handle. */
7278 	th.hci_le_ext_opcode = 0x0B;
7280 	th.conn_handle = conn->handle;
7281 	th.alert_mask = 0x00;
7283 	th.in_range_th = 0x00;
7286 	if (!hdev_is_powered(hdev)) {
7287 		BT_DBG("%s", hdev->name);
7288 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7293 	if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
7294 		BT_DBG("%s", hdev->name);
7295 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7300 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
7303 		BT_DBG("%s", hdev->name);
/* Queue the vendor command; completion handled asynchronously. */
7308 	hci_req_init(&req, hdev);
7310 	hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7311 	err = hci_req_run(&req, set_disable_threshold_complete);
7313 		mgmt_pending_remove(cmd);
7314 		BT_ERR("Error in requesting hci_req_run");
7319 	hci_dev_unlock(hdev);
/* Forwards a vendor RSSI-alert HCI event to userspace as
 * MGMT_EV_RSSI_ALERT. Resolves the connection handle to a peer address;
 * drops the event (with an error log) if the handle is unknown. */
7324 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
7325 						s8 alert_type, s8 rssi_dbm)
7327 	struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
7328 	struct hci_conn *conn;
7330 	BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
7331 	       conn_handle, alert_type, rssi_dbm);
7333 	conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
7336 		BT_ERR("RSSI alert Error: Device not found for handle");
7339 	bacpy(&mgmt_ev.bdaddr, &conn->dst);
/* link_type: 0x01 = LE, 0x00 = BR/EDR — mirrors the request encoding. */
7341 	if (conn->type == LE_LINK)
7342 		mgmt_ev.link_type = 0x01;
7344 		mgmt_ev.link_type = 0x00;
7346 	mgmt_ev.alert_type = alert_type;
7347 	mgmt_ev.rssi_dbm = rssi_dbm;
7349 	mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
7350 		   sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
/* Fails a pending MGMT_OP_START_LE_DISCOVERY command: resets the LE
 * discovery state machine to STOPPED and completes the command with the
 * translated HCI status and the attempted discovery type. */
7354 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
7356 	struct mgmt_pending_cmd *cmd;
7360 	hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7362 	cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7366 	type = hdev->le_discovery.type;
7368 	err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7369 				mgmt_status(status), &type, sizeof(type));
7370 	mgmt_pending_remove(cmd);
/* Completion callback for the LE-discovery start request. On failure,
 * unwinds via mgmt_start_le_discovery_failed(); on success, transitions
 * the LE discovery state to FINDING and schedules the delayed scan-disable
 * work. NOTE(review): the timeout computation lines are elided from this
 * listing; `timeout` is initialized to 0 here. */
7375 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
7378 	unsigned long timeout = 0;
7380 	BT_DBG("status %d", status);
7384 		mgmt_start_le_discovery_failed(hdev, status);
7385 		hci_dev_unlock(hdev);
7390 	hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
7391 	hci_dev_unlock(hdev);
/* Only DISCOV_TYPE_LE is expected for the separate LE discovery path. */
7393 	if (hdev->le_discovery.type != DISCOV_TYPE_LE)
7394 		BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
7399 	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_LE_DISCOVERY handler (Tizen vendor extension).
 * Starts an LE-only discovery session: validates power/state/type, stops
 * any running background scan, programs scan parameters (with a resolvable
 * or unresolvable private own-address as appropriate) and enables scanning.
 * start_le_discovery_complete() finishes the mgmt command.
 *
 * FIX(review): line 7460 contained mojibake — "memset(¶m_cp, ...)".
 * The '¶' character is the HTML entity "&para;" that garbled the original
 * "&param_cp"; restored the address-of expression. */
7402 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
7403 			   void *data, u16 len)
7405 	struct mgmt_cp_start_le_discovery *cp = data;
7406 	struct mgmt_pending_cmd *cmd;
7407 	struct hci_cp_le_set_scan_param param_cp;
7408 	struct hci_cp_le_set_scan_enable enable_cp;
7409 	struct hci_request req;
7410 	u8 status, own_addr_type;
7413 	BT_DBG("%s", hdev->name);
7417 	if (!hdev_is_powered(hdev)) {
7418 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7419 				      MGMT_STATUS_NOT_POWERED);
/* Only one LE discovery session at a time. */
7423 	if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
7424 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7429 	if (cp->type != DISCOV_TYPE_LE) {
7430 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7431 				      MGMT_STATUS_INVALID_PARAMS);
7435 	cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
7441 	hdev->le_discovery.type = cp->type;
7443 	hci_req_init(&req, hdev);
7445 	status = mgmt_le_support(hdev);
7447 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7449 		mgmt_pending_remove(cmd);
7453 	/* If controller is scanning, it means the background scanning
7454 	 * is running. Thus, we should temporarily stop it in order to
7455 	 * set the discovery scanning parameters.
7457 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
7458 		hci_req_add_le_scan_disable(&req, false);
7460 	memset(&param_cp, 0, sizeof(param_cp));
7462 	/* All active scans will be done with either a resolvable
7463 	 * private address (when privacy feature has been enabled)
7464 	 * or unresolvable private address.
7466 	err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
7468 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7469 				      MGMT_STATUS_FAILED);
7470 		mgmt_pending_remove(cmd);
/* Scan parameters come from the device defaults (see le_set_scan_params). */
7474 	param_cp.type = hdev->le_scan_type;
7475 	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
7476 	param_cp.window = cpu_to_le16(hdev->le_scan_window);
7477 	param_cp.own_address_type = own_addr_type;
7478 	hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
7481 	memset(&enable_cp, 0, sizeof(enable_cp));
7482 	enable_cp.enable = LE_SCAN_ENABLE;
7483 	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
7485 	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
7488 	err = hci_req_run(&req, start_le_discovery_complete);
7490 		mgmt_pending_remove(cmd);
7492 		hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
7495 	hci_dev_unlock(hdev);
/* Fails a pending MGMT_OP_STOP_LE_DISCOVERY command, completing it with
 * the translated HCI status and the active discovery type. */
7499 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
7501 	struct mgmt_pending_cmd *cmd;
7504 	cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7508 	err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7509 				mgmt_status(status), &hdev->le_discovery.type,
7510 				sizeof(hdev->le_discovery.type));
7511 	mgmt_pending_remove(cmd);
/* Completion callback for the LE-discovery stop request: reports failure
 * via mgmt_stop_le_discovery_failed(), otherwise marks LE discovery as
 * STOPPED. */
7516 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
7519 	BT_DBG("status %d", status);
7524 		mgmt_stop_le_discovery_failed(hdev, status);
7528 	hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7531 	hci_dev_unlock(hdev);
/* MGMT_OP_STOP_LE_DISCOVERY handler (Tizen vendor extension).
 * Stops an active LE discovery session: validates that discovery is active
 * and of the requested type, cancels the delayed scan-disable work and
 * queues an LE scan disable. stop_le_discovery_complete() finishes the
 * mgmt command. */
7534 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
7535 			  void *data, u16 len)
7537 	struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
7538 	struct mgmt_pending_cmd *cmd;
7539 	struct hci_request req;
7542 	BT_DBG("%s", hdev->name);
7546 	if (!hci_le_discovery_active(hdev)) {
7547 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7548 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
7549 					sizeof(mgmt_cp->type));
/* The requested type must match the session that is running. */
7553 	if (hdev->le_discovery.type != mgmt_cp->type) {
7554 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7555 					MGMT_STATUS_INVALID_PARAMS,
7556 					&mgmt_cp->type, sizeof(mgmt_cp->type));
7560 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
7566 	hci_req_init(&req, hdev);
/* Only the FINDING state can be stopped; anything else is unexpected. */
7568 	if (hdev->le_discovery.state != DISCOVERY_FINDING) {
7569 		BT_DBG("unknown le discovery state %u",
7570 		       hdev->le_discovery.state);
7572 		mgmt_pending_remove(cmd);
7573 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7574 					MGMT_STATUS_FAILED, &mgmt_cp->type,
7575 					sizeof(mgmt_cp->type));
7579 	cancel_delayed_work(&hdev->le_scan_disable);
7580 	hci_req_add_le_scan_disable(&req, false);
7582 	err = hci_req_run(&req, stop_le_discovery_complete);
7584 		mgmt_pending_remove(cmd);
7586 		hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
7589 	hci_dev_unlock(hdev);
/* Separate LE discovery */
/* Notifies userspace that LE discovery started/stopped. Completes any
 * pending start/stop command with the current discovery type, then emits
 * MGMT_EV_DISCOVERING with the new state. */
7594 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
7596 	struct mgmt_ev_discovering ev;
7597 	struct mgmt_pending_cmd *cmd;
7599 	BT_DBG("%s le discovering %u", hdev->name, discovering);
/* Either a start or a stop command may be pending; resolve whichever. */
7602 	cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7604 		cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7607 		u8 type = hdev->le_discovery.type;
7609 		mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
7611 		mgmt_pending_remove(cmd);
7614 	memset(&ev, 0, sizeof(ev));
7615 	ev.type = hdev->le_discovery.type;
7616 	ev.discovering = discovering;
7618 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Cancels a pending LE connection attempt by sending
 * HCI_OP_LE_CREATE_CONN_CANCEL directly; failures are only logged. */
7621 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
7622 				void *data, u16 len)
7626 	BT_DBG("%s", hdev->name);
7630 	err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
7632 		BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
7634 	hci_dev_unlock(hdev);
/* Validates LE connection-parameter-update values against the ranges from
 * the Bluetooth Core Specification: interval 6..3200 (min <= max),
 * supervision timeout 10..3200 units with timeout > interval, and slave
 * latency at most 499 and below the derived maximum for the timeout. */
7639 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
7644 	if (min > max || min < 6 || max > 3200)
7647 	if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout (10ms units) must exceed the max interval (1.25ms units):
 * to_multiplier * 10ms > max * 1.25ms <=> max < to_multiplier * 8. */
7650 	if (max >= to_multiplier * 8)
7653 	max_latency = (to_multiplier * 8 / max) - 1;
7655 	if (latency > 499 || latency > max_latency)
/* MGMT_OP_LE_CONN_UPDATE handler (Tizen vendor extension).
 * Validates the requested connection parameters, looks up the LE link to
 * cp->bdaddr and issues a connection parameter update. */
7661 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
7664 	struct mgmt_cp_le_conn_update *cp = data;
7666 	struct hci_conn *conn;
7667 	u16 min, max, latency, supervision_timeout;
7670 	if (!hdev_is_powered(hdev))
7671 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7672 				       MGMT_STATUS_NOT_POWERED);
/* Parameters arrive little-endian from userspace. */
7674 	min = __le16_to_cpu(cp->conn_interval_min);
7675 	max = __le16_to_cpu(cp->conn_interval_max);
7676 	latency = __le16_to_cpu(cp->conn_latency);
7677 	supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
7679 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
7680 	       min, max, latency, supervision_timeout);
7682 	err = check_le_conn_update_param(min, max, latency,
7683 					 supervision_timeout);
7686 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7687 				       MGMT_STATUS_INVALID_PARAMS);
7691 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
7693 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7694 				      MGMT_STATUS_NOT_CONNECTED);
7695 		hci_dev_unlock(hdev);
7699 	hci_dev_unlock(hdev);
/* Issue the update outside the device lock. */
7701 	hci_le_conn_update(conn, min, max, latency, supervision_timeout);
7703 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
/* Completion callback for the EIR update triggered by
 * MGMT_OP_SET_MANUFACTURER_DATA: reports the translated HCI status on
 * failure, or completes the pending command on success. */
7707 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
7710 	struct mgmt_cp_set_manufacturer_data *cp;
7711 	struct mgmt_pending_cmd *cmd;
7713 	BT_DBG("status 0x%02x", status);
7717 	cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
7724 		mgmt_cmd_status(cmd->sk, hdev->id,
7725 				MGMT_OP_SET_MANUFACTURER_DATA,
7726 				mgmt_status(status));
7728 		mgmt_cmd_complete(cmd->sk, hdev->id,
7729 				  MGMT_OP_SET_MANUFACTURER_DATA, 0,
7732 	mgmt_pending_remove(cmd);
7735 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_MANUFACTURER_DATA handler (Tizen vendor extension).
 * Stores new manufacturer-specific EIR data on the device and refreshes
 * the EIR. cp->data layout (inferred from the checks below — confirm
 * against mgmt_tizen.h): data[0] = length byte covering type + payload,
 * data[1] = EIR type (must be 0xFF, manufacturer specific),
 * data[2..] = payload of data[0]-1 bytes. On HCI submission failure the
 * previous data is restored from old_data/old_len. */
7738 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
7739 				void *data, u16 len)
7741 	struct mgmt_pending_cmd *cmd;
7742 	struct hci_request req;
7743 	struct mgmt_cp_set_manufacturer_data *cp = data;
7744 	u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
7748 	BT_DBG("%s", hdev->name);
7750 	if (!lmp_bredr_capable(hdev))
7751 		return mgmt_cmd_status(sk, hdev->id,
7752 				       MGMT_OP_SET_MANUFACTURER_DATA,
7753 				       MGMT_STATUS_NOT_SUPPORTED);
/* Length byte must be non-zero and the payload must fit the store. */
7755 	if (cp->data[0] == 0 ||
7756 	    cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
7757 		return mgmt_cmd_status(sk, hdev->id,
7758 				       MGMT_OP_SET_MANUFACTURER_DATA,
7759 				       MGMT_STATUS_INVALID_PARAMS);
/* Only EIR type 0xFF (manufacturer specific data) is accepted. */
7761 	if (cp->data[1] != 0xFF)
7762 		return mgmt_cmd_status(sk, hdev->id,
7763 				       MGMT_OP_SET_MANUFACTURER_DATA,
7764 				       MGMT_STATUS_NOT_SUPPORTED);
7768 	if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
7769 		err = mgmt_cmd_status(sk, hdev->id,
7770 				      MGMT_OP_SET_MANUFACTURER_DATA,
7775 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
7782 	hci_req_init(&req, hdev);
7784 	/* if new data is same as previous data then return command
/* No-op update: complete immediately without touching the EIR. */
7787 	if (hdev->manufacturer_len == cp->data[0] - 1 &&
7788 	    !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
7789 		mgmt_pending_remove(cmd);
7790 		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
7791 				  0, cp, sizeof(*cp));
/* Remember the previous data so it can be restored on failure. */
7796 	old_len = hdev->manufacturer_len;
7798 		memcpy(old_data, hdev->manufacturer_data, old_len);
7800 	hdev->manufacturer_len = cp->data[0] - 1;
7801 	if (hdev->manufacturer_len > 0)
7802 		memcpy(hdev->manufacturer_data, cp->data + 2,
7803 		       hdev->manufacturer_len);
7805 	__hci_req_update_eir(&req);
7807 	err = hci_req_run(&req, set_manufacturer_data_complete);
7809 		mgmt_pending_remove(cmd);
7814 	hci_dev_unlock(hdev);
/* Failure path: roll back to the previous manufacturer data. */
7819 	memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
7820 	hdev->manufacturer_len = old_len;
7821 	if (hdev->manufacturer_len > 0)
7822 		memcpy(hdev->manufacturer_data, old_data,
7823 		       hdev->manufacturer_len);
7824 	hci_dev_unlock(hdev);
/* MGMT_OP_LE_SET_SCAN_PARAMS handler (Tizen vendor extension).
 * Stores new LE scan type/interval/window on the device after range
 * validation (0x0004..0x4000, window <= interval, per the HCI LE Set Scan
 * Parameters command), then restarts background scanning if it is running
 * so the new parameters take effect. */
7828 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
7829 			void *data, u16 len)
7831 	struct mgmt_cp_le_set_scan_params *cp = data;
7832 	__u16 interval, window;
7835 	BT_DBG("%s", hdev->name);
7837 	if (!lmp_le_capable(hdev))
7838 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7839 				       MGMT_STATUS_NOT_SUPPORTED);
7841 	interval = __le16_to_cpu(cp->interval);
7843 	if (interval < 0x0004 || interval > 0x4000)
7844 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7845 				       MGMT_STATUS_INVALID_PARAMS);
7847 	window = __le16_to_cpu(cp->window);
7849 	if (window < 0x0004 || window > 0x4000)
7850 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7851 				       MGMT_STATUS_INVALID_PARAMS);
/* Scan window may never exceed the scan interval. */
7853 	if (window > interval)
7854 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7855 				       MGMT_STATUS_INVALID_PARAMS);
7859 	hdev->le_scan_type = cp->type;
7860 	hdev->le_scan_interval = interval;
7861 	hdev->le_scan_window = window;
7863 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
7866 	/* If background scan is running, restart it so new parameters are
/* Only restart when no discovery is in progress. */
7869 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
7870 	    hdev->discovery.state == DISCOVERY_STOPPED) {
7871 		struct hci_request req;
7873 		hci_req_init(&req, hdev);
7875 		hci_req_add_le_scan_disable(&req, false);
7876 		hci_req_add_le_passive_scan(&req);
7878 		hci_req_run(&req, NULL);
7881 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_VOICE_SETTING handler (Tizen vendor extension).
 * Records the SCO voice setting and role on the ACL connection to
 * cp->bdaddr and selects the matching SCO codec configuration.
 * voice_setting 0x0063 selects wideband (WBS); anything else narrowband —
 * presumably mSBC vs CVSD, confirm against the audio HAL. */
7886 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
7887 			void *data, u16 len)
7889 	struct mgmt_cp_set_voice_setting *cp = data;
7890 	struct hci_conn *conn;
7891 	struct hci_conn *sco_conn;
7895 	BT_DBG("%s", hdev->name);
7897 	if (!lmp_bredr_capable(hdev)) {
7898 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
7899 				       MGMT_STATUS_NOT_SUPPORTED);
7904 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
/* No ACL link to the peer: complete with an empty success reply. */
7906 		err = mgmt_cmd_complete(sk, hdev->id,
7907 					MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
7911 	conn->voice_setting = cp->voice_setting;
7912 	conn->sco_role = cp->sco_role;
/* Refuse to reconfigure while a SCO link to a different peer exists. */
7914 	sco_conn = hci_conn_hash_lookup_sco(hdev);
7915 	if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
7916 		BT_ERR("There is other SCO connection.");
/* Handsfree role uses the local codec setters, gateway role the _gw_
 * variants. */
7920 	if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
7921 		if (conn->voice_setting == 0x0063)
7922 			sco_connect_set_wbc(hdev);
7924 			sco_connect_set_nbc(hdev);
7926 		if (conn->voice_setting == 0x0063)
7927 			sco_connect_set_gw_wbc(hdev);
7929 			sco_connect_set_gw_nbc(hdev);
7933 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
7937 	hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_TX_POWER handler (Tizen vendor extension).
 * Returns the cached advertising TX power (hdev->adv_tx_power) in a
 * heap-allocated reply structure. */
7941 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
7942 			void *data, u16 len)
7944 	struct mgmt_rp_get_adv_tx_power *rp;
7948 	BT_DBG("%s", hdev->name);
7952 	rp_len = sizeof(*rp);
7953 	rp = kmalloc(rp_len, GFP_KERNEL);
7959 	rp->adv_tx_power = hdev->adv_tx_power;
7961 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
7967 	hci_dev_unlock(hdev);
/* Broadcasts a controller hardware-error event (HCI Hardware Error code)
 * to mgmt listeners as MGMT_EV_HARDWARE_ERROR. */
7972 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
7974 	struct mgmt_ev_hardware_error ev;
7976 	ev.error_code = err_code;
7977 	mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
/* Broadcasts a payload-less MGMT_EV_TX_TIMEOUT_ERROR event, signalling an
 * HCI command TX timeout to mgmt listeners. */
7980 void mgmt_tx_timeout_error(struct hci_dev *hdev)
7982 	mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
/* Broadcasts a vendor multi-advertising state change (instance, reason and
 * the associated connection handle) as MGMT_EV_MULTI_ADV_STATE_CHANGED. */
7985 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
7986 				u8 state_change_reason, u16 connection_handle)
7988 	struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
7990 	BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
7991 	       adv_instance, state_change_reason, connection_handle);
7993 	mgmt_ev.adv_instance = adv_instance;
7994 	mgmt_ev.state_change_reason = state_change_reason;
7995 	mgmt_ev.connection_handle = connection_handle;
7997 	mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
7998 		   sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
/* MGMT_OP_ENABLE_6LOWPAN handler (Tizen vendor extension).
 * Toggles the Bluetooth 6LoWPAN subsystem on or off according to
 * cp->enable_6lowpan; requires a powered, LE-capable controller. */
8002 static int enable_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8003 			void *data, u16 len)
8006 	struct mgmt_cp_enable_6lowpan *cp = data;
8008 	BT_DBG("%s", hdev->name);
8012 	if (!hdev_is_powered(hdev)) {
8013 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8014 				      MGMT_STATUS_NOT_POWERED);
8018 	if (!lmp_le_capable(hdev)) {
8019 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8020 				      MGMT_STATUS_NOT_SUPPORTED);
8024 	if (cp->enable_6lowpan)
8025 		bt_6lowpan_enable();
8027 		bt_6lowpan_disable();
8029 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8030 				MGMT_STATUS_SUCCESS, NULL, 0);
8032 	hci_dev_unlock(hdev);
/* MGMT_OP_CONNECT_6LOWPAN handler (Tizen vendor extension).
 * Validates the LE address type, then asks the 6LoWPAN layer to connect to
 * the given peer. The connect call runs outside the device lock. */
8036 static int connect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8037 			void *data, u16 len)
8039 	struct mgmt_cp_connect_6lowpan *cp = data;
8040 	__u8 addr_type = ADDR_LE_DEV_PUBLIC;
8043 	BT_DBG("%s", hdev->name);
8047 	if (!lmp_le_capable(hdev)) {
8048 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8049 				      MGMT_STATUS_NOT_SUPPORTED);
8053 	if (!hdev_is_powered(hdev)) {
8054 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8055 				      MGMT_STATUS_REJECTED);
/* Only LE public/random address types are valid for 6LoWPAN links. */
8059 	if (bdaddr_type_is_le(cp->addr.type)) {
8060 		if (cp->addr.type == BDADDR_LE_PUBLIC)
8061 			addr_type = ADDR_LE_DEV_PUBLIC;
8063 			addr_type = ADDR_LE_DEV_RANDOM;
8065 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8066 				MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8070 	hci_dev_unlock(hdev);
8072 	/* 6lowpan Connect */
8073 	err = _bt_6lowpan_connect(&cp->addr.bdaddr, cp->addr.type);
8078 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8079 					MGMT_STATUS_REJECTED, NULL, 0);
8084 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
8087 	hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT_6LOWPAN handler (Tizen vendor extension).
 * Validates the LE address type, finds the connected LE link matching both
 * the address and the address type, and asks the 6LoWPAN layer to tear it
 * down.
 *
 * FIX(review): the success reply on line 8158 used MGMT_OP_CONNECT_6LOWPAN
 * — a copy-paste from connect_bt_6lowpan() — so userspace waiting on the
 * disconnect command never saw its completion. Corrected to
 * MGMT_OP_DISCONNECT_6LOWPAN, matching every other reply in this handler. */
8091 static int disconnect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8092 			void *data, u16 len)
8094 	struct mgmt_cp_disconnect_6lowpan *cp = data;
8095 	struct hci_conn *conn = NULL;
8096 	__u8 addr_type = ADDR_LE_DEV_PUBLIC;
8099 	BT_DBG("%s", hdev->name);
8103 	if (!lmp_le_capable(hdev)) {
8104 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
8105 				      MGMT_STATUS_NOT_SUPPORTED);
8109 	if (!hdev_is_powered(hdev)) {
8110 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
8111 				      MGMT_STATUS_REJECTED);
/* Only LE public/random address types are valid for 6LoWPAN links. */
8115 	if (bdaddr_type_is_le(cp->addr.type)) {
8116 		if (cp->addr.type == BDADDR_LE_PUBLIC)
8117 			addr_type = ADDR_LE_DEV_PUBLIC;
8119 			addr_type = ADDR_LE_DEV_RANDOM;
8121 		err = mgmt_cmd_complete(sk, hdev->id,
8122 					MGMT_OP_DISCONNECT_6LOWPAN,
8123 					MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8127 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8129 		err = mgmt_cmd_complete(sk, hdev->id,
8130 					MGMT_OP_DISCONNECT_6LOWPAN,
8131 					MGMT_STATUS_NOT_CONNECTED, NULL, 0);
/* The live link's address type must match the request. */
8135 	if (conn->dst_type != addr_type) {
8136 		err = mgmt_cmd_complete(sk, hdev->id,
8137 					MGMT_OP_DISCONNECT_6LOWPAN,
8138 					MGMT_STATUS_INVALID_PARAMS, NULL, 0);
8142 	if (conn->state != BT_CONNECTED) {
8143 		err = mgmt_cmd_complete(sk, hdev->id,
8144 					MGMT_OP_DISCONNECT_6LOWPAN,
8145 					MGMT_STATUS_NOT_CONNECTED, NULL, 0);
8149 	/* 6lowpan Disconnect */
8150 	err = _bt_6lowpan_disconnect(conn->l2cap_data, cp->addr.type);
8152 		err = mgmt_cmd_complete(sk, hdev->id,
8153 					MGMT_OP_DISCONNECT_6LOWPAN,
8154 					MGMT_STATUS_REJECTED, NULL, 0);
8158 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN, 0,
8162 	hci_dev_unlock(hdev);
/* Broadcasts a 6LoWPAN connection state change (peer address, address
 * type, connected flag and the 16-byte network interface name) as
 * MGMT_EV_6LOWPAN_CONN_STATE_CHANGED. */
8166 void mgmt_6lowpan_conn_changed(struct hci_dev *hdev, char if_name[16],
8167 			bdaddr_t *bdaddr, u8 addr_type, bool connected)
8170 	struct mgmt_ev_6lowpan_conn_state_changed *ev = (void *)buf;
8173 	memset(buf, 0, sizeof(buf));
8174 	bacpy(&ev->addr.bdaddr, bdaddr);
8175 	ev->addr.type = addr_type;
8176 	ev->connected = connected;
/* Fixed 16-byte interface name, copied verbatim (not NUL-trimmed). */
8177 	memcpy(ev->ifname, (__u8 *)if_name, 16);
8179 	ev_size = sizeof(*ev);
8181 	mgmt_event(MGMT_EV_6LOWPAN_CONN_STATE_CHANGED, hdev, ev, ev_size, NULL);
/* Completion handler for HCI LE Read Maximum Data Length. Completes the
 * pending MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH command with the cached
 * tx/rx octet and time maxima from hdev (little-endian on the wire). */
8184 void mgmt_le_read_maximum_data_length_complete(struct hci_dev *hdev, u8 status)
8186 	struct mgmt_pending_cmd *cmd;
8187 	struct mgmt_rp_le_read_maximum_data_length rp;
8189 	BT_DBG("%s status %u", hdev->name, status);
8191 	cmd = pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev);
8196 		mgmt_cmd_status(cmd->sk, hdev->id,
8197 				MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8198 				mgmt_status(status));
8200 	memset(&rp, 0, sizeof(rp));
8202 	rp.max_tx_octets = cpu_to_le16(hdev->le_max_tx_len);
8203 	rp.max_tx_time = cpu_to_le16(hdev->le_max_tx_time);
8204 	rp.max_rx_octets = cpu_to_le16(hdev->le_max_rx_len);
8205 	rp.max_rx_time = cpu_to_le16(hdev->le_max_rx_time);
8207 	mgmt_cmd_complete(cmd->sk, hdev->id,
8208 			  MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, 0,
8211 	mgmt_pending_remove(cmd);
/* MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH handler (Tizen vendor extension).
 * Issues HCI_OP_LE_READ_MAX_DATA_LEN; the reply is delivered by
 * mgmt_le_read_maximum_data_length_complete(). */
8214 static int read_maximum_le_data_length(struct sock *sk,
8215 		struct hci_dev *hdev, void *data, u16 len)
8217 	struct mgmt_pending_cmd *cmd;
8220 	BT_DBG("read_maximum_le_data_length %s", hdev->name);
8224 	if (!hdev_is_powered(hdev)) {
8225 		err = mgmt_cmd_status(sk, hdev->id,
8226 				      MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8227 				      MGMT_STATUS_NOT_POWERED);
8231 	if (!lmp_le_capable(hdev)) {
8232 		err = mgmt_cmd_status(sk, hdev->id,
8233 				      MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8234 				      MGMT_STATUS_NOT_SUPPORTED);
/* Only one read may be in flight at a time. */
8238 	if (pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev)) {
8239 		err = mgmt_cmd_status(sk, hdev->id,
8240 				      MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8245 	cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
8252 	err = hci_send_cmd(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
8254 		mgmt_pending_remove(cmd);
8257 	hci_dev_unlock(hdev);
/* Completion handler for HCI LE Write Suggested Default Data Length.
 * Resolves the pending MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH
 * command with the translated status. */
8261 void mgmt_le_write_host_suggested_data_length_complete(struct hci_dev *hdev,
8264 	struct mgmt_pending_cmd *cmd;
8266 	BT_DBG("status 0x%02x", status);
8270 	cmd = pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev);
/* No matching request — can happen if the command timed out. */
8272 		BT_ERR("cmd not found in the pending list");
8277 		mgmt_cmd_status(cmd->sk, hdev->id,
8278 				MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8279 				mgmt_status(status));
8281 		mgmt_cmd_complete(cmd->sk, hdev->id,
8282 				  MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8285 	mgmt_pending_remove(cmd);
8288 	hci_dev_unlock(hdev);
/* MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH handler (Tizen vendor
 * extension). Forwards the suggested default TX octets/time to the
 * controller via HCI_OP_LE_WRITE_DEF_DATA_LEN; completion is handled by
 * mgmt_le_write_host_suggested_data_length_complete(). */
8291 static int write_host_suggested_le_data_length(struct sock *sk,
8292 		struct hci_dev *hdev, void *data, u16 len)
8294 	struct mgmt_pending_cmd *cmd;
8295 	struct mgmt_cp_le_write_host_suggested_data_length *cp = data;
8296 	struct hci_cp_le_write_def_data_len hci_data;
8299 	BT_DBG("Write host suggested data length request for %s", hdev->name);
8303 	if (!hdev_is_powered(hdev)) {
8304 		err = mgmt_cmd_status(sk, hdev->id,
8305 				      MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8306 				      MGMT_STATUS_NOT_POWERED);
8310 	if (!lmp_le_capable(hdev)) {
8311 		err = mgmt_cmd_status(sk, hdev->id,
8312 				      MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8313 				      MGMT_STATUS_NOT_SUPPORTED);
8317 	if (pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
8318 		err = mgmt_cmd_status(sk, hdev->id,
8319 				      MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
8324 	cmd = mgmt_pending_add(sk, MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
/* Values are forwarded as-is — presumably already little-endian from
 * userspace; no byte-order conversion is applied here. */
8331 	hci_data.tx_len = cp->def_tx_octets;
8332 	hci_data.tx_time = cp->def_tx_time;
8334 	err = hci_send_cmd(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
8335 			   sizeof(hci_data), &hci_data);
8337 		mgmt_pending_remove(cmd);
8340 	hci_dev_unlock(hdev);
/* Completion handler for HCI LE Read Suggested Default Data Length.
 * Completes the pending MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH
 * command with the cached default TX octets/time from hdev. */
8345 void mgmt_le_read_host_suggested_data_length_complete(struct hci_dev *hdev,
8348 	struct mgmt_pending_cmd *cmd;
8349 	struct mgmt_rp_le_read_host_suggested_data_length rp;
8351 	BT_DBG("%s status %u", hdev->name, status);
8353 	cmd = pending_find(MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, hdev);
8355 		BT_ERR("cmd not found in the pending list");
8360 		mgmt_cmd_status(cmd->sk, hdev->id,
8361 				MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
8362 				mgmt_status(status));
8364 	memset(&rp, 0, sizeof(rp));
8366 	rp.def_tx_octets = cpu_to_le16(hdev->le_def_tx_len);
8367 	rp.def_tx_time = cpu_to_le16(hdev->le_def_tx_time);
8369 	mgmt_cmd_complete(cmd->sk, hdev->id,
8370 			  MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, 0,
8373 	mgmt_pending_remove(cmd);
/* MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH handler (Tizen vendor
 * extension). Issues HCI_OP_LE_READ_DEF_DATA_LEN; the reply is delivered
 * by mgmt_le_read_host_suggested_data_length_complete(). */
8376 static int read_host_suggested_data_length(struct sock *sk,
8377 		struct hci_dev *hdev, void *data, u16 len)
8379 	struct mgmt_pending_cmd *cmd;
8382 	BT_DBG("read_host_suggested_data_length %s", hdev->name);
8386 	if (!hdev_is_powered(hdev)) {
8387 		err = mgmt_cmd_status(sk, hdev->id,
8388 				      MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
8389 				      MGMT_STATUS_NOT_POWERED);
8393 	if (!lmp_le_capable(hdev)) {
8394 		err = mgmt_cmd_status(sk, hdev->id,
8395 				      MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
8396 				      MGMT_STATUS_NOT_SUPPORTED);
8400 	if (pending_find(MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
8401 		err = mgmt_cmd_status(sk, hdev->id,
8402 				      MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
8407 	cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
8414 	err = hci_send_cmd(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
8416 		mgmt_pending_remove(cmd);
8419 	hci_dev_unlock(hdev);
8423 #endif /* TIZEN_BT */
/* Validates a single LTK entry from MGMT_OP_LOAD_LONG_TERM_KEYS:
 * initiator must be a boolean (0x00/0x01) and the address must be an LE
 * address — for static random addresses the two most significant bits of
 * the MSB must both be set (Core Spec address format). */
8425 static bool ltk_is_valid(struct mgmt_ltk_info *key)
8427 	if (key->initiator != 0x00 && key->initiator != 0x01)
8430 	switch (key->addr.type) {
8431 	case BDADDR_LE_PUBLIC:
8434 	case BDADDR_LE_RANDOM:
8435 		/* Two most significant bits shall be set */
8436 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 * Replaces the device's SMP LTK store with the key list supplied by
 * userspace after validating the count, total length and each entry.
 * Blocked keys are skipped with a warning; unknown key types are skipped
 * silently (default case elided in this listing). */
8444 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
8445 			       void *cp_data, u16 len)
8447 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound that keeps the total message length within u16. */
8448 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
8449 				   sizeof(struct mgmt_ltk_info));
8450 	u16 key_count, expected_len;
8453 	bt_dev_dbg(hdev, "sock %p", sk);
8455 	if (!lmp_le_capable(hdev))
8456 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8457 				       MGMT_STATUS_NOT_SUPPORTED);
8459 	key_count = __le16_to_cpu(cp->key_count);
8460 	if (key_count > max_key_count) {
8461 		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
8463 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8464 				       MGMT_STATUS_INVALID_PARAMS);
/* The payload length must exactly match the declared key count. */
8467 	expected_len = struct_size(cp, keys, key_count);
8468 	if (expected_len != len) {
8469 		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
8471 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8472 				       MGMT_STATUS_INVALID_PARAMS);
8475 	bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate every entry before mutating the key store. */
8477 	for (i = 0; i < key_count; i++) {
8478 		struct mgmt_ltk_info *key = &cp->keys[i];
8480 		if (!ltk_is_valid(key))
8481 			return mgmt_cmd_status(sk, hdev->id,
8482 					       MGMT_OP_LOAD_LONG_TERM_KEYS,
8483 					       MGMT_STATUS_INVALID_PARAMS);
8488 	hci_smp_ltks_clear(hdev);
8490 	for (i = 0; i < key_count; i++) {
8491 		struct mgmt_ltk_info *key = &cp->keys[i];
8492 		u8 type, authenticated;
/* Skip keys the administrator has explicitly blocked. */
8494 		if (hci_is_blocked_key(hdev,
8495 				       HCI_BLOCKED_KEY_TYPE_LTK,
8497 			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map mgmt key type to SMP key type + authentication flag. */
8502 		switch (key->type) {
8503 		case MGMT_LTK_UNAUTHENTICATED:
8504 			authenticated = 0x00;
8505 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8507 		case MGMT_LTK_AUTHENTICATED:
8508 			authenticated = 0x01;
8509 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8511 		case MGMT_LTK_P256_UNAUTH:
8512 			authenticated = 0x00;
8513 			type = SMP_LTK_P256;
8515 		case MGMT_LTK_P256_AUTH:
8516 			authenticated = 0x01;
8517 			type = SMP_LTK_P256;
8519 		case MGMT_LTK_P256_DEBUG:
8520 			authenticated = 0x00;
8521 			type = SMP_LTK_P256_DEBUG;
8527 		hci_add_ltk(hdev, &key->addr.bdaddr,
8528 			    le_addr_type(key->addr.type), type, authenticated,
8529 			    key->val, key->enc_size, key->ediv, key->rand);
8532 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
8535 	hci_dev_unlock(hdev);
/* cmd_complete callback for MGMT_OP_GET_CONN_INFO.
 * Builds the reply from the connection's cached rssi/tx_power values on
 * success, or invalid-value sentinels on failure, then drops the
 * connection reference taken when the command was queued. */
8540 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8542 	struct hci_conn *conn = cmd->user_data;
8543 	struct mgmt_rp_get_conn_info rp;
/* The address/type echoed back were stored as the command parameter. */
8546 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8548 	if (status == MGMT_STATUS_SUCCESS) {
8549 		rp.rssi = conn->rssi;
8550 		rp.tx_power = conn->tx_power;
8551 		rp.max_tx_power = conn->max_tx_power;
8553 		rp.rssi = HCI_RSSI_INVALID;
8554 		rp.tx_power = HCI_TX_POWER_INVALID;
8555 		rp.max_tx_power = HCI_TX_POWER_INVALID;
8558 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
8559 				status, &rp, sizeof(rp));
8561 	hci_conn_drop(conn);
/* Completion callback for the Read RSSI / Read TX Power request issued by
 * get_conn_info(). Recovers the connection handle from whichever command
 * was sent last and completes the matching pending GET_CONN_INFO. */
8567 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
8570 	struct hci_cp_read_rssi *cp;
8571 	struct mgmt_pending_cmd *cmd;
8572 	struct hci_conn *conn;
8576 	bt_dev_dbg(hdev, "status 0x%02x", hci_status);
8580 	/* Commands sent in request are either Read RSSI or Read Transmit Power
8581 	 * Level so we check which one was last sent to retrieve connection
8582 	 * handle. Both commands have handle as first parameter so it's safe to
8583 	 * cast data on the same command struct.
8585 	 * First command sent is always Read RSSI and we fail only if it fails.
8586 	 * In other case we simply override error to indicate success as we
8587 	 * already remembered if TX power value is actually valid.
8589 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
8591 		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
8592 		status = MGMT_STATUS_SUCCESS;
8594 		status = mgmt_status(hci_status);
/* Neither command found in sent-cmd data: unexpected, bail out. */
8598 		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
8602 	handle = __le16_to_cpu(cp->handle);
8603 	conn = hci_conn_hash_lookup_handle(hdev, handle);
8605 		bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
/* Find the pending command tied to this specific connection. */
8610 	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
8614 	cmd->cmd_complete(cmd, status);
8615 	mgmt_pending_remove(cmd);
8618 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Replies with cached RSSI / TX power for an active connection; if the
 * cached values are older than a randomized age window, first refreshes
 * them from the controller via Read RSSI (and Read TX Power as needed)
 * and defers the reply to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,

	/* Look up the connection on the matching transport. */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,

	/* Only one outstanding query per connection. */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);

		err = hci_req_run(&req, conn_info_refresh_complete);

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,

		/* Hold connection until the refresh completes. */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	/* Cache is valid, just reply with values cached in hci_conn */
	rp.rssi = conn->rssi;
	rp.tx_power = conn->tx_power;
	rp.max_tx_power = conn->max_tx_power;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	hci_dev_unlock(hdev);
/* Completion handler for a pending Get Clock Information command: fills
 * the reply with the local clock and — when a piconet connection was
 * queried — the piconet clock and its accuracy.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
	/* May be NULL when only the local clock was requested. */
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;

	memset(&rp, 0, sizeof(rp));
	/* Echo the peer address from the original request. */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	hdev = hci_dev_get(cmd->index);
	rp.local_clock = cpu_to_le32(hdev->clock);

	rp.piconet_clock = cpu_to_le32(conn->clock);
	rp.accuracy = cpu_to_le16(conn->clock_accuracy);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,

	hci_conn_drop(conn);
/* HCI request callback for HCI_OP_READ_CLOCK issued by get_clock_info().
 * Resolves the connection (when the piconet clock was requested, i.e.
 * "which" != 0) and completes the pending MGMT_OP_GET_CLOCK_INFO command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);

	/* which != 0 means the piconet clock of a connection was read. */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CLOCK_INFO (BR/EDR only).
 *
 * Queues Read Clock for the local clock, and additionally for the piconet
 * clock when a (connected) peer address was supplied; the reply is sent
 * from get_clock_info_complete() / clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is a BR/EDR-only concept. */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,

	/* A non-ANY address selects a specific connection for the
	 * piconet clock; it must be connected.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read: the local clock (which == 0 after memset). */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	/* Hold the connection for the duration of the request. */
	hci_conn_hold(conn);
	cmd->user_data = hci_conn_get(conn);

	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	err = hci_req_run(&req, get_clock_info_complete);
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
8886 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8888 struct hci_conn *conn;
8890 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8894 if (conn->dst_type != type)
8897 if (conn->state != BT_CONNECTED)
8903 /* This function requires the caller holds hdev->lock */
8904 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8905 u8 addr_type, u8 auto_connect)
8907 struct hci_conn_params *params;
8909 params = hci_conn_params_add(hdev, addr, addr_type);
8913 if (params->auto_connect == auto_connect)
8916 list_del_init(¶ms->action);
8918 switch (auto_connect) {
8919 case HCI_AUTO_CONN_DISABLED:
8920 case HCI_AUTO_CONN_LINK_LOSS:
8921 /* If auto connect is being disabled when we're trying to
8922 * connect to device, keep connecting.
8924 if (params->explicit_connect)
8925 list_add(¶ms->action, &hdev->pend_le_conns);
8927 case HCI_AUTO_CONN_REPORT:
8928 if (params->explicit_connect)
8929 list_add(¶ms->action, &hdev->pend_le_conns);
8931 list_add(¶ms->action, &hdev->pend_le_reports);
8933 case HCI_AUTO_CONN_DIRECT:
8934 case HCI_AUTO_CONN_ALWAYS:
8935 if (!is_connected(hdev, addr, addr_type))
8936 list_add(¶ms->action, &hdev->pend_le_conns);
8940 params->auto_connect = auto_connect;
8942 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
8943 addr, addr_type, auto_connect);
/* Emit the Device Added management event for @bdaddr/@type, skipping the
 * socket that issued the command.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 type, u8 action)
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connections) is
 * supported and the address is put on the accept list.  For LE addresses
 * the action selects the auto-connect policy (0x00 report, 0x01 direct,
 * 0x02 always) applied via hci_conn_params_set().
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only the three defined action values are accepted. */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,

		hci_req_update_scan(hdev);

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action to the internal auto-connect policy. */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
	current_flags = params->current_flags;

	hci_update_background_scan(hdev);

	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,

	hci_dev_unlock(hdev);
/* Emit the Device Removed management event for @bdaddr/@type, skipping the
 * socket that issued the command.
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 type)
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
9069 static int remove_device(struct sock *sk, struct hci_dev *hdev,
9070 void *data, u16 len)
9072 struct mgmt_cp_remove_device *cp = data;
9075 bt_dev_dbg(hdev, "sock %p", sk);
9079 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9080 struct hci_conn_params *params;
9083 if (!bdaddr_type_is_valid(cp->addr.type)) {
9084 err = mgmt_cmd_complete(sk, hdev->id,
9085 MGMT_OP_REMOVE_DEVICE,
9086 MGMT_STATUS_INVALID_PARAMS,
9087 &cp->addr, sizeof(cp->addr));
9091 if (cp->addr.type == BDADDR_BREDR) {
9092 err = hci_bdaddr_list_del(&hdev->accept_list,
9096 err = mgmt_cmd_complete(sk, hdev->id,
9097 MGMT_OP_REMOVE_DEVICE,
9098 MGMT_STATUS_INVALID_PARAMS,
9104 hci_req_update_scan(hdev);
9106 device_removed(sk, hdev, &cp->addr.bdaddr,
9111 addr_type = le_addr_type(cp->addr.type);
9113 /* Kernel internally uses conn_params with resolvable private
9114 * address, but Remove Device allows only identity addresses.
9115 * Make sure it is enforced before calling
9116 * hci_conn_params_lookup.
9118 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9119 err = mgmt_cmd_complete(sk, hdev->id,
9120 MGMT_OP_REMOVE_DEVICE,
9121 MGMT_STATUS_INVALID_PARAMS,
9122 &cp->addr, sizeof(cp->addr));
9126 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9129 err = mgmt_cmd_complete(sk, hdev->id,
9130 MGMT_OP_REMOVE_DEVICE,
9131 MGMT_STATUS_INVALID_PARAMS,
9132 &cp->addr, sizeof(cp->addr));
9136 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
9137 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
9138 err = mgmt_cmd_complete(sk, hdev->id,
9139 MGMT_OP_REMOVE_DEVICE,
9140 MGMT_STATUS_INVALID_PARAMS,
9141 &cp->addr, sizeof(cp->addr));
9145 list_del(¶ms->action);
9146 list_del(¶ms->list);
9148 hci_update_background_scan(hdev);
9150 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
9152 struct hci_conn_params *p, *tmp;
9153 struct bdaddr_list *b, *btmp;
9155 if (cp->addr.type) {
9156 err = mgmt_cmd_complete(sk, hdev->id,
9157 MGMT_OP_REMOVE_DEVICE,
9158 MGMT_STATUS_INVALID_PARAMS,
9159 &cp->addr, sizeof(cp->addr));
9163 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
9164 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
9169 hci_req_update_scan(hdev);
9171 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
9172 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
9174 device_removed(sk, hdev, &p->addr, p->addr_type);
9175 if (p->explicit_connect) {
9176 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
9179 list_del(&p->action);
9184 bt_dev_dbg(hdev, "All LE connection parameters were removed");
9186 hci_update_background_scan(hdev);
9190 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
9191 MGMT_STATUS_SUCCESS, &cp->addr,
9194 hci_dev_unlock(hdev);
9198 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
9201 struct mgmt_cp_load_conn_param *cp = data;
9202 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
9203 sizeof(struct mgmt_conn_param));
9204 u16 param_count, expected_len;
9207 if (!lmp_le_capable(hdev))
9208 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9209 MGMT_STATUS_NOT_SUPPORTED);
9211 param_count = __le16_to_cpu(cp->param_count);
9212 if (param_count > max_param_count) {
9213 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
9215 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9216 MGMT_STATUS_INVALID_PARAMS);
9219 expected_len = struct_size(cp, params, param_count);
9220 if (expected_len != len) {
9221 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
9223 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9224 MGMT_STATUS_INVALID_PARAMS);
9227 bt_dev_dbg(hdev, "param_count %u", param_count);
9231 hci_conn_params_clear_disabled(hdev);
9233 for (i = 0; i < param_count; i++) {
9234 struct mgmt_conn_param *param = &cp->params[i];
9235 struct hci_conn_params *hci_param;
9236 u16 min, max, latency, timeout;
9239 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
9242 if (param->addr.type == BDADDR_LE_PUBLIC) {
9243 addr_type = ADDR_LE_DEV_PUBLIC;
9244 } else if (param->addr.type == BDADDR_LE_RANDOM) {
9245 addr_type = ADDR_LE_DEV_RANDOM;
9247 bt_dev_err(hdev, "ignoring invalid connection parameters");
9251 min = le16_to_cpu(param->min_interval);
9252 max = le16_to_cpu(param->max_interval);
9253 latency = le16_to_cpu(param->latency);
9254 timeout = le16_to_cpu(param->timeout);
9256 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
9257 min, max, latency, timeout);
9259 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
9260 bt_dev_err(hdev, "ignoring invalid connection parameters");
9264 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
9267 bt_dev_err(hdev, "failed to add connection parameters");
9271 hci_param->conn_min_interval = min;
9272 hci_param->conn_max_interval = max;
9273 hci_param->conn_latency = latency;
9274 hci_param->supervision_timeout = timeout;
9277 hci_dev_unlock(hdev);
9279 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG.
 *
 * Toggles the HCI_EXT_CONFIGURED flag (only valid while powered off and
 * when the controller declares HCI_QUIRK_EXTERNAL_CONFIG) and, when the
 * configured state of the controller changes as a result, moves the index
 * between the configured and unconfigured lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
	struct mgmt_cp_set_external_config *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Track whether the flag value actually changed. */
	changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);

	err = new_options(hdev, sk);

	/* If the configured state flipped, re-announce the index. */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);

	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS.
 *
 * Stores the public address to program on power-on (only valid while
 * powered off, with a non-ANY address, on controllers providing a
 * set_bdaddr driver callback).  If the address makes an unconfigured
 * controller fully configured, the index is re-announced as configured
 * and a power-on is scheduled.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
	struct mgmt_cp_set_public_address *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);

	hci_dev_unlock(hdev);
/* Emit a Device Name Update event for a BR/EDR peer, carrying the name as
 * an EIR_NAME_COMPLETE field.
 */
int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
	struct mgmt_ev_device_name_update *ev = (void *)buf;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = BDADDR_BREDR;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,

	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
			  sizeof(*ev) + eir_len, NULL);
/* Emit a Connection Update Failed event for an LE peer with the given
 * failure status.
 */
int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status)
	struct mgmt_ev_conn_update_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
			  &ev, sizeof(ev), NULL);
/* Emit a Connection Updated event carrying the new LE connection
 * parameters (interval, latency, supervision timeout).
 */
int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u16 conn_interval,
u16 conn_latency, u16 supervision_timeout)
	struct mgmt_ev_conn_updated ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.conn_interval = cpu_to_le16(conn_interval);
	ev.conn_latency = cpu_to_le16(conn_latency);
	ev.supervision_timeout = cpu_to_le16(supervision_timeout);

	return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
			  &ev, sizeof(ev), NULL);
/* le device found event - Pass adv type.
 *
 * Builds an MGMT_EV_LE_DEVICE_FOUND event from advertising/scan-response
 * data, appending the class of device when available and not already
 * present in the EIR.  Dropped silently when no discovery is active or
 * the combined data would not fit the event buffer.
 */
void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
	struct mgmt_ev_le_device_found *ev = (void *)buf;

	/* Only report while some form of discovery is running. */
	if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = cpu_to_le32(flags);
	ev->adv_type = adv_type;

	memcpy(ev->eir, eir, eir_len);

	/* Append CoD only if the EIR does not already carry one. */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* HCI request callback for the Read Local OOB (Extended) Data command
 * issued by read_local_ssp_oob_req().
 *
 * Packages the returned hash/randomizer values (C192/R192 and, for secure
 * connections, C256/R256) as EIR fields, completes the pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command and broadcasts the updated data
 * to sockets subscribed to OOB data events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb)
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);

	mgmt_cp = cmd->param;

	status = mgmt_status(status);
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp;

		/* Sanity-check the reply size before trusting its fields. */
		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
		status = MGMT_STATUS_SUCCESS;
		rp = (void *)skb->data;

		/* EIR: type+len headers plus 16-byte hash and randomizer. */
		eir_len = 5 + 18 + 18;
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
		status = MGMT_STATUS_SUCCESS;
		rp = (void *)skb->data;

		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			eir_len = 5 + 18 + 18;
		/* Both P-192 and P-256 value pairs. */
		eir_len = 5 + 18 + 18 + 18 + 18;

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	eir_len = eir_append_data(mgmt_rp->eir, eir_len,
				  EIR_SSP_HASH_C192, h192, 16);
	eir_len = eir_append_data(mgmt_rp->eir, eir_len,
				  EIR_SSP_RAND_R192, r192, 16);

	eir_len = eir_append_data(mgmt_rp->eir, eir_len,
				  EIR_SSP_HASH_C256, h256, 16);
	eir_len = eir_append_data(mgmt_rp->eir, eir_len,
				  EIR_SSP_RAND_R256, r256, 16);

	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)

	/* Requester implicitly subscribes to future OOB data updates. */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);

	mgmt_pending_remove(cmd);
/* Queue a Read Local OOB Data request for a BR/EDR SSP OOB query, using
 * the extended variant when BR/EDR secure connections are enabled.  The
 * reply is delivered from read_local_oob_ext_data_complete().
 */
static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
struct mgmt_cp_read_local_oob_ext_data *cp)
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
	mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * For BR/EDR the query is forwarded to the controller (when SSP is
 * enabled); for LE the OOB data (address, role, SC confirm/random values
 * and AD flags) is assembled locally.  The result is also broadcast to
 * sockets subscribed to OOB data events.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	u8 status, flags, role, addr[7], hash[16], rand[16];

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		/* cp->type is a bitmask of requested address types. */
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			eir_len = 9 + 3 + 18 + 18 + 3;
			status = MGMT_STATUS_INVALID_PARAMS;
		status = MGMT_STATUS_NOT_POWERED;

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);

	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Defer to a controller query; reply comes later. */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
		status = MGMT_STATUS_FAILED;
		eir_len = eir_append_data(rp->eir, eir_len,
					  hdev->dev_class, 3);
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;

		/* Pick static vs public address per the usual LE rules. */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			memcpy(addr, &hdev->bdaddr, 6);

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  hash, sizeof(hash));
			eir_len = eir_append_data(rp->eir, eir_len,
						  rand, sizeof(rand));

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));

	hci_dev_unlock(hdev);

	/* Requester implicitly subscribes to future OOB data updates. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of advertising flags this controller supports, based
 * on its TX-power reporting, extended-advertising capability and the
 * secondary PHYs it declares in le_features.
 */
static u32 get_supported_adv_flags(struct hci_dev *hdev)
	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
	flags |= MGMT_ADV_FLAG_APPEARANCE;
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
	flags |= MGMT_ADV_PARAM_DURATION;
	flags |= MGMT_ADV_PARAM_TIMEOUT;
	flags |= MGMT_ADV_PARAM_INTERVALS;
	flags |= MGMT_ADV_PARAM_TX_POWER;
	flags |= MGMT_ADV_PARAM_SCAN_RSP;

	/* In extended adv TX_POWER returned from Set Adv Param
	 * will be always valid.
	 */
	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
	    ext_adv_capable(hdev))
		flags |= MGMT_ADV_FLAG_TX_POWER;

	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;
		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;

		/* Secondary PHYs depend on the controller's LE features. */
		if (hdev->le_features[1] & HCI_LE_PHY_2M)
			flags |= MGMT_ADV_FLAG_SEC_2M;

		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
			flags |= MGMT_ADV_FLAG_SEC_CODED;
/* Handler for MGMT_OP_READ_ADV_FEATURES.
 *
 * Replies with the supported advertising flags, data-length limits,
 * instance limits and the list of currently configured advertising
 * instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
	struct mgmt_rp_read_adv_features *rp;
	struct adv_info *adv_instance;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	/* Enabling the experimental LL Privay support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* One trailing byte per configured instance identifier. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
		hci_dev_unlock(hdev);

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);
/* Length the local name would occupy when appended as an AD field
 * (computed by rendering it into a scratch buffer).
 */
static u8 calculate_name_len(struct hci_dev *hdev)
	/* Worst case: short name plus AD header bytes. */
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return append_local_name(hdev, buf, 0);
/* Maximum user-supplied TLV payload length for advertising or scan
 * response data, after reserving room for the fields the kernel appends
 * itself (Flags, TX power, local name, appearance) per @adv_flags.
 */
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
	u8 max_len = HCI_MAX_AD_LENGTH;

	/* Kernel-managed Flags field consumes part of the budget. */
	if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
			 MGMT_ADV_FLAG_LIMITED_DISCOV |
			 MGMT_ADV_FLAG_MANAGED_FLAGS))

	if (adv_flags & MGMT_ADV_FLAG_TX_POWER)

	if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		max_len -= calculate_name_len(hdev);

	if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
9906 static bool flags_managed(u32 adv_flags)
9908 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
9909 MGMT_ADV_FLAG_LIMITED_DISCOV |
9910 MGMT_ADV_FLAG_MANAGED_FLAGS);
9913 static bool tx_power_managed(u32 adv_flags)
9915 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
9918 static bool name_managed(u32 adv_flags)
9920 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
9923 static bool appearance_managed(u32 adv_flags)
9925 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising / scan-response TLV data.
 *
 * Rejects data that exceeds the available length budget, contains a field
 * the kernel manages itself (Flags, TX power, name, appearance per
 * @adv_flags), or whose length bytes are inconsistent with @len.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
u8 len, bool is_adv_data)
	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {

		/* data[i] is the field length, data[i + 1] the field type. */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
9973 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
9975 u32 supported_flags, phy_flags;
9977 /* The current implementation only supports a subset of the specified
9978 * flags. Also need to check mutual exclusiveness of sec flags.
9980 supported_flags = get_supported_adv_flags(hdev);
9981 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
9982 if (adv_flags & ~supported_flags ||
9983 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
9989 static bool adv_busy(struct hci_dev *hdev)
9991 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9992 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9993 pending_find(MGMT_OP_SET_LE, hdev) ||
9994 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
9995 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
9998 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
10001 struct mgmt_pending_cmd *cmd;
10002 struct mgmt_cp_add_advertising *cp;
10003 struct mgmt_rp_add_advertising rp;
10004 struct adv_info *adv_instance, *n;
10007 bt_dev_dbg(hdev, "status %u", status);
10009 hci_dev_lock(hdev);
10011 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
10013 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
10015 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
10016 if (!adv_instance->pending)
10020 adv_instance->pending = false;
10024 instance = adv_instance->instance;
10026 if (hdev->cur_adv_instance == instance)
10027 cancel_adv_timeout(hdev);
10029 hci_remove_adv_instance(hdev, instance);
10030 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
10037 rp.instance = cp->instance;
10040 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10041 mgmt_status(status));
10043 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10044 mgmt_status(status), &rp, sizeof(rp));
10046 mgmt_pending_remove(cmd);
10049 hci_dev_unlock(hdev);
10052 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
10053 void *data, u16 data_len)
10055 struct mgmt_cp_add_advertising *cp = data;
10056 struct mgmt_rp_add_advertising rp;
10059 u16 timeout, duration;
10060 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
10061 u8 schedule_instance = 0;
10062 struct adv_info *next_instance;
10064 struct mgmt_pending_cmd *cmd;
10065 struct hci_request req;
10067 bt_dev_dbg(hdev, "sock %p", sk);
10069 status = mgmt_le_support(hdev);
10071 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10074 /* Enabling the experimental LL Privay support disables support for
10077 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
10078 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10079 MGMT_STATUS_NOT_SUPPORTED);
10081 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10082 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10083 MGMT_STATUS_INVALID_PARAMS);
10085 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
10086 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10087 MGMT_STATUS_INVALID_PARAMS);
10089 flags = __le32_to_cpu(cp->flags);
10090 timeout = __le16_to_cpu(cp->timeout);
10091 duration = __le16_to_cpu(cp->duration);
10093 if (!requested_adv_flags_are_valid(hdev, flags))
10094 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10095 MGMT_STATUS_INVALID_PARAMS);
10097 hci_dev_lock(hdev);
10099 if (timeout && !hdev_is_powered(hdev)) {
10100 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10101 MGMT_STATUS_REJECTED);
10105 if (adv_busy(hdev)) {
10106 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10111 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
10112 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
10113 cp->scan_rsp_len, false)) {
10114 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10115 MGMT_STATUS_INVALID_PARAMS);
10119 err = hci_add_adv_instance(hdev, cp->instance, flags,
10120 cp->adv_data_len, cp->data,
10122 cp->data + cp->adv_data_len,
10124 HCI_ADV_TX_POWER_NO_PREFERENCE,
10125 hdev->le_adv_min_interval,
10126 hdev->le_adv_max_interval);
10128 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10129 MGMT_STATUS_FAILED);
10133 /* Only trigger an advertising added event if a new instance was
10136 if (hdev->adv_instance_cnt > prev_instance_cnt)
10137 mgmt_advertising_added(sk, hdev, cp->instance);
10139 if (hdev->cur_adv_instance == cp->instance) {
10140 /* If the currently advertised instance is being changed then
10141 * cancel the current advertising and schedule the next
10142 * instance. If there is only one instance then the overridden
10143 * advertising data will be visible right away.
10145 cancel_adv_timeout(hdev);
10147 next_instance = hci_get_next_instance(hdev, cp->instance);
10149 schedule_instance = next_instance->instance;
10150 } else if (!hdev->adv_instance_timeout) {
10151 /* Immediately advertise the new instance if no other
10152 * instance is currently being advertised.
10154 schedule_instance = cp->instance;
10157 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
10158 * there is no instance to be advertised then we have no HCI
10159 * communication to make. Simply return.
10161 if (!hdev_is_powered(hdev) ||
10162 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10163 !schedule_instance) {
10164 rp.instance = cp->instance;
10165 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10166 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10170 /* We're good to go, update advertising data, parameters, and start
10173 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
10180 hci_req_init(&req, hdev);
10182 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
10185 err = hci_req_run(&req, add_advertising_complete);
10188 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10189 MGMT_STATUS_FAILED);
10190 mgmt_pending_remove(cmd);
10194 hci_dev_unlock(hdev);
10199 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
10202 struct mgmt_pending_cmd *cmd;
10203 struct mgmt_cp_add_ext_adv_params *cp;
10204 struct mgmt_rp_add_ext_adv_params rp;
10205 struct adv_info *adv_instance;
10208 BT_DBG("%s", hdev->name);
10210 hci_dev_lock(hdev);
10212 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
10217 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10221 rp.instance = cp->instance;
10222 rp.tx_power = adv_instance->tx_power;
10224 /* While we're at it, inform userspace of the available space for this
10225 * advertisement, given the flags that will be used.
10227 flags = __le32_to_cpu(cp->flags);
10228 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10229 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10232 /* If this advertisement was previously advertising and we
10233 * failed to update it, we signal that it has been removed and
10234 * delete its structure
10236 if (!adv_instance->pending)
10237 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
10239 hci_remove_adv_instance(hdev, cp->instance);
10241 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10242 mgmt_status(status));
10245 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10246 mgmt_status(status), &rp, sizeof(rp));
10251 mgmt_pending_remove(cmd);
10253 hci_dev_unlock(hdev);
10256 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
10257 void *data, u16 data_len)
10259 struct mgmt_cp_add_ext_adv_params *cp = data;
10260 struct mgmt_rp_add_ext_adv_params rp;
10261 struct mgmt_pending_cmd *cmd = NULL;
10262 struct adv_info *adv_instance;
10263 struct hci_request req;
10264 u32 flags, min_interval, max_interval;
10265 u16 timeout, duration;
10270 BT_DBG("%s", hdev->name);
10272 status = mgmt_le_support(hdev);
10274 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10277 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10278 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10279 MGMT_STATUS_INVALID_PARAMS);
10281 /* The purpose of breaking add_advertising into two separate MGMT calls
10282 * for params and data is to allow more parameters to be added to this
10283 * structure in the future. For this reason, we verify that we have the
10284 * bare minimum structure we know of when the interface was defined. Any
10285 * extra parameters we don't know about will be ignored in this request.
10287 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
10288 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10289 MGMT_STATUS_INVALID_PARAMS);
10291 flags = __le32_to_cpu(cp->flags);
10293 if (!requested_adv_flags_are_valid(hdev, flags))
10294 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10295 MGMT_STATUS_INVALID_PARAMS);
10297 hci_dev_lock(hdev);
10299 /* In new interface, we require that we are powered to register */
10300 if (!hdev_is_powered(hdev)) {
10301 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10302 MGMT_STATUS_REJECTED);
10306 if (adv_busy(hdev)) {
10307 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10312 /* Parse defined parameters from request, use defaults otherwise */
10313 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
10314 __le16_to_cpu(cp->timeout) : 0;
10316 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
10317 __le16_to_cpu(cp->duration) :
10318 hdev->def_multi_adv_rotation_duration;
10320 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10321 __le32_to_cpu(cp->min_interval) :
10322 hdev->le_adv_min_interval;
10324 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10325 __le32_to_cpu(cp->max_interval) :
10326 hdev->le_adv_max_interval;
10328 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
10330 HCI_ADV_TX_POWER_NO_PREFERENCE;
10332 /* Create advertising instance with no advertising or response data */
10333 err = hci_add_adv_instance(hdev, cp->instance, flags,
10334 0, NULL, 0, NULL, timeout, duration,
10335 tx_power, min_interval, max_interval);
10338 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10339 MGMT_STATUS_FAILED);
10343 /* Submit request for advertising params if ext adv available */
10344 if (ext_adv_capable(hdev)) {
10345 hci_req_init(&req, hdev);
10346 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10348 /* Updating parameters of an active instance will return a
10349 * Command Disallowed error, so we must first disable the
10350 * instance if it is active.
10352 if (!adv_instance->pending)
10353 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10355 __hci_req_setup_ext_adv_instance(&req, cp->instance);
10357 err = hci_req_run(&req, add_ext_adv_params_complete);
10360 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
10361 hdev, data, data_len);
10364 hci_remove_adv_instance(hdev, cp->instance);
10369 rp.instance = cp->instance;
10370 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
10371 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10372 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10373 err = mgmt_cmd_complete(sk, hdev->id,
10374 MGMT_OP_ADD_EXT_ADV_PARAMS,
10375 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10379 hci_dev_unlock(hdev);
10384 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
10387 struct mgmt_cp_add_ext_adv_data *cp = data;
10388 struct mgmt_rp_add_ext_adv_data rp;
10389 u8 schedule_instance = 0;
10390 struct adv_info *next_instance;
10391 struct adv_info *adv_instance;
10393 struct mgmt_pending_cmd *cmd;
10394 struct hci_request req;
10396 BT_DBG("%s", hdev->name);
10398 hci_dev_lock(hdev);
10400 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10402 if (!adv_instance) {
10403 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10404 MGMT_STATUS_INVALID_PARAMS);
10408 /* In new interface, we require that we are powered to register */
10409 if (!hdev_is_powered(hdev)) {
10410 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10411 MGMT_STATUS_REJECTED);
10412 goto clear_new_instance;
10415 if (adv_busy(hdev)) {
10416 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10418 goto clear_new_instance;
10421 /* Validate new data */
10422 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
10423 cp->adv_data_len, true) ||
10424 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
10425 cp->adv_data_len, cp->scan_rsp_len, false)) {
10426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10427 MGMT_STATUS_INVALID_PARAMS);
10428 goto clear_new_instance;
10431 /* Set the data in the advertising instance */
10432 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
10433 cp->data, cp->scan_rsp_len,
10434 cp->data + cp->adv_data_len);
10436 /* We're good to go, update advertising data, parameters, and start
10440 hci_req_init(&req, hdev);
10442 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
10444 if (ext_adv_capable(hdev)) {
10445 __hci_req_update_adv_data(&req, cp->instance);
10446 __hci_req_update_scan_rsp_data(&req, cp->instance);
10447 __hci_req_enable_ext_advertising(&req, cp->instance);
10450 /* If using software rotation, determine next instance to use */
10452 if (hdev->cur_adv_instance == cp->instance) {
10453 /* If the currently advertised instance is being changed
10454 * then cancel the current advertising and schedule the
10455 * next instance. If there is only one instance then the
10456 * overridden advertising data will be visible right
10459 cancel_adv_timeout(hdev);
10461 next_instance = hci_get_next_instance(hdev,
10464 schedule_instance = next_instance->instance;
10465 } else if (!hdev->adv_instance_timeout) {
10466 /* Immediately advertise the new instance if no other
10467 * instance is currently being advertised.
10469 schedule_instance = cp->instance;
10472 /* If the HCI_ADVERTISING flag is set or there is no instance to
10473 * be advertised then we have no HCI communication to make.
10476 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10477 !schedule_instance) {
10478 if (adv_instance->pending) {
10479 mgmt_advertising_added(sk, hdev, cp->instance);
10480 adv_instance->pending = false;
10482 rp.instance = cp->instance;
10483 err = mgmt_cmd_complete(sk, hdev->id,
10484 MGMT_OP_ADD_EXT_ADV_DATA,
10485 MGMT_STATUS_SUCCESS, &rp,
10490 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
10494 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
10498 goto clear_new_instance;
10502 err = hci_req_run(&req, add_advertising_complete);
10505 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10506 MGMT_STATUS_FAILED);
10507 mgmt_pending_remove(cmd);
10508 goto clear_new_instance;
10511 /* We were successful in updating data, so trigger advertising_added
10512 * event if this is an instance that wasn't previously advertising. If
10513 * a failure occurs in the requests we initiated, we will remove the
10514 * instance again in add_advertising_complete
10516 if (adv_instance->pending)
10517 mgmt_advertising_added(sk, hdev, cp->instance);
10521 clear_new_instance:
10522 hci_remove_adv_instance(hdev, cp->instance);
10525 hci_dev_unlock(hdev);
10530 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
10533 struct mgmt_pending_cmd *cmd;
10534 struct mgmt_cp_remove_advertising *cp;
10535 struct mgmt_rp_remove_advertising rp;
10537 bt_dev_dbg(hdev, "status %u", status);
10539 hci_dev_lock(hdev);
10541 /* A failure status here only means that we failed to disable
10542 * advertising. Otherwise, the advertising instance has been removed,
10543 * so report success.
10545 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
10550 rp.instance = cp->instance;
10552 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
10554 mgmt_pending_remove(cmd);
10557 hci_dev_unlock(hdev);
10560 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
10561 void *data, u16 data_len)
10563 struct mgmt_cp_remove_advertising *cp = data;
10564 struct mgmt_rp_remove_advertising rp;
10565 struct mgmt_pending_cmd *cmd;
10566 struct hci_request req;
10569 bt_dev_dbg(hdev, "sock %p", sk);
10571 /* Enabling the experimental LL Privay support disables support for
10574 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
10575 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
10576 MGMT_STATUS_NOT_SUPPORTED);
10578 hci_dev_lock(hdev);
10580 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
10581 err = mgmt_cmd_status(sk, hdev->id,
10582 MGMT_OP_REMOVE_ADVERTISING,
10583 MGMT_STATUS_INVALID_PARAMS);
10587 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
10588 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
10589 pending_find(MGMT_OP_SET_LE, hdev)) {
10590 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10595 if (list_empty(&hdev->adv_instances)) {
10596 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10597 MGMT_STATUS_INVALID_PARAMS);
10601 hci_req_init(&req, hdev);
10603 /* If we use extended advertising, instance is disabled and removed */
10604 if (ext_adv_capable(hdev)) {
10605 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10606 __hci_req_remove_ext_adv_instance(&req, cp->instance);
10609 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
10611 if (list_empty(&hdev->adv_instances))
10612 __hci_req_disable_advertising(&req);
10614 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
10615 * flag is set or the device isn't powered then we have no HCI
10616 * communication to make. Simply return.
10618 if (skb_queue_empty(&req.cmd_q) ||
10619 !hdev_is_powered(hdev) ||
10620 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
10621 hci_req_purge(&req);
10622 rp.instance = cp->instance;
10623 err = mgmt_cmd_complete(sk, hdev->id,
10624 MGMT_OP_REMOVE_ADVERTISING,
10625 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10629 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
10636 err = hci_req_run(&req, remove_advertising_complete);
10638 mgmt_pending_remove(cmd);
10641 hci_dev_unlock(hdev);
10646 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
10647 void *data, u16 data_len)
10649 struct mgmt_cp_get_adv_size_info *cp = data;
10650 struct mgmt_rp_get_adv_size_info rp;
10651 u32 flags, supported_flags;
10654 bt_dev_dbg(hdev, "sock %p", sk);
10656 if (!lmp_le_capable(hdev))
10657 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10658 MGMT_STATUS_REJECTED);
10660 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10661 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10662 MGMT_STATUS_INVALID_PARAMS);
10664 flags = __le32_to_cpu(cp->flags);
10666 /* The current implementation only supports a subset of the specified
10669 supported_flags = get_supported_adv_flags(hdev);
10670 if (flags & ~supported_flags)
10671 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10672 MGMT_STATUS_INVALID_PARAMS);
10674 rp.instance = cp->instance;
10675 rp.flags = cp->flags;
10676 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10677 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10679 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10680 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10685 static const struct hci_mgmt_handler mgmt_handlers[] = {
10686 { NULL }, /* 0x0000 (no command) */
10687 { read_version, MGMT_READ_VERSION_SIZE,
10689 HCI_MGMT_UNTRUSTED },
10690 { read_commands, MGMT_READ_COMMANDS_SIZE,
10692 HCI_MGMT_UNTRUSTED },
10693 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
10695 HCI_MGMT_UNTRUSTED },
10696 { read_controller_info, MGMT_READ_INFO_SIZE,
10697 HCI_MGMT_UNTRUSTED },
10698 { set_powered, MGMT_SETTING_SIZE },
10699 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
10700 { set_connectable, MGMT_SETTING_SIZE },
10701 { set_fast_connectable, MGMT_SETTING_SIZE },
10702 { set_bondable, MGMT_SETTING_SIZE },
10703 { set_link_security, MGMT_SETTING_SIZE },
10704 { set_ssp, MGMT_SETTING_SIZE },
10705 { set_hs, MGMT_SETTING_SIZE },
10706 { set_le, MGMT_SETTING_SIZE },
10707 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
10708 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
10709 { add_uuid, MGMT_ADD_UUID_SIZE },
10710 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
10711 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
10712 HCI_MGMT_VAR_LEN },
10713 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
10714 HCI_MGMT_VAR_LEN },
10715 { disconnect, MGMT_DISCONNECT_SIZE },
10716 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
10717 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
10718 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
10719 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
10720 { pair_device, MGMT_PAIR_DEVICE_SIZE },
10721 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
10722 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
10723 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
10724 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
10725 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
10726 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
10727 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
10728 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
10729 HCI_MGMT_VAR_LEN },
10730 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
10731 { start_discovery, MGMT_START_DISCOVERY_SIZE },
10732 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
10733 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
10734 { block_device, MGMT_BLOCK_DEVICE_SIZE },
10735 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
10736 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
10737 { set_advertising, MGMT_SETTING_SIZE },
10738 { set_bredr, MGMT_SETTING_SIZE },
10739 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
10740 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
10741 { set_secure_conn, MGMT_SETTING_SIZE },
10742 { set_debug_keys, MGMT_SETTING_SIZE },
10743 { set_privacy, MGMT_SET_PRIVACY_SIZE },
10744 { load_irks, MGMT_LOAD_IRKS_SIZE,
10745 HCI_MGMT_VAR_LEN },
10746 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
10747 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
10748 { add_device, MGMT_ADD_DEVICE_SIZE },
10749 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
10750 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
10751 HCI_MGMT_VAR_LEN },
10752 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
10754 HCI_MGMT_UNTRUSTED },
10755 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
10756 HCI_MGMT_UNCONFIGURED |
10757 HCI_MGMT_UNTRUSTED },
10758 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
10759 HCI_MGMT_UNCONFIGURED },
10760 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
10761 HCI_MGMT_UNCONFIGURED },
10762 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
10763 HCI_MGMT_VAR_LEN },
10764 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
10765 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
10767 HCI_MGMT_UNTRUSTED },
10768 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
10769 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
10770 HCI_MGMT_VAR_LEN },
10771 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
10772 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
10773 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
10774 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
10775 HCI_MGMT_UNTRUSTED },
10776 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
10777 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
10778 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
10779 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
10780 HCI_MGMT_VAR_LEN },
10781 { set_wideband_speech, MGMT_SETTING_SIZE },
10782 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
10783 HCI_MGMT_UNTRUSTED },
10784 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
10785 HCI_MGMT_UNTRUSTED |
10786 HCI_MGMT_HDEV_OPTIONAL },
10787 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
10789 HCI_MGMT_HDEV_OPTIONAL },
10790 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
10791 HCI_MGMT_UNTRUSTED },
10792 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
10793 HCI_MGMT_VAR_LEN },
10794 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
10795 HCI_MGMT_UNTRUSTED },
10796 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
10797 HCI_MGMT_VAR_LEN },
10798 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
10799 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
10800 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
10801 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
10802 HCI_MGMT_VAR_LEN },
10803 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
10804 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
10805 HCI_MGMT_VAR_LEN },
10806 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
10807 HCI_MGMT_VAR_LEN },
10808 { add_adv_patterns_monitor_rssi,
10809 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
10810 HCI_MGMT_VAR_LEN },
10814 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
10815 { NULL }, /* 0x0000 (no command) */
10816 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
10817 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
10818 HCI_MGMT_VAR_LEN },
10819 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
10820 HCI_MGMT_VAR_LEN },
10821 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
10822 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
10823 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
10824 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
10825 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
10826 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
10827 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
10828 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
10829 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
10830 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
10831 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
10832 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
10833 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
10834 { get_adv_tx_power, MGMT_GET_ADV_TX_POWER_SIZE },
10835 { enable_bt_6lowpan, MGMT_ENABLE_BT_6LOWPAN_SIZE },
10836 { connect_bt_6lowpan, MGMT_CONNECT_6LOWPAN_SIZE },
10837 { disconnect_bt_6lowpan, MGMT_DISCONNECT_6LOWPAN_SIZE },
10838 { read_maximum_le_data_length,
10839 MGMT_LE_READ_MAXIMUM_DATA_LENGTH_SIZE },
10840 { write_host_suggested_le_data_length,
10841 MGMT_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH_SIZE },
10842 { read_host_suggested_data_length,
10843 MGMT_LE_READ_HOST_SUGGESTED_DATA_LENGTH_SIZE },
10847 void mgmt_index_added(struct hci_dev *hdev)
10849 struct mgmt_ev_ext_index ev;
10851 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10854 switch (hdev->dev_type) {
10856 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10857 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
10858 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10861 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
10862 HCI_MGMT_INDEX_EVENTS);
10873 ev.bus = hdev->bus;
10875 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
10876 HCI_MGMT_EXT_INDEX_EVENTS);
10879 void mgmt_index_removed(struct hci_dev *hdev)
10881 struct mgmt_ev_ext_index ev;
10882 u8 status = MGMT_STATUS_INVALID_INDEX;
10884 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10887 switch (hdev->dev_type) {
10889 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10891 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10892 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
10893 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10896 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
10897 HCI_MGMT_INDEX_EVENTS);
10908 ev.bus = hdev->bus;
10910 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
10911 HCI_MGMT_EXT_INDEX_EVENTS);
10914 /* This function requires the caller holds hdev->lock */
10915 static void restart_le_actions(struct hci_dev *hdev)
10917 struct hci_conn_params *p;
10919 list_for_each_entry(p, &hdev->le_conn_params, list) {
10920 /* Needed for AUTO_OFF case where might not "really"
10921 * have been powered off.
10923 list_del_init(&p->action);
10925 switch (p->auto_connect) {
10926 case HCI_AUTO_CONN_DIRECT:
10927 case HCI_AUTO_CONN_ALWAYS:
10928 list_add(&p->action, &hdev->pend_le_conns);
10930 case HCI_AUTO_CONN_REPORT:
10931 list_add(&p->action, &hdev->pend_le_reports);
10939 void mgmt_power_on(struct hci_dev *hdev, int err)
10941 struct cmd_lookup match = { NULL, hdev };
10943 bt_dev_dbg(hdev, "err %d", err);
10945 hci_dev_lock(hdev);
10948 restart_le_actions(hdev);
10949 hci_update_background_scan(hdev);
10952 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10954 new_settings(hdev, match.sk);
10957 sock_put(match.sk);
10959 hci_dev_unlock(hdev);
10962 void __mgmt_power_off(struct hci_dev *hdev)
10964 struct cmd_lookup match = { NULL, hdev };
10965 u8 status, zero_cod[] = { 0, 0, 0 };
10967 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10969 /* If the power off is because of hdev unregistration let
10970 * use the appropriate INVALID_INDEX status. Otherwise use
10971 * NOT_POWERED. We cover both scenarios here since later in
10972 * mgmt_index_removed() any hci_conn callbacks will have already
10973 * been triggered, potentially causing misleading DISCONNECTED
10974 * status responses.
10976 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
10977 status = MGMT_STATUS_INVALID_INDEX;
10979 status = MGMT_STATUS_NOT_POWERED;
10981 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10983 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
10984 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
10985 zero_cod, sizeof(zero_cod),
10986 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10987 ext_info_changed(hdev, NULL);
10990 new_settings(hdev, match.sk);
10993 sock_put(match.sk);
10996 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
10998 struct mgmt_pending_cmd *cmd;
11001 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11005 if (err == -ERFKILL)
11006 status = MGMT_STATUS_RFKILLED;
11008 status = MGMT_STATUS_FAILED;
11010 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
11012 mgmt_pending_remove(cmd);
11015 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
11018 struct mgmt_ev_new_link_key ev;
11020 memset(&ev, 0, sizeof(ev));
11022 ev.store_hint = persistent;
11023 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11024 ev.key.addr.type = BDADDR_BREDR;
11025 ev.key.type = key->type;
11026 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
11027 ev.key.pin_len = key->pin_len;
11029 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
11032 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
11034 switch (ltk->type) {
11036 case SMP_LTK_RESPONDER:
11037 if (ltk->authenticated)
11038 return MGMT_LTK_AUTHENTICATED;
11039 return MGMT_LTK_UNAUTHENTICATED;
11041 if (ltk->authenticated)
11042 return MGMT_LTK_P256_AUTH;
11043 return MGMT_LTK_P256_UNAUTH;
11044 case SMP_LTK_P256_DEBUG:
11045 return MGMT_LTK_P256_DEBUG;
11048 return MGMT_LTK_UNAUTHENTICATED;
11051 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
11053 struct mgmt_ev_new_long_term_key ev;
11055 memset(&ev, 0, sizeof(ev));
11057 /* Devices using resolvable or non-resolvable random addresses
11058 * without providing an identity resolving key don't require
11059 * to store long term keys. Their addresses will change the
11060 * next time around.
11062 * Only when a remote device provides an identity address
11063 * make sure the long term key is stored. If the remote
11064 * identity is known, the long term keys are internally
11065 * mapped to the identity address. So allow static random
11066 * and public addresses here.
11068 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11069 (key->bdaddr.b[5] & 0xc0) != 0xc0)
11070 ev.store_hint = 0x00;
11072 ev.store_hint = persistent;
11074 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11075 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
11076 ev.key.type = mgmt_ltk_type(key);
11077 ev.key.enc_size = key->enc_size;
11078 ev.key.ediv = key->ediv;
11079 ev.key.rand = key->rand;
11081 if (key->type == SMP_LTK)
11082 ev.key.initiator = 1;
11084 /* Make sure we copy only the significant bytes based on the
11085 * encryption key size, and set the rest of the value to zeroes.
11087 memcpy(ev.key.val, key->val, key->enc_size);
11088 memset(ev.key.val + key->enc_size, 0,
11089 sizeof(ev.key.val) - key->enc_size);
11091 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
11094 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
11096 struct mgmt_ev_new_irk ev;
11098 memset(&ev, 0, sizeof(ev));
11100 ev.store_hint = persistent;
11102 bacpy(&ev.rpa, &irk->rpa);
11103 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
11104 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
11105 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
11107 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
11110 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
11113 struct mgmt_ev_new_csrk ev;
11115 memset(&ev, 0, sizeof(ev));
11117 /* Devices using resolvable or non-resolvable random addresses
11118 * without providing an identity resolving key don't require
11119 * to store signature resolving keys. Their addresses will change
11120 * the next time around.
11122 * Only when a remote device provides an identity address
11123 * make sure the signature resolving key is stored. So allow
11124 * static random and public addresses here.
11126 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11127 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
11128 ev.store_hint = 0x00;
11130 ev.store_hint = persistent;
11132 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
11133 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
11134 ev.key.type = csrk->type;
11135 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
11137 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
11140 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
11141 u8 bdaddr_type, u8 store_hint, u16 min_interval,
11142 u16 max_interval, u16 latency, u16 timeout)
11144 struct mgmt_ev_new_conn_param ev;
11146 if (!hci_is_identity_address(bdaddr, bdaddr_type))
11149 memset(&ev, 0, sizeof(ev));
11150 bacpy(&ev.addr.bdaddr, bdaddr);
11151 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
11152 ev.store_hint = store_hint;
11153 ev.min_interval = cpu_to_le16(min_interval);
11154 ev.max_interval = cpu_to_le16(max_interval);
11155 ev.latency = cpu_to_le16(latency);
11156 ev.timeout = cpu_to_le16(timeout);
11158 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
11161 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
11162 u8 *name, u8 name_len)
11165 struct mgmt_ev_device_connected *ev = (void *) buf;
11169 bacpy(&ev->addr.bdaddr, &conn->dst);
11170 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11173 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
11175 ev->flags = __cpu_to_le32(flags);
11177 /* We must ensure that the EIR Data fields are ordered and
11178 * unique. Keep it simple for now and avoid the problem by not
11179 * adding any BR/EDR data to the LE adv.
11181 if (conn->le_adv_data_len > 0) {
11182 memcpy(&ev->eir[eir_len],
11183 conn->le_adv_data, conn->le_adv_data_len);
11184 eir_len = conn->le_adv_data_len;
11187 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
11190 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
11191 eir_len = eir_append_data(ev->eir, eir_len,
11193 conn->dev_class, 3);
11196 ev->eir_len = cpu_to_le16(eir_len);
11198 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
11199 sizeof(*ev) + eir_len, NULL);
11202 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
11204 struct sock **sk = data;
11206 cmd->cmd_complete(cmd, 0);
11211 mgmt_pending_remove(cmd);
11214 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
11216 struct hci_dev *hdev = data;
11217 struct mgmt_cp_unpair_device *cp = cmd->param;
11219 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
11221 cmd->cmd_complete(cmd, 0);
11222 mgmt_pending_remove(cmd);
11225 bool mgmt_powering_down(struct hci_dev *hdev)
11227 struct mgmt_pending_cmd *cmd;
11228 struct mgmt_mode *cp;
11230 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11241 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
11242 u8 link_type, u8 addr_type, u8 reason,
11243 bool mgmt_connected)
11245 struct mgmt_ev_device_disconnected ev;
11246 struct sock *sk = NULL;
11248 /* The connection is still in hci_conn_hash so test for 1
11249 * instead of 0 to know if this is the last one.
11251 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11252 cancel_delayed_work(&hdev->power_off);
11253 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11256 if (!mgmt_connected)
11259 if (link_type != ACL_LINK && link_type != LE_LINK)
11262 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
11264 bacpy(&ev.addr.bdaddr, bdaddr);
11265 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11266 ev.reason = reason;
11268 /* Report disconnects due to suspend */
11269 if (hdev->suspended)
11270 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
11272 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
11277 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11281 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
11282 u8 link_type, u8 addr_type, u8 status)
11284 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
11285 struct mgmt_cp_disconnect *cp;
11286 struct mgmt_pending_cmd *cmd;
11288 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11291 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
11297 if (bacmp(bdaddr, &cp->addr.bdaddr))
11300 if (cp->addr.type != bdaddr_type)
11303 cmd->cmd_complete(cmd, mgmt_status(status));
11304 mgmt_pending_remove(cmd);
11307 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11308 u8 addr_type, u8 status)
11310 struct mgmt_ev_connect_failed ev;
11312 /* The connection is still in hci_conn_hash so test for 1
11313 * instead of 0 to know if this is the last one.
11315 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11316 cancel_delayed_work(&hdev->power_off);
11317 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11320 bacpy(&ev.addr.bdaddr, bdaddr);
11321 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11322 ev.status = mgmt_status(status);
11324 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
11327 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
11329 struct mgmt_ev_pin_code_request ev;
11331 bacpy(&ev.addr.bdaddr, bdaddr);
11332 ev.addr.type = BDADDR_BREDR;
11333 ev.secure = secure;
11335 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
11338 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11341 struct mgmt_pending_cmd *cmd;
11343 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
11347 cmd->cmd_complete(cmd, mgmt_status(status));
11348 mgmt_pending_remove(cmd);
11351 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11354 struct mgmt_pending_cmd *cmd;
11356 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
11360 cmd->cmd_complete(cmd, mgmt_status(status));
11361 mgmt_pending_remove(cmd);
11364 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11365 u8 link_type, u8 addr_type, u32 value,
11368 struct mgmt_ev_user_confirm_request ev;
11370 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11372 bacpy(&ev.addr.bdaddr, bdaddr);
11373 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11374 ev.confirm_hint = confirm_hint;
11375 ev.value = cpu_to_le32(value);
11377 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
11381 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11382 u8 link_type, u8 addr_type)
11384 struct mgmt_ev_user_passkey_request ev;
11386 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11388 bacpy(&ev.addr.bdaddr, bdaddr);
11389 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11391 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
11395 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11396 u8 link_type, u8 addr_type, u8 status,
11399 struct mgmt_pending_cmd *cmd;
11401 cmd = pending_find(opcode, hdev);
11405 cmd->cmd_complete(cmd, mgmt_status(status));
11406 mgmt_pending_remove(cmd);
11411 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11412 u8 link_type, u8 addr_type, u8 status)
11414 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11415 status, MGMT_OP_USER_CONFIRM_REPLY);
11418 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11419 u8 link_type, u8 addr_type, u8 status)
11421 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11423 MGMT_OP_USER_CONFIRM_NEG_REPLY);
11426 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11427 u8 link_type, u8 addr_type, u8 status)
11429 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11430 status, MGMT_OP_USER_PASSKEY_REPLY);
11433 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11434 u8 link_type, u8 addr_type, u8 status)
11436 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11438 MGMT_OP_USER_PASSKEY_NEG_REPLY);
11441 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
11442 u8 link_type, u8 addr_type, u32 passkey,
11445 struct mgmt_ev_passkey_notify ev;
11447 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11449 bacpy(&ev.addr.bdaddr, bdaddr);
11450 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11451 ev.passkey = __cpu_to_le32(passkey);
11452 ev.entered = entered;
11454 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
11457 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
11459 struct mgmt_ev_auth_failed ev;
11460 struct mgmt_pending_cmd *cmd;
11461 u8 status = mgmt_status(hci_status);
11463 bacpy(&ev.addr.bdaddr, &conn->dst);
11464 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11465 ev.status = status;
11467 cmd = find_pairing(conn);
11469 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
11470 cmd ? cmd->sk : NULL);
11473 cmd->cmd_complete(cmd, status);
11474 mgmt_pending_remove(cmd);
11478 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
11480 struct cmd_lookup match = { NULL, hdev };
11484 u8 mgmt_err = mgmt_status(status);
11485 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
11486 cmd_status_rsp, &mgmt_err);
11490 if (test_bit(HCI_AUTH, &hdev->flags))
11491 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
11493 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
11495 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
11499 new_settings(hdev, match.sk);
11502 sock_put(match.sk);
11505 static void clear_eir(struct hci_request *req)
11507 struct hci_dev *hdev = req->hdev;
11508 struct hci_cp_write_eir cp;
11510 if (!lmp_ext_inq_capable(hdev))
11513 memset(hdev->eir, 0, sizeof(hdev->eir));
11515 memset(&cp, 0, sizeof(cp));
11517 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
11520 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
11522 struct cmd_lookup match = { NULL, hdev };
11523 struct hci_request req;
11524 bool changed = false;
11527 u8 mgmt_err = mgmt_status(status);
11529 if (enable && hci_dev_test_and_clear_flag(hdev,
11530 HCI_SSP_ENABLED)) {
11531 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11532 new_settings(hdev, NULL);
11535 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
11541 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
11543 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
11545 changed = hci_dev_test_and_clear_flag(hdev,
11548 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
11551 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
11554 new_settings(hdev, match.sk);
11557 sock_put(match.sk);
11559 hci_req_init(&req, hdev);
11561 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
11562 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
11563 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
11564 sizeof(enable), &enable);
11565 __hci_req_update_eir(&req);
11570 hci_req_run(&req, NULL);
11573 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11575 struct cmd_lookup *match = data;
11577 if (match->sk == NULL) {
11578 match->sk = cmd->sk;
11579 sock_hold(match->sk);
11583 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11586 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11588 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11589 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11590 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11593 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11594 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11595 ext_info_changed(hdev, NULL);
11599 sock_put(match.sk);
11602 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11604 struct mgmt_cp_set_local_name ev;
11605 struct mgmt_pending_cmd *cmd;
11610 memset(&ev, 0, sizeof(ev));
11611 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11612 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11614 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11616 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11618 /* If this is a HCI command related to powering on the
11619 * HCI dev don't send any mgmt signals.
11621 if (pending_find(MGMT_OP_SET_POWERED, hdev))
11625 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11626 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11627 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
11630 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11634 for (i = 0; i < uuid_count; i++) {
11635 if (!memcmp(uuid, uuids[i], 16))
11642 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
11646 while (parsed < eir_len) {
11647 u8 field_len = eir[0];
11651 if (field_len == 0)
11654 if (eir_len - parsed < field_len + 1)
11658 case EIR_UUID16_ALL:
11659 case EIR_UUID16_SOME:
11660 for (i = 0; i + 3 <= field_len; i += 2) {
11661 memcpy(uuid, bluetooth_base_uuid, 16);
11662 uuid[13] = eir[i + 3];
11663 uuid[12] = eir[i + 2];
11664 if (has_uuid(uuid, uuid_count, uuids))
11668 case EIR_UUID32_ALL:
11669 case EIR_UUID32_SOME:
11670 for (i = 0; i + 5 <= field_len; i += 4) {
11671 memcpy(uuid, bluetooth_base_uuid, 16);
11672 uuid[15] = eir[i + 5];
11673 uuid[14] = eir[i + 4];
11674 uuid[13] = eir[i + 3];
11675 uuid[12] = eir[i + 2];
11676 if (has_uuid(uuid, uuid_count, uuids))
11680 case EIR_UUID128_ALL:
11681 case EIR_UUID128_SOME:
11682 for (i = 0; i + 17 <= field_len; i += 16) {
11683 memcpy(uuid, eir + i + 2, 16);
11684 if (has_uuid(uuid, uuid_count, uuids))
11690 parsed += field_len + 1;
11691 eir += field_len + 1;
11697 static void restart_le_scan(struct hci_dev *hdev)
11699 /* If controller is not scanning we are done. */
11700 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
11703 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
11704 hdev->discovery.scan_start +
11705 hdev->discovery.scan_duration))
11708 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
11709 DISCOV_LE_RESTART_DELAY);
11712 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
11713 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11715 /* If a RSSI threshold has been specified, and
11716 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
11717 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
11718 * is set, let it through for further processing, as we might need to
11719 * restart the scan.
11721 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
11722 * the results are also dropped.
11724 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11725 (rssi == HCI_RSSI_INVALID ||
11726 (rssi < hdev->discovery.rssi &&
11727 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
11730 if (hdev->discovery.uuid_count != 0) {
11731 /* If a list of UUIDs is provided in filter, results with no
11732 * matching UUID should be dropped.
11734 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
11735 hdev->discovery.uuids) &&
11736 !eir_has_uuids(scan_rsp, scan_rsp_len,
11737 hdev->discovery.uuid_count,
11738 hdev->discovery.uuids))
11742 /* If duplicate filtering does not report RSSI changes, then restart
11743 * scanning to ensure updated result with updated RSSI values.
11745 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
11746 restart_le_scan(hdev);
11748 /* Validate RSSI value against the RSSI threshold once more. */
11749 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11750 rssi < hdev->discovery.rssi)
11757 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11758 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
11759 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11762 struct mgmt_ev_device_found *ev = (void *)buf;
11765 /* Don't send events for a non-kernel initiated discovery. With
11766 * LE one exception is if we have pend_le_reports > 0 in which
11767 * case we're doing passive scanning and want these events.
11769 if (!hci_discovery_active(hdev)) {
11770 if (link_type == ACL_LINK)
11772 if (link_type == LE_LINK &&
11773 list_empty(&hdev->pend_le_reports) &&
11774 !hci_is_adv_monitoring(hdev)) {
11779 if (hdev->discovery.result_filtering) {
11780 /* We are using service discovery */
11781 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
11786 if (hdev->discovery.limited) {
11787 /* Check for limited discoverable bit */
11789 if (!(dev_class[1] & 0x20))
11792 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
11793 if (!flags || !(flags[0] & LE_AD_LIMITED))
11798 /* Make sure that the buffer is big enough. The 5 extra bytes
11799 * are for the potential CoD field.
11801 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
11804 memset(buf, 0, sizeof(buf));
11806 /* In case of device discovery with BR/EDR devices (pre 1.2), the
11807 * RSSI value was reported as 0 when not available. This behavior
11808 * is kept when using device discovery. This is required for full
11809 * backwards compatibility with the API.
11811 * However when using service discovery, the value 127 will be
11812 * returned when the RSSI is not available.
11814 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
11815 link_type == ACL_LINK)
11818 bacpy(&ev->addr.bdaddr, bdaddr);
11819 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11821 ev->flags = cpu_to_le32(flags);
11824 /* Copy EIR or advertising data into event */
11825 memcpy(ev->eir, eir, eir_len);
11827 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11829 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11832 if (scan_rsp_len > 0)
11833 /* Append scan response data to event */
11834 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
11836 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
11837 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
11839 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
11842 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11843 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
11845 struct mgmt_ev_device_found *ev;
11846 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
11849 ev = (struct mgmt_ev_device_found *) buf;
11851 memset(buf, 0, sizeof(buf));
11853 bacpy(&ev->addr.bdaddr, bdaddr);
11854 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11857 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
11860 ev->eir_len = cpu_to_le16(eir_len);
11862 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
11865 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
11867 struct mgmt_ev_discovering ev;
11869 bt_dev_dbg(hdev, "discovering %u", discovering);
11871 memset(&ev, 0, sizeof(ev));
11872 ev.type = hdev->discovery.type;
11873 ev.discovering = discovering;
11875 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
11878 void mgmt_suspending(struct hci_dev *hdev, u8 state)
11880 struct mgmt_ev_controller_suspend ev;
11882 ev.suspend_state = state;
11883 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
11886 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
11889 struct mgmt_ev_controller_resume ev;
11891 ev.wake_reason = reason;
11893 bacpy(&ev.addr.bdaddr, bdaddr);
11894 ev.addr.type = addr_type;
11896 memset(&ev.addr, 0, sizeof(ev.addr));
11899 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
11902 static struct hci_mgmt_chan chan = {
11903 .channel = HCI_CHANNEL_CONTROL,
11904 .handler_count = ARRAY_SIZE(mgmt_handlers),
11905 .handlers = mgmt_handlers,
11907 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
11908 .tizen_handlers = tizen_mgmt_handlers,
11910 .hdev_init = mgmt_init_hdev,
11913 int mgmt_init(void)
11915 return hci_mgmt_chan_register(&chan);
11918 void mgmt_exit(void)
11920 hci_mgmt_chan_unregister(&chan);