2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
39 #include "hci_request.h"
41 #include "mgmt_util.h"
42 #include "mgmt_config.h"
45 #define MGMT_VERSION 1
46 #define MGMT_REVISION 21
48 static const u16 mgmt_commands[] = {
49 MGMT_OP_READ_INDEX_LIST,
52 MGMT_OP_SET_DISCOVERABLE,
53 MGMT_OP_SET_CONNECTABLE,
54 MGMT_OP_SET_FAST_CONNECTABLE,
56 MGMT_OP_SET_LINK_SECURITY,
60 MGMT_OP_SET_DEV_CLASS,
61 MGMT_OP_SET_LOCAL_NAME,
64 MGMT_OP_LOAD_LINK_KEYS,
65 MGMT_OP_LOAD_LONG_TERM_KEYS,
67 MGMT_OP_GET_CONNECTIONS,
68 MGMT_OP_PIN_CODE_REPLY,
69 MGMT_OP_PIN_CODE_NEG_REPLY,
70 MGMT_OP_SET_IO_CAPABILITY,
72 MGMT_OP_CANCEL_PAIR_DEVICE,
73 MGMT_OP_UNPAIR_DEVICE,
74 MGMT_OP_USER_CONFIRM_REPLY,
75 MGMT_OP_USER_CONFIRM_NEG_REPLY,
76 MGMT_OP_USER_PASSKEY_REPLY,
77 MGMT_OP_USER_PASSKEY_NEG_REPLY,
78 MGMT_OP_READ_LOCAL_OOB_DATA,
79 MGMT_OP_ADD_REMOTE_OOB_DATA,
80 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
81 MGMT_OP_START_DISCOVERY,
82 MGMT_OP_STOP_DISCOVERY,
85 MGMT_OP_UNBLOCK_DEVICE,
86 MGMT_OP_SET_DEVICE_ID,
87 MGMT_OP_SET_ADVERTISING,
89 MGMT_OP_SET_STATIC_ADDRESS,
90 MGMT_OP_SET_SCAN_PARAMS,
91 MGMT_OP_SET_SECURE_CONN,
92 MGMT_OP_SET_DEBUG_KEYS,
95 MGMT_OP_GET_CONN_INFO,
96 MGMT_OP_GET_CLOCK_INFO,
98 MGMT_OP_REMOVE_DEVICE,
99 MGMT_OP_LOAD_CONN_PARAM,
100 MGMT_OP_READ_UNCONF_INDEX_LIST,
101 MGMT_OP_READ_CONFIG_INFO,
102 MGMT_OP_SET_EXTERNAL_CONFIG,
103 MGMT_OP_SET_PUBLIC_ADDRESS,
104 MGMT_OP_START_SERVICE_DISCOVERY,
105 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
106 MGMT_OP_READ_EXT_INDEX_LIST,
107 MGMT_OP_READ_ADV_FEATURES,
108 MGMT_OP_ADD_ADVERTISING,
109 MGMT_OP_REMOVE_ADVERTISING,
110 MGMT_OP_GET_ADV_SIZE_INFO,
111 MGMT_OP_START_LIMITED_DISCOVERY,
112 MGMT_OP_READ_EXT_INFO,
113 MGMT_OP_SET_APPEARANCE,
114 MGMT_OP_GET_PHY_CONFIGURATION,
115 MGMT_OP_SET_PHY_CONFIGURATION,
116 MGMT_OP_SET_BLOCKED_KEYS,
117 MGMT_OP_SET_WIDEBAND_SPEECH,
118 MGMT_OP_READ_CONTROLLER_CAP,
119 MGMT_OP_READ_EXP_FEATURES_INFO,
120 MGMT_OP_SET_EXP_FEATURE,
121 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
122 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
123 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
124 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
125 MGMT_OP_GET_DEVICE_FLAGS,
126 MGMT_OP_SET_DEVICE_FLAGS,
127 MGMT_OP_READ_ADV_MONITOR_FEATURES,
128 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
129 MGMT_OP_REMOVE_ADV_MONITOR,
130 MGMT_OP_ADD_EXT_ADV_PARAMS,
131 MGMT_OP_ADD_EXT_ADV_DATA,
132 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
135 static const u16 mgmt_events[] = {
136 MGMT_EV_CONTROLLER_ERROR,
138 MGMT_EV_INDEX_REMOVED,
139 MGMT_EV_NEW_SETTINGS,
140 MGMT_EV_CLASS_OF_DEV_CHANGED,
141 MGMT_EV_LOCAL_NAME_CHANGED,
142 MGMT_EV_NEW_LINK_KEY,
143 MGMT_EV_NEW_LONG_TERM_KEY,
144 MGMT_EV_DEVICE_CONNECTED,
145 MGMT_EV_DEVICE_DISCONNECTED,
146 MGMT_EV_CONNECT_FAILED,
147 MGMT_EV_PIN_CODE_REQUEST,
148 MGMT_EV_USER_CONFIRM_REQUEST,
149 MGMT_EV_USER_PASSKEY_REQUEST,
151 MGMT_EV_DEVICE_FOUND,
153 MGMT_EV_DEVICE_BLOCKED,
154 MGMT_EV_DEVICE_UNBLOCKED,
155 MGMT_EV_DEVICE_UNPAIRED,
156 MGMT_EV_PASSKEY_NOTIFY,
159 MGMT_EV_DEVICE_ADDED,
160 MGMT_EV_DEVICE_REMOVED,
161 MGMT_EV_NEW_CONN_PARAM,
162 MGMT_EV_UNCONF_INDEX_ADDED,
163 MGMT_EV_UNCONF_INDEX_REMOVED,
164 MGMT_EV_NEW_CONFIG_OPTIONS,
165 MGMT_EV_EXT_INDEX_ADDED,
166 MGMT_EV_EXT_INDEX_REMOVED,
167 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
168 MGMT_EV_ADVERTISING_ADDED,
169 MGMT_EV_ADVERTISING_REMOVED,
170 MGMT_EV_EXT_INFO_CHANGED,
171 MGMT_EV_PHY_CONFIGURATION_CHANGED,
172 MGMT_EV_EXP_FEATURE_CHANGED,
173 MGMT_EV_DEVICE_FLAGS_CHANGED,
174 MGMT_EV_ADV_MONITOR_ADDED,
175 MGMT_EV_ADV_MONITOR_REMOVED,
176 MGMT_EV_CONTROLLER_SUSPEND,
177 MGMT_EV_CONTROLLER_RESUME,
180 static const u16 mgmt_untrusted_commands[] = {
181 MGMT_OP_READ_INDEX_LIST,
183 MGMT_OP_READ_UNCONF_INDEX_LIST,
184 MGMT_OP_READ_CONFIG_INFO,
185 MGMT_OP_READ_EXT_INDEX_LIST,
186 MGMT_OP_READ_EXT_INFO,
187 MGMT_OP_READ_CONTROLLER_CAP,
188 MGMT_OP_READ_EXP_FEATURES_INFO,
189 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
190 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
193 static const u16 mgmt_untrusted_events[] = {
195 MGMT_EV_INDEX_REMOVED,
196 MGMT_EV_NEW_SETTINGS,
197 MGMT_EV_CLASS_OF_DEV_CHANGED,
198 MGMT_EV_LOCAL_NAME_CHANGED,
199 MGMT_EV_UNCONF_INDEX_ADDED,
200 MGMT_EV_UNCONF_INDEX_REMOVED,
201 MGMT_EV_NEW_CONFIG_OPTIONS,
202 MGMT_EV_EXT_INDEX_ADDED,
203 MGMT_EV_EXT_INDEX_REMOVED,
204 MGMT_EV_EXT_INFO_CHANGED,
205 MGMT_EV_EXP_FEATURE_CHANGED,
208 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
210 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
211 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 /* HCI to MGMT error code conversion table */
214 static const u8 mgmt_status_table[] = {
216 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
217 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
218 MGMT_STATUS_FAILED, /* Hardware Failure */
219 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
220 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
221 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
222 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
223 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
224 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
226 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
227 MGMT_STATUS_BUSY, /* Command Disallowed */
228 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
229 MGMT_STATUS_REJECTED, /* Rejected Security */
230 MGMT_STATUS_REJECTED, /* Rejected Personal */
231 MGMT_STATUS_TIMEOUT, /* Host Timeout */
232 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
233 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
234 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
235 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
236 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
237 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
238 MGMT_STATUS_BUSY, /* Repeated Attempts */
239 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
240 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
241 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
242 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
243 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
244 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
245 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
246 MGMT_STATUS_FAILED, /* Unspecified Error */
247 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
248 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
249 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
250 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
251 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
252 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
253 MGMT_STATUS_FAILED, /* Unit Link Key Used */
254 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
255 MGMT_STATUS_TIMEOUT, /* Instant Passed */
256 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
257 MGMT_STATUS_FAILED, /* Transaction Collision */
258 MGMT_STATUS_FAILED, /* Reserved for future use */
259 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
260 MGMT_STATUS_REJECTED, /* QoS Rejected */
261 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
262 MGMT_STATUS_REJECTED, /* Insufficient Security */
263 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
264 MGMT_STATUS_FAILED, /* Reserved for future use */
265 MGMT_STATUS_BUSY, /* Role Switch Pending */
266 MGMT_STATUS_FAILED, /* Reserved for future use */
267 MGMT_STATUS_FAILED, /* Slot Violation */
268 MGMT_STATUS_FAILED, /* Role Switch Failed */
269 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
270 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
271 MGMT_STATUS_BUSY, /* Host Busy Pairing */
272 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
273 MGMT_STATUS_BUSY, /* Controller Busy */
274 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
275 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
276 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
277 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
278 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
281 static u8 mgmt_status(u8 hci_status)
283 if (hci_status < ARRAY_SIZE(mgmt_status_table))
284 return mgmt_status_table[hci_status];
286 return MGMT_STATUS_FAILED;
289 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
292 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
296 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
297 u16 len, int flag, struct sock *skip_sk)
299 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
303 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
304 struct sock *skip_sk)
306 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
307 HCI_SOCK_TRUSTED, skip_sk);
310 static u8 le_addr_type(u8 mgmt_addr_type)
312 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
313 return ADDR_LE_DEV_PUBLIC;
315 return ADDR_LE_DEV_RANDOM;
318 void mgmt_fill_version_info(void *ver)
320 struct mgmt_rp_read_version *rp = ver;
322 rp->version = MGMT_VERSION;
323 rp->revision = cpu_to_le16(MGMT_REVISION);
326 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
329 struct mgmt_rp_read_version rp;
331 bt_dev_dbg(hdev, "sock %p", sk);
333 mgmt_fill_version_info(&rp);
335 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
339 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
342 struct mgmt_rp_read_commands *rp;
343 u16 num_commands, num_events;
347 bt_dev_dbg(hdev, "sock %p", sk);
349 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
350 num_commands = ARRAY_SIZE(mgmt_commands);
351 num_events = ARRAY_SIZE(mgmt_events);
353 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
354 num_events = ARRAY_SIZE(mgmt_untrusted_events);
357 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
359 rp = kmalloc(rp_size, GFP_KERNEL);
363 rp->num_commands = cpu_to_le16(num_commands);
364 rp->num_events = cpu_to_le16(num_events);
366 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
367 __le16 *opcode = rp->opcodes;
369 for (i = 0; i < num_commands; i++, opcode++)
370 put_unaligned_le16(mgmt_commands[i], opcode);
372 for (i = 0; i < num_events; i++, opcode++)
373 put_unaligned_le16(mgmt_events[i], opcode);
375 __le16 *opcode = rp->opcodes;
377 for (i = 0; i < num_commands; i++, opcode++)
378 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
380 for (i = 0; i < num_events; i++, opcode++)
381 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
384 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
391 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
394 struct mgmt_rp_read_index_list *rp;
400 bt_dev_dbg(hdev, "sock %p", sk);
402 read_lock(&hci_dev_list_lock);
405 list_for_each_entry(d, &hci_dev_list, list) {
406 if (d->dev_type == HCI_PRIMARY &&
407 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
411 rp_len = sizeof(*rp) + (2 * count);
412 rp = kmalloc(rp_len, GFP_ATOMIC);
414 read_unlock(&hci_dev_list_lock);
419 list_for_each_entry(d, &hci_dev_list, list) {
420 if (hci_dev_test_flag(d, HCI_SETUP) ||
421 hci_dev_test_flag(d, HCI_CONFIG) ||
422 hci_dev_test_flag(d, HCI_USER_CHANNEL))
425 /* Devices marked as raw-only are neither configured
426 * nor unconfigured controllers.
428 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
431 if (d->dev_type == HCI_PRIMARY &&
432 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
433 rp->index[count++] = cpu_to_le16(d->id);
434 bt_dev_dbg(hdev, "Added hci%u", d->id);
438 rp->num_controllers = cpu_to_le16(count);
439 rp_len = sizeof(*rp) + (2 * count);
441 read_unlock(&hci_dev_list_lock);
443 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
451 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
452 void *data, u16 data_len)
454 struct mgmt_rp_read_unconf_index_list *rp;
460 bt_dev_dbg(hdev, "sock %p", sk);
462 read_lock(&hci_dev_list_lock);
465 list_for_each_entry(d, &hci_dev_list, list) {
466 if (d->dev_type == HCI_PRIMARY &&
467 hci_dev_test_flag(d, HCI_UNCONFIGURED))
471 rp_len = sizeof(*rp) + (2 * count);
472 rp = kmalloc(rp_len, GFP_ATOMIC);
474 read_unlock(&hci_dev_list_lock);
479 list_for_each_entry(d, &hci_dev_list, list) {
480 if (hci_dev_test_flag(d, HCI_SETUP) ||
481 hci_dev_test_flag(d, HCI_CONFIG) ||
482 hci_dev_test_flag(d, HCI_USER_CHANNEL))
485 /* Devices marked as raw-only are neither configured
486 * nor unconfigured controllers.
488 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
491 if (d->dev_type == HCI_PRIMARY &&
492 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
493 rp->index[count++] = cpu_to_le16(d->id);
494 bt_dev_dbg(hdev, "Added hci%u", d->id);
498 rp->num_controllers = cpu_to_le16(count);
499 rp_len = sizeof(*rp) + (2 * count);
501 read_unlock(&hci_dev_list_lock);
503 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
504 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
511 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
512 void *data, u16 data_len)
514 struct mgmt_rp_read_ext_index_list *rp;
519 bt_dev_dbg(hdev, "sock %p", sk);
521 read_lock(&hci_dev_list_lock);
524 list_for_each_entry(d, &hci_dev_list, list) {
525 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
529 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
531 read_unlock(&hci_dev_list_lock);
536 list_for_each_entry(d, &hci_dev_list, list) {
537 if (hci_dev_test_flag(d, HCI_SETUP) ||
538 hci_dev_test_flag(d, HCI_CONFIG) ||
539 hci_dev_test_flag(d, HCI_USER_CHANNEL))
542 /* Devices marked as raw-only are neither configured
543 * nor unconfigured controllers.
545 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
548 if (d->dev_type == HCI_PRIMARY) {
549 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
550 rp->entry[count].type = 0x01;
552 rp->entry[count].type = 0x00;
553 } else if (d->dev_type == HCI_AMP) {
554 rp->entry[count].type = 0x02;
559 rp->entry[count].bus = d->bus;
560 rp->entry[count++].index = cpu_to_le16(d->id);
561 bt_dev_dbg(hdev, "Added hci%u", d->id);
564 rp->num_controllers = cpu_to_le16(count);
566 read_unlock(&hci_dev_list_lock);
568 /* If this command is called at least once, then all the
569 * default index and unconfigured index events are disabled
570 * and from now on only extended index events are used.
572 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
573 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
574 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
576 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
577 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
578 struct_size(rp, entry, count));
585 static bool is_configured(struct hci_dev *hdev)
587 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
588 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
591 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
592 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
593 !bacmp(&hdev->public_addr, BDADDR_ANY))
599 static __le32 get_missing_options(struct hci_dev *hdev)
603 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
604 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
605 options |= MGMT_OPTION_EXTERNAL_CONFIG;
607 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
608 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
609 !bacmp(&hdev->public_addr, BDADDR_ANY))
610 options |= MGMT_OPTION_PUBLIC_ADDRESS;
612 return cpu_to_le32(options);
615 static int new_options(struct hci_dev *hdev, struct sock *skip)
617 __le32 options = get_missing_options(hdev);
619 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
620 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
623 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
625 __le32 options = get_missing_options(hdev);
627 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
631 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
632 void *data, u16 data_len)
634 struct mgmt_rp_read_config_info rp;
637 bt_dev_dbg(hdev, "sock %p", sk);
641 memset(&rp, 0, sizeof(rp));
642 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
644 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
645 options |= MGMT_OPTION_EXTERNAL_CONFIG;
647 if (hdev->set_bdaddr)
648 options |= MGMT_OPTION_PUBLIC_ADDRESS;
650 rp.supported_options = cpu_to_le32(options);
651 rp.missing_options = get_missing_options(hdev);
653 hci_dev_unlock(hdev);
655 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
659 static u32 get_supported_phys(struct hci_dev *hdev)
661 u32 supported_phys = 0;
663 if (lmp_bredr_capable(hdev)) {
664 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
666 if (hdev->features[0][0] & LMP_3SLOT)
667 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
669 if (hdev->features[0][0] & LMP_5SLOT)
670 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
672 if (lmp_edr_2m_capable(hdev)) {
673 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
675 if (lmp_edr_3slot_capable(hdev))
676 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
678 if (lmp_edr_5slot_capable(hdev))
679 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
681 if (lmp_edr_3m_capable(hdev)) {
682 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
684 if (lmp_edr_3slot_capable(hdev))
685 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
687 if (lmp_edr_5slot_capable(hdev))
688 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
693 if (lmp_le_capable(hdev)) {
694 supported_phys |= MGMT_PHY_LE_1M_TX;
695 supported_phys |= MGMT_PHY_LE_1M_RX;
697 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
698 supported_phys |= MGMT_PHY_LE_2M_TX;
699 supported_phys |= MGMT_PHY_LE_2M_RX;
702 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
703 supported_phys |= MGMT_PHY_LE_CODED_TX;
704 supported_phys |= MGMT_PHY_LE_CODED_RX;
708 return supported_phys;
711 static u32 get_selected_phys(struct hci_dev *hdev)
713 u32 selected_phys = 0;
715 if (lmp_bredr_capable(hdev)) {
716 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
718 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
719 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
721 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
722 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
724 if (lmp_edr_2m_capable(hdev)) {
725 if (!(hdev->pkt_type & HCI_2DH1))
726 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
728 if (lmp_edr_3slot_capable(hdev) &&
729 !(hdev->pkt_type & HCI_2DH3))
730 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
732 if (lmp_edr_5slot_capable(hdev) &&
733 !(hdev->pkt_type & HCI_2DH5))
734 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
736 if (lmp_edr_3m_capable(hdev)) {
737 if (!(hdev->pkt_type & HCI_3DH1))
738 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
740 if (lmp_edr_3slot_capable(hdev) &&
741 !(hdev->pkt_type & HCI_3DH3))
742 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
744 if (lmp_edr_5slot_capable(hdev) &&
745 !(hdev->pkt_type & HCI_3DH5))
746 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
751 if (lmp_le_capable(hdev)) {
752 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
753 selected_phys |= MGMT_PHY_LE_1M_TX;
755 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
756 selected_phys |= MGMT_PHY_LE_1M_RX;
758 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
759 selected_phys |= MGMT_PHY_LE_2M_TX;
761 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
762 selected_phys |= MGMT_PHY_LE_2M_RX;
764 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
765 selected_phys |= MGMT_PHY_LE_CODED_TX;
767 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
768 selected_phys |= MGMT_PHY_LE_CODED_RX;
771 return selected_phys;
774 static u32 get_configurable_phys(struct hci_dev *hdev)
776 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
777 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
780 static u32 get_supported_settings(struct hci_dev *hdev)
784 settings |= MGMT_SETTING_POWERED;
785 settings |= MGMT_SETTING_BONDABLE;
786 settings |= MGMT_SETTING_DEBUG_KEYS;
787 settings |= MGMT_SETTING_CONNECTABLE;
788 settings |= MGMT_SETTING_DISCOVERABLE;
790 if (lmp_bredr_capable(hdev)) {
791 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
792 settings |= MGMT_SETTING_FAST_CONNECTABLE;
793 settings |= MGMT_SETTING_BREDR;
794 settings |= MGMT_SETTING_LINK_SECURITY;
796 if (lmp_ssp_capable(hdev)) {
797 settings |= MGMT_SETTING_SSP;
798 if (IS_ENABLED(CONFIG_BT_HS))
799 settings |= MGMT_SETTING_HS;
802 if (lmp_sc_capable(hdev))
803 settings |= MGMT_SETTING_SECURE_CONN;
805 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
807 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
810 if (lmp_le_capable(hdev)) {
811 settings |= MGMT_SETTING_LE;
812 settings |= MGMT_SETTING_SECURE_CONN;
813 settings |= MGMT_SETTING_PRIVACY;
814 settings |= MGMT_SETTING_STATIC_ADDRESS;
816 /* When the experimental feature for LL Privacy support is
817 * enabled, then advertising is no longer supported.
819 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
820 settings |= MGMT_SETTING_ADVERTISING;
823 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
825 settings |= MGMT_SETTING_CONFIGURATION;
827 settings |= MGMT_SETTING_PHY_CONFIGURATION;
832 static u32 get_current_settings(struct hci_dev *hdev)
836 if (hdev_is_powered(hdev))
837 settings |= MGMT_SETTING_POWERED;
839 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
840 settings |= MGMT_SETTING_CONNECTABLE;
842 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
843 settings |= MGMT_SETTING_FAST_CONNECTABLE;
845 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
846 settings |= MGMT_SETTING_DISCOVERABLE;
848 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
849 settings |= MGMT_SETTING_BONDABLE;
851 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
852 settings |= MGMT_SETTING_BREDR;
854 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
855 settings |= MGMT_SETTING_LE;
857 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
858 settings |= MGMT_SETTING_LINK_SECURITY;
860 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
861 settings |= MGMT_SETTING_SSP;
863 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
864 settings |= MGMT_SETTING_HS;
866 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
867 settings |= MGMT_SETTING_ADVERTISING;
869 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
870 settings |= MGMT_SETTING_SECURE_CONN;
872 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
873 settings |= MGMT_SETTING_DEBUG_KEYS;
875 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
876 settings |= MGMT_SETTING_PRIVACY;
878 /* The current setting for static address has two purposes. The
879 * first is to indicate if the static address will be used and
880 * the second is to indicate if it is actually set.
882 * This means if the static address is not configured, this flag
883 * will never be set. If the address is configured, then if the
884 * address is actually used decides if the flag is set or not.
886 * For single mode LE only controllers and dual-mode controllers
887 * with BR/EDR disabled, the existence of the static address will
890 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
891 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
892 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
893 if (bacmp(&hdev->static_addr, BDADDR_ANY))
894 settings |= MGMT_SETTING_STATIC_ADDRESS;
897 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
898 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
903 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
905 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
908 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
909 struct hci_dev *hdev,
912 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
915 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
917 struct mgmt_pending_cmd *cmd;
919 /* If there's a pending mgmt command the flags will not yet have
920 * their final values, so check for this first.
922 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
924 struct mgmt_mode *cp = cmd->param;
926 return LE_AD_GENERAL;
927 else if (cp->val == 0x02)
928 return LE_AD_LIMITED;
930 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
931 return LE_AD_LIMITED;
932 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
933 return LE_AD_GENERAL;
939 bool mgmt_get_connectable(struct hci_dev *hdev)
941 struct mgmt_pending_cmd *cmd;
943 /* If there's a pending mgmt command the flag will not yet have
944 * it's final value, so check for this first.
946 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
948 struct mgmt_mode *cp = cmd->param;
953 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
956 static void service_cache_off(struct work_struct *work)
958 struct hci_dev *hdev = container_of(work, struct hci_dev,
960 struct hci_request req;
962 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
965 hci_req_init(&req, hdev);
969 __hci_req_update_eir(&req);
970 __hci_req_update_class(&req);
972 hci_dev_unlock(hdev);
974 hci_req_run(&req, NULL);
977 static void rpa_expired(struct work_struct *work)
979 struct hci_dev *hdev = container_of(work, struct hci_dev,
981 struct hci_request req;
983 bt_dev_dbg(hdev, "");
985 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
987 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
990 /* The generation of a new RPA and programming it into the
991 * controller happens in the hci_req_enable_advertising()
994 hci_req_init(&req, hdev);
995 if (ext_adv_capable(hdev))
996 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
998 __hci_req_enable_advertising(&req);
999 hci_req_run(&req, NULL);
1002 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1004 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1007 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1008 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1010 /* Non-mgmt controlled devices get this bit set
1011 * implicitly so that pairing works for them, however
1012 * for mgmt we require user-space to explicitly enable
1015 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1018 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1019 void *data, u16 data_len)
1021 struct mgmt_rp_read_info rp;
1023 bt_dev_dbg(hdev, "sock %p", sk);
1027 memset(&rp, 0, sizeof(rp));
1029 bacpy(&rp.bdaddr, &hdev->bdaddr);
1031 rp.version = hdev->hci_ver;
1032 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1034 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1035 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1037 memcpy(rp.dev_class, hdev->dev_class, 3);
1039 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1040 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1042 hci_dev_unlock(hdev);
1044 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1048 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1053 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1054 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1055 hdev->dev_class, 3);
1057 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1058 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1061 name_len = strlen(hdev->dev_name);
1062 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1063 hdev->dev_name, name_len);
1065 name_len = strlen(hdev->short_name);
1066 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1067 hdev->short_name, name_len);
1072 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1073 void *data, u16 data_len)
1076 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1079 bt_dev_dbg(hdev, "sock %p", sk);
1081 memset(&buf, 0, sizeof(buf));
1085 bacpy(&rp->bdaddr, &hdev->bdaddr);
1087 rp->version = hdev->hci_ver;
1088 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1090 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1091 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1094 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1095 rp->eir_len = cpu_to_le16(eir_len);
1097 hci_dev_unlock(hdev);
1099 /* If this command is called at least once, then the events
1100 * for class of device and local name changes are disabled
1101 * and only the new extended controller information event
1104 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1105 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1106 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1108 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1109 sizeof(*rp) + eir_len);
1112 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1115 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1118 memset(buf, 0, sizeof(buf));
1120 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1121 ev->eir_len = cpu_to_le16(eir_len);
1123 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1124 sizeof(*ev) + eir_len,
1125 HCI_MGMT_EXT_INFO_EVENTS, skip);
1128 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1130 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1132 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1136 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1138 bt_dev_dbg(hdev, "status 0x%02x", status);
1140 if (hci_conn_count(hdev) == 0) {
1141 cancel_delayed_work(&hdev->power_off);
1142 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1146 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1148 struct mgmt_ev_advertising_added ev;
1150 ev.instance = instance;
1152 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1155 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1158 struct mgmt_ev_advertising_removed ev;
1160 ev.instance = instance;
1162 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1165 static void cancel_adv_timeout(struct hci_dev *hdev)
1167 if (hdev->adv_instance_timeout) {
1168 hdev->adv_instance_timeout = 0;
1169 cancel_delayed_work(&hdev->adv_instance_expire);
1173 static int clean_up_hci_state(struct hci_dev *hdev)
1175 struct hci_request req;
1176 struct hci_conn *conn;
1177 bool discov_stopped;
1180 hci_req_init(&req, hdev);
1182 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1183 test_bit(HCI_PSCAN, &hdev->flags)) {
1185 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1188 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1190 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1191 __hci_req_disable_advertising(&req);
1193 discov_stopped = hci_req_stop_discovery(&req);
1195 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1196 /* 0x15 == Terminated due to Power Off */
1197 __hci_abort_conn(&req, conn, 0x15);
1200 err = hci_req_run(&req, clean_up_hci_complete);
1201 if (!err && discov_stopped)
1202 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1207 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1210 struct mgmt_mode *cp = data;
1211 struct mgmt_pending_cmd *cmd;
1214 bt_dev_dbg(hdev, "sock %p", sk);
1216 if (cp->val != 0x00 && cp->val != 0x01)
1217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1218 MGMT_STATUS_INVALID_PARAMS);
1222 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1223 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1228 if (!!cp->val == hdev_is_powered(hdev)) {
1229 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1233 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1240 queue_work(hdev->req_workqueue, &hdev->power_on);
1243 /* Disconnect connections, stop scans, etc */
1244 err = clean_up_hci_state(hdev);
1246 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1247 HCI_POWER_OFF_TIMEOUT);
1249 /* ENODATA means there were no HCI commands queued */
1250 if (err == -ENODATA) {
1251 cancel_delayed_work(&hdev->power_off);
1252 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1258 hci_dev_unlock(hdev);
1262 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1264 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1266 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1267 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1270 int mgmt_new_settings(struct hci_dev *hdev)
1272 return new_settings(hdev, NULL);
1277 struct hci_dev *hdev;
1281 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1283 struct cmd_lookup *match = data;
1285 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1287 list_del(&cmd->list);
1289 if (match->sk == NULL) {
1290 match->sk = cmd->sk;
1291 sock_hold(match->sk);
1294 mgmt_pending_free(cmd);
1297 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1301 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1302 mgmt_pending_remove(cmd);
1305 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1307 if (cmd->cmd_complete) {
1310 cmd->cmd_complete(cmd, *status);
1311 mgmt_pending_remove(cmd);
1316 cmd_status_rsp(cmd, data);
1319 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1321 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1322 cmd->param, cmd->param_len);
1325 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1327 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1328 cmd->param, sizeof(struct mgmt_addr_info));
1331 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1333 if (!lmp_bredr_capable(hdev))
1334 return MGMT_STATUS_NOT_SUPPORTED;
1335 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1336 return MGMT_STATUS_REJECTED;
1338 return MGMT_STATUS_SUCCESS;
1341 static u8 mgmt_le_support(struct hci_dev *hdev)
1343 if (!lmp_le_capable(hdev))
1344 return MGMT_STATUS_NOT_SUPPORTED;
1345 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1346 return MGMT_STATUS_REJECTED;
1348 return MGMT_STATUS_SUCCESS;
1351 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1353 struct mgmt_pending_cmd *cmd;
1355 bt_dev_dbg(hdev, "status 0x%02x", status);
1359 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1364 u8 mgmt_err = mgmt_status(status);
1365 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1366 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1370 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1371 hdev->discov_timeout > 0) {
1372 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1373 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1376 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1377 new_settings(hdev, cmd->sk);
1380 mgmt_pending_remove(cmd);
1383 hci_dev_unlock(hdev);
1386 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1389 struct mgmt_cp_set_discoverable *cp = data;
1390 struct mgmt_pending_cmd *cmd;
1394 bt_dev_dbg(hdev, "sock %p", sk);
1396 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1397 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1398 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1399 MGMT_STATUS_REJECTED);
1401 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1402 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1403 MGMT_STATUS_INVALID_PARAMS);
1405 timeout = __le16_to_cpu(cp->timeout);
1407 /* Disabling discoverable requires that no timeout is set,
1408 * and enabling limited discoverable requires a timeout.
1410 if ((cp->val == 0x00 && timeout > 0) ||
1411 (cp->val == 0x02 && timeout == 0))
1412 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1413 MGMT_STATUS_INVALID_PARAMS);
1417 if (!hdev_is_powered(hdev) && timeout > 0) {
1418 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1419 MGMT_STATUS_NOT_POWERED);
1423 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1424 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1425 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1430 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1431 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1432 MGMT_STATUS_REJECTED);
1436 if (hdev->advertising_paused) {
1437 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1442 if (!hdev_is_powered(hdev)) {
1443 bool changed = false;
1445 /* Setting limited discoverable when powered off is
1446 * not a valid operation since it requires a timeout
1447 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1449 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1450 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1454 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1459 err = new_settings(hdev, sk);
1464 /* If the current mode is the same, then just update the timeout
1465 * value with the new value. And if only the timeout gets updated,
1466 * then no need for any HCI transactions.
1468 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1469 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1470 HCI_LIMITED_DISCOVERABLE)) {
1471 cancel_delayed_work(&hdev->discov_off);
1472 hdev->discov_timeout = timeout;
1474 if (cp->val && hdev->discov_timeout > 0) {
1475 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1476 queue_delayed_work(hdev->req_workqueue,
1477 &hdev->discov_off, to);
1480 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1484 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1490 /* Cancel any potential discoverable timeout that might be
1491 * still active and store new timeout value. The arming of
1492 * the timeout happens in the complete handler.
1494 cancel_delayed_work(&hdev->discov_off);
1495 hdev->discov_timeout = timeout;
1498 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1500 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1502 /* Limited discoverable mode */
1503 if (cp->val == 0x02)
1504 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1506 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1508 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1512 hci_dev_unlock(hdev);
1516 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1518 struct mgmt_pending_cmd *cmd;
1520 bt_dev_dbg(hdev, "status 0x%02x", status);
1524 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1529 u8 mgmt_err = mgmt_status(status);
1530 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1534 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1535 new_settings(hdev, cmd->sk);
1538 mgmt_pending_remove(cmd);
1541 hci_dev_unlock(hdev);
1544 static int set_connectable_update_settings(struct hci_dev *hdev,
1545 struct sock *sk, u8 val)
1547 bool changed = false;
1550 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1554 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1556 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1557 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1560 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1565 hci_req_update_scan(hdev);
1566 hci_update_background_scan(hdev);
1567 return new_settings(hdev, sk);
1573 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1576 struct mgmt_mode *cp = data;
1577 struct mgmt_pending_cmd *cmd;
1580 bt_dev_dbg(hdev, "sock %p", sk);
1582 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1583 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1585 MGMT_STATUS_REJECTED);
1587 if (cp->val != 0x00 && cp->val != 0x01)
1588 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1589 MGMT_STATUS_INVALID_PARAMS);
1593 if (!hdev_is_powered(hdev)) {
1594 err = set_connectable_update_settings(hdev, sk, cp->val);
1598 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1599 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1600 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1605 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1612 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1614 if (hdev->discov_timeout > 0)
1615 cancel_delayed_work(&hdev->discov_off);
1617 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1618 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1619 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1622 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1626 hci_dev_unlock(hdev);
1630 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1633 struct mgmt_mode *cp = data;
1637 bt_dev_dbg(hdev, "sock %p", sk);
1639 if (cp->val != 0x00 && cp->val != 0x01)
1640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1641 MGMT_STATUS_INVALID_PARAMS);
1646 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1648 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1650 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1655 /* In limited privacy mode the change of bondable mode
1656 * may affect the local advertising address.
1658 if (hdev_is_powered(hdev) &&
1659 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1660 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1661 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1662 queue_work(hdev->req_workqueue,
1663 &hdev->discoverable_update);
1665 err = new_settings(hdev, sk);
1669 hci_dev_unlock(hdev);
1673 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1676 struct mgmt_mode *cp = data;
1677 struct mgmt_pending_cmd *cmd;
1681 bt_dev_dbg(hdev, "sock %p", sk);
1683 status = mgmt_bredr_support(hdev);
1685 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1688 if (cp->val != 0x00 && cp->val != 0x01)
1689 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1690 MGMT_STATUS_INVALID_PARAMS);
1694 if (!hdev_is_powered(hdev)) {
1695 bool changed = false;
1697 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1698 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1702 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1707 err = new_settings(hdev, sk);
1712 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1713 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1720 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1721 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1725 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1731 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1733 mgmt_pending_remove(cmd);
1738 hci_dev_unlock(hdev);
1742 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1744 struct mgmt_mode *cp = data;
1745 struct mgmt_pending_cmd *cmd;
1749 bt_dev_dbg(hdev, "sock %p", sk);
1751 status = mgmt_bredr_support(hdev);
1753 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1755 if (!lmp_ssp_capable(hdev))
1756 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1757 MGMT_STATUS_NOT_SUPPORTED);
1759 if (cp->val != 0x00 && cp->val != 0x01)
1760 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1761 MGMT_STATUS_INVALID_PARAMS);
1765 if (!hdev_is_powered(hdev)) {
1769 changed = !hci_dev_test_and_set_flag(hdev,
1772 changed = hci_dev_test_and_clear_flag(hdev,
1775 changed = hci_dev_test_and_clear_flag(hdev,
1778 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1781 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1786 err = new_settings(hdev, sk);
1791 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1792 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1797 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1798 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1802 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1808 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1809 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1810 sizeof(cp->val), &cp->val);
1812 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1814 mgmt_pending_remove(cmd);
1819 hci_dev_unlock(hdev);
1823 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1825 struct mgmt_mode *cp = data;
1830 bt_dev_dbg(hdev, "sock %p", sk);
1832 if (!IS_ENABLED(CONFIG_BT_HS))
1833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1834 MGMT_STATUS_NOT_SUPPORTED);
1836 status = mgmt_bredr_support(hdev);
1838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1840 if (!lmp_ssp_capable(hdev))
1841 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1842 MGMT_STATUS_NOT_SUPPORTED);
1844 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1845 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1846 MGMT_STATUS_REJECTED);
1848 if (cp->val != 0x00 && cp->val != 0x01)
1849 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1850 MGMT_STATUS_INVALID_PARAMS);
1854 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1855 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1861 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1863 if (hdev_is_powered(hdev)) {
1864 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1865 MGMT_STATUS_REJECTED);
1869 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1872 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1877 err = new_settings(hdev, sk);
1880 hci_dev_unlock(hdev);
1884 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1886 struct cmd_lookup match = { NULL, hdev };
1891 u8 mgmt_err = mgmt_status(status);
1893 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1898 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1900 new_settings(hdev, match.sk);
1905 /* Make sure the controller has a good default for
1906 * advertising data. Restrict the update to when LE
1907 * has actually been enabled. During power on, the
1908 * update in powered_update_hci will take care of it.
1910 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1911 struct hci_request req;
1912 hci_req_init(&req, hdev);
1913 if (ext_adv_capable(hdev)) {
1916 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1918 __hci_req_update_scan_rsp_data(&req, 0x00);
1920 __hci_req_update_adv_data(&req, 0x00);
1921 __hci_req_update_scan_rsp_data(&req, 0x00);
1923 hci_req_run(&req, NULL);
1924 hci_update_background_scan(hdev);
1928 hci_dev_unlock(hdev);
1931 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1933 struct mgmt_mode *cp = data;
1934 struct hci_cp_write_le_host_supported hci_cp;
1935 struct mgmt_pending_cmd *cmd;
1936 struct hci_request req;
1940 bt_dev_dbg(hdev, "sock %p", sk);
1942 if (!lmp_le_capable(hdev))
1943 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1944 MGMT_STATUS_NOT_SUPPORTED);
1946 if (cp->val != 0x00 && cp->val != 0x01)
1947 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1948 MGMT_STATUS_INVALID_PARAMS);
1950 /* Bluetooth single mode LE only controllers or dual-mode
1951 * controllers configured as LE only devices, do not allow
1952 * switching LE off. These have either LE enabled explicitly
1953 * or BR/EDR has been previously switched off.
1955 * When trying to enable an already enabled LE, then gracefully
1956 * send a positive response. Trying to disable it however will
1957 * result into rejection.
1959 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1960 if (cp->val == 0x01)
1961 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1964 MGMT_STATUS_REJECTED);
1970 enabled = lmp_host_le_capable(hdev);
1973 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1975 if (!hdev_is_powered(hdev) || val == enabled) {
1976 bool changed = false;
1978 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1979 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1983 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1984 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1988 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1993 err = new_settings(hdev, sk);
1998 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1999 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2000 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2005 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2011 hci_req_init(&req, hdev);
2013 memset(&hci_cp, 0, sizeof(hci_cp));
2017 hci_cp.simul = 0x00;
2019 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2020 __hci_req_disable_advertising(&req);
2022 if (ext_adv_capable(hdev))
2023 __hci_req_clear_ext_adv_sets(&req);
2026 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2029 err = hci_req_run(&req, le_enable_complete);
2031 mgmt_pending_remove(cmd);
2034 hci_dev_unlock(hdev);
2038 /* This is a helper function to test for pending mgmt commands that can
2039 * cause CoD or EIR HCI commands. We can only allow one such pending
2040 * mgmt command at a time since otherwise we cannot easily track what
2041 * the current values are, will be, and based on that calculate if a new
2042 * HCI command needs to be sent and if yes with what value.
2044 static bool pending_eir_or_class(struct hci_dev *hdev)
2046 struct mgmt_pending_cmd *cmd;
2048 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2049 switch (cmd->opcode) {
2050 case MGMT_OP_ADD_UUID:
2051 case MGMT_OP_REMOVE_UUID:
2052 case MGMT_OP_SET_DEV_CLASS:
2053 case MGMT_OP_SET_POWERED:
2061 static const u8 bluetooth_base_uuid[] = {
2062 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2063 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2066 static u8 get_uuid_size(const u8 *uuid)
2070 if (memcmp(uuid, bluetooth_base_uuid, 12))
2073 val = get_unaligned_le32(&uuid[12]);
2080 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2082 struct mgmt_pending_cmd *cmd;
2086 cmd = pending_find(mgmt_op, hdev);
2090 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2091 mgmt_status(status), hdev->dev_class, 3);
2093 mgmt_pending_remove(cmd);
2096 hci_dev_unlock(hdev);
2099 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2101 bt_dev_dbg(hdev, "status 0x%02x", status);
2103 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2106 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2108 struct mgmt_cp_add_uuid *cp = data;
2109 struct mgmt_pending_cmd *cmd;
2110 struct hci_request req;
2111 struct bt_uuid *uuid;
2114 bt_dev_dbg(hdev, "sock %p", sk);
2118 if (pending_eir_or_class(hdev)) {
2119 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2124 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2130 memcpy(uuid->uuid, cp->uuid, 16);
2131 uuid->svc_hint = cp->svc_hint;
2132 uuid->size = get_uuid_size(cp->uuid);
2134 list_add_tail(&uuid->list, &hdev->uuids);
2136 hci_req_init(&req, hdev);
2138 __hci_req_update_class(&req);
2139 __hci_req_update_eir(&req);
2141 err = hci_req_run(&req, add_uuid_complete);
2143 if (err != -ENODATA)
2146 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2147 hdev->dev_class, 3);
2151 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2160 hci_dev_unlock(hdev);
2164 static bool enable_service_cache(struct hci_dev *hdev)
2166 if (!hdev_is_powered(hdev))
2169 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2170 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2178 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2180 bt_dev_dbg(hdev, "status 0x%02x", status);
2182 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2185 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2188 struct mgmt_cp_remove_uuid *cp = data;
2189 struct mgmt_pending_cmd *cmd;
2190 struct bt_uuid *match, *tmp;
2191 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2192 struct hci_request req;
2195 bt_dev_dbg(hdev, "sock %p", sk);
2199 if (pending_eir_or_class(hdev)) {
2200 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2205 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2206 hci_uuids_clear(hdev);
2208 if (enable_service_cache(hdev)) {
2209 err = mgmt_cmd_complete(sk, hdev->id,
2210 MGMT_OP_REMOVE_UUID,
2211 0, hdev->dev_class, 3);
2220 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2221 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2224 list_del(&match->list);
2230 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2231 MGMT_STATUS_INVALID_PARAMS);
2236 hci_req_init(&req, hdev);
2238 __hci_req_update_class(&req);
2239 __hci_req_update_eir(&req);
2241 err = hci_req_run(&req, remove_uuid_complete);
2243 if (err != -ENODATA)
2246 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2247 hdev->dev_class, 3);
2251 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2260 hci_dev_unlock(hdev);
2264 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2266 bt_dev_dbg(hdev, "status 0x%02x", status);
2268 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2271 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2274 struct mgmt_cp_set_dev_class *cp = data;
2275 struct mgmt_pending_cmd *cmd;
2276 struct hci_request req;
2279 bt_dev_dbg(hdev, "sock %p", sk);
2281 if (!lmp_bredr_capable(hdev))
2282 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2283 MGMT_STATUS_NOT_SUPPORTED);
2287 if (pending_eir_or_class(hdev)) {
2288 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2293 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2294 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2295 MGMT_STATUS_INVALID_PARAMS);
2299 hdev->major_class = cp->major;
2300 hdev->minor_class = cp->minor;
2302 if (!hdev_is_powered(hdev)) {
2303 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2304 hdev->dev_class, 3);
2308 hci_req_init(&req, hdev);
2310 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2311 hci_dev_unlock(hdev);
2312 cancel_delayed_work_sync(&hdev->service_cache);
2314 __hci_req_update_eir(&req);
2317 __hci_req_update_class(&req);
2319 err = hci_req_run(&req, set_class_complete);
2321 if (err != -ENODATA)
2324 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2325 hdev->dev_class, 3);
2329 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2338 hci_dev_unlock(hdev);
2342 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2345 struct mgmt_cp_load_link_keys *cp = data;
2346 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2347 sizeof(struct mgmt_link_key_info));
2348 u16 key_count, expected_len;
2352 bt_dev_dbg(hdev, "sock %p", sk);
2354 if (!lmp_bredr_capable(hdev))
2355 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2356 MGMT_STATUS_NOT_SUPPORTED);
2358 key_count = __le16_to_cpu(cp->key_count);
2359 if (key_count > max_key_count) {
2360 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2363 MGMT_STATUS_INVALID_PARAMS);
2366 expected_len = struct_size(cp, keys, key_count);
2367 if (expected_len != len) {
2368 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2370 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2371 MGMT_STATUS_INVALID_PARAMS);
2374 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2376 MGMT_STATUS_INVALID_PARAMS);
2378 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2381 for (i = 0; i < key_count; i++) {
2382 struct mgmt_link_key_info *key = &cp->keys[i];
2384 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2385 return mgmt_cmd_status(sk, hdev->id,
2386 MGMT_OP_LOAD_LINK_KEYS,
2387 MGMT_STATUS_INVALID_PARAMS);
2392 hci_link_keys_clear(hdev);
2395 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2397 changed = hci_dev_test_and_clear_flag(hdev,
2398 HCI_KEEP_DEBUG_KEYS);
2401 new_settings(hdev, NULL);
2403 for (i = 0; i < key_count; i++) {
2404 struct mgmt_link_key_info *key = &cp->keys[i];
2406 if (hci_is_blocked_key(hdev,
2407 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2409 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2414 /* Always ignore debug keys and require a new pairing if
2415 * the user wants to use them.
2417 if (key->type == HCI_LK_DEBUG_COMBINATION)
2420 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2421 key->type, key->pin_len, NULL);
2424 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2426 hci_dev_unlock(hdev);
2431 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2432 u8 addr_type, struct sock *skip_sk)
2434 struct mgmt_ev_device_unpaired ev;
2436 bacpy(&ev.addr.bdaddr, bdaddr);
2437 ev.addr.type = addr_type;
2439 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2443 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2446 struct mgmt_cp_unpair_device *cp = data;
2447 struct mgmt_rp_unpair_device rp;
2448 struct hci_conn_params *params;
2449 struct mgmt_pending_cmd *cmd;
2450 struct hci_conn *conn;
2454 memset(&rp, 0, sizeof(rp));
2455 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2456 rp.addr.type = cp->addr.type;
2458 if (!bdaddr_type_is_valid(cp->addr.type))
2459 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2460 MGMT_STATUS_INVALID_PARAMS,
2463 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2464 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2465 MGMT_STATUS_INVALID_PARAMS,
2470 if (!hdev_is_powered(hdev)) {
2471 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2472 MGMT_STATUS_NOT_POWERED, &rp,
2477 if (cp->addr.type == BDADDR_BREDR) {
2478 /* If disconnection is requested, then look up the
2479 * connection. If the remote device is connected, it
2480 * will be later used to terminate the link.
2482 * Setting it to NULL explicitly will cause no
2483 * termination of the link.
2486 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2491 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2493 err = mgmt_cmd_complete(sk, hdev->id,
2494 MGMT_OP_UNPAIR_DEVICE,
2495 MGMT_STATUS_NOT_PAIRED, &rp,
2503 /* LE address type */
2504 addr_type = le_addr_type(cp->addr.type);
2506 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2507 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2509 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2510 MGMT_STATUS_NOT_PAIRED, &rp,
2515 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2517 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2522 /* Defer clearing up the connection parameters until closing to
2523 * give a chance of keeping them if a repairing happens.
2525 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2527 /* Disable auto-connection parameters if present */
2528 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2530 if (params->explicit_connect)
2531 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2533 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2536 /* If disconnection is not requested, then clear the connection
2537 * variable so that the link is not terminated.
2539 if (!cp->disconnect)
2543 /* If the connection variable is set, then termination of the
2544 * link is requested.
2547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2549 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2553 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2560 cmd->cmd_complete = addr_cmd_complete;
2562 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2564 mgmt_pending_remove(cmd);
2567 hci_dev_unlock(hdev);
2571 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2574 struct mgmt_cp_disconnect *cp = data;
2575 struct mgmt_rp_disconnect rp;
2576 struct mgmt_pending_cmd *cmd;
2577 struct hci_conn *conn;
2580 bt_dev_dbg(hdev, "sock %p", sk);
2582 memset(&rp, 0, sizeof(rp));
2583 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2584 rp.addr.type = cp->addr.type;
2586 if (!bdaddr_type_is_valid(cp->addr.type))
2587 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2588 MGMT_STATUS_INVALID_PARAMS,
2593 if (!test_bit(HCI_UP, &hdev->flags)) {
2594 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2595 MGMT_STATUS_NOT_POWERED, &rp,
2600 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2601 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2602 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2606 if (cp->addr.type == BDADDR_BREDR)
2607 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2610 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2611 le_addr_type(cp->addr.type));
2613 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2614 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2615 MGMT_STATUS_NOT_CONNECTED, &rp,
2620 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2626 cmd->cmd_complete = generic_cmd_complete;
2628 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2630 mgmt_pending_remove(cmd);
2633 hci_dev_unlock(hdev);
2637 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2639 switch (link_type) {
2641 switch (addr_type) {
2642 case ADDR_LE_DEV_PUBLIC:
2643 return BDADDR_LE_PUBLIC;
2646 /* Fallback to LE Random address type */
2647 return BDADDR_LE_RANDOM;
2651 /* Fallback to BR/EDR type */
2652 return BDADDR_BREDR;
2656 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2659 struct mgmt_rp_get_connections *rp;
2664 bt_dev_dbg(hdev, "sock %p", sk);
2668 if (!hdev_is_powered(hdev)) {
2669 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2670 MGMT_STATUS_NOT_POWERED);
2675 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2676 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2680 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2687 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2688 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2690 bacpy(&rp->addr[i].bdaddr, &c->dst);
2691 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2692 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2697 rp->conn_count = cpu_to_le16(i);
2699 /* Recalculate length in case of filtered SCO connections, etc */
2700 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2701 struct_size(rp, addr, i));
2706 hci_dev_unlock(hdev);
2710 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2711 struct mgmt_cp_pin_code_neg_reply *cp)
2713 struct mgmt_pending_cmd *cmd;
2716 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2721 cmd->cmd_complete = addr_cmd_complete;
2723 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2724 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2726 mgmt_pending_remove(cmd);
2731 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2734 struct hci_conn *conn;
2735 struct mgmt_cp_pin_code_reply *cp = data;
2736 struct hci_cp_pin_code_reply reply;
2737 struct mgmt_pending_cmd *cmd;
2740 bt_dev_dbg(hdev, "sock %p", sk);
2744 if (!hdev_is_powered(hdev)) {
2745 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2746 MGMT_STATUS_NOT_POWERED);
2750 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2752 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2753 MGMT_STATUS_NOT_CONNECTED);
2757 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2758 struct mgmt_cp_pin_code_neg_reply ncp;
2760 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2762 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2764 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2766 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2767 MGMT_STATUS_INVALID_PARAMS);
2772 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2778 cmd->cmd_complete = addr_cmd_complete;
2780 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2781 reply.pin_len = cp->pin_len;
2782 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2784 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2786 mgmt_pending_remove(cmd);
2789 hci_dev_unlock(hdev);
2793 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2796 struct mgmt_cp_set_io_capability *cp = data;
2798 bt_dev_dbg(hdev, "sock %p", sk);
2800 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2801 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2802 MGMT_STATUS_INVALID_PARAMS);
2806 hdev->io_capability = cp->io_capability;
2808 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2810 hci_dev_unlock(hdev);
2812 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2816 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2818 struct hci_dev *hdev = conn->hdev;
2819 struct mgmt_pending_cmd *cmd;
2821 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2822 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2825 if (cmd->user_data != conn)
2834 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2836 struct mgmt_rp_pair_device rp;
2837 struct hci_conn *conn = cmd->user_data;
2840 bacpy(&rp.addr.bdaddr, &conn->dst);
2841 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2843 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2844 status, &rp, sizeof(rp));
2846 /* So we don't get further callbacks for this connection */
2847 conn->connect_cfm_cb = NULL;
2848 conn->security_cfm_cb = NULL;
2849 conn->disconn_cfm_cb = NULL;
2851 hci_conn_drop(conn);
2853 /* The device is paired so there is no need to remove
2854 * its connection parameters anymore.
2856 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2863 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2865 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2866 struct mgmt_pending_cmd *cmd;
2868 cmd = find_pairing(conn);
2870 cmd->cmd_complete(cmd, status);
2871 mgmt_pending_remove(cmd);
2875 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2877 struct mgmt_pending_cmd *cmd;
2879 BT_DBG("status %u", status);
2881 cmd = find_pairing(conn);
2883 BT_DBG("Unable to find a pending command");
2887 cmd->cmd_complete(cmd, mgmt_status(status));
2888 mgmt_pending_remove(cmd);
2891 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2893 struct mgmt_pending_cmd *cmd;
2895 BT_DBG("status %u", status);
2900 cmd = find_pairing(conn);
2902 BT_DBG("Unable to find a pending command");
2906 cmd->cmd_complete(cmd, mgmt_status(status));
2907 mgmt_pending_remove(cmd);
2910 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2913 struct mgmt_cp_pair_device *cp = data;
2914 struct mgmt_rp_pair_device rp;
2915 struct mgmt_pending_cmd *cmd;
2916 u8 sec_level, auth_type;
2917 struct hci_conn *conn;
2920 bt_dev_dbg(hdev, "sock %p", sk);
2922 memset(&rp, 0, sizeof(rp));
2923 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2924 rp.addr.type = cp->addr.type;
2926 if (!bdaddr_type_is_valid(cp->addr.type))
2927 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2928 MGMT_STATUS_INVALID_PARAMS,
2931 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2932 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2933 MGMT_STATUS_INVALID_PARAMS,
2938 if (!hdev_is_powered(hdev)) {
2939 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2940 MGMT_STATUS_NOT_POWERED, &rp,
2945 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2946 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2947 MGMT_STATUS_ALREADY_PAIRED, &rp,
2952 sec_level = BT_SECURITY_MEDIUM;
2953 auth_type = HCI_AT_DEDICATED_BONDING;
2955 if (cp->addr.type == BDADDR_BREDR) {
2956 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2957 auth_type, CONN_REASON_PAIR_DEVICE);
2959 u8 addr_type = le_addr_type(cp->addr.type);
2960 struct hci_conn_params *p;
2962 /* When pairing a new device, it is expected to remember
2963 * this device for future connections. Adding the connection
2964 * parameter information ahead of time allows tracking
2965 * of the peripheral preferred values and will speed up any
2966 * further connection establishment.
2968 * If connection parameters already exist, then they
2969 * will be kept and this function does nothing.
2971 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2973 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2974 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2976 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2977 sec_level, HCI_LE_CONN_TIMEOUT,
2978 CONN_REASON_PAIR_DEVICE);
2984 if (PTR_ERR(conn) == -EBUSY)
2985 status = MGMT_STATUS_BUSY;
2986 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2987 status = MGMT_STATUS_NOT_SUPPORTED;
2988 else if (PTR_ERR(conn) == -ECONNREFUSED)
2989 status = MGMT_STATUS_REJECTED;
2991 status = MGMT_STATUS_CONNECT_FAILED;
2993 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2994 status, &rp, sizeof(rp));
2998 if (conn->connect_cfm_cb) {
2999 hci_conn_drop(conn);
3000 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3001 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3005 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3008 hci_conn_drop(conn);
3012 cmd->cmd_complete = pairing_complete;
3014 /* For LE, just connecting isn't a proof that the pairing finished */
3015 if (cp->addr.type == BDADDR_BREDR) {
3016 conn->connect_cfm_cb = pairing_complete_cb;
3017 conn->security_cfm_cb = pairing_complete_cb;
3018 conn->disconn_cfm_cb = pairing_complete_cb;
3020 conn->connect_cfm_cb = le_pairing_complete_cb;
3021 conn->security_cfm_cb = le_pairing_complete_cb;
3022 conn->disconn_cfm_cb = le_pairing_complete_cb;
3025 conn->io_capability = cp->io_cap;
3026 cmd->user_data = hci_conn_get(conn);
3028 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3029 hci_conn_security(conn, sec_level, auth_type, true)) {
3030 cmd->cmd_complete(cmd, 0);
3031 mgmt_pending_remove(cmd);
3037 hci_dev_unlock(hdev);
3041 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3044 struct mgmt_addr_info *addr = data;
3045 struct mgmt_pending_cmd *cmd;
3046 struct hci_conn *conn;
3049 bt_dev_dbg(hdev, "sock %p", sk);
3053 if (!hdev_is_powered(hdev)) {
3054 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3055 MGMT_STATUS_NOT_POWERED);
3059 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3061 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3062 MGMT_STATUS_INVALID_PARAMS);
3066 conn = cmd->user_data;
3068 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3069 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3070 MGMT_STATUS_INVALID_PARAMS);
3074 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3075 mgmt_pending_remove(cmd);
3077 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3078 addr, sizeof(*addr));
3080 /* Since user doesn't want to proceed with the connection, abort any
3081 * ongoing pairing and then terminate the link if it was created
3082 * because of the pair device action.
3084 if (addr->type == BDADDR_BREDR)
3085 hci_remove_link_key(hdev, &addr->bdaddr);
3087 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3088 le_addr_type(addr->type));
3090 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3091 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3094 hci_dev_unlock(hdev);
3098 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3099 struct mgmt_addr_info *addr, u16 mgmt_op,
3100 u16 hci_op, __le32 passkey)
3102 struct mgmt_pending_cmd *cmd;
3103 struct hci_conn *conn;
3108 if (!hdev_is_powered(hdev)) {
3109 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3110 MGMT_STATUS_NOT_POWERED, addr,
3115 if (addr->type == BDADDR_BREDR)
3116 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3118 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3119 le_addr_type(addr->type));
3122 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3123 MGMT_STATUS_NOT_CONNECTED, addr,
3128 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3129 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3131 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3132 MGMT_STATUS_SUCCESS, addr,
3135 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3136 MGMT_STATUS_FAILED, addr,
3142 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3148 cmd->cmd_complete = addr_cmd_complete;
3150 /* Continue with pairing via HCI */
3151 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3152 struct hci_cp_user_passkey_reply cp;
3154 bacpy(&cp.bdaddr, &addr->bdaddr);
3155 cp.passkey = passkey;
3156 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3158 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3162 mgmt_pending_remove(cmd);
3165 hci_dev_unlock(hdev);
3169 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3170 void *data, u16 len)
3172 struct mgmt_cp_pin_code_neg_reply *cp = data;
3174 bt_dev_dbg(hdev, "sock %p", sk);
3176 return user_pairing_resp(sk, hdev, &cp->addr,
3177 MGMT_OP_PIN_CODE_NEG_REPLY,
3178 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3181 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3184 struct mgmt_cp_user_confirm_reply *cp = data;
3186 bt_dev_dbg(hdev, "sock %p", sk);
3188 if (len != sizeof(*cp))
3189 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3190 MGMT_STATUS_INVALID_PARAMS);
3192 return user_pairing_resp(sk, hdev, &cp->addr,
3193 MGMT_OP_USER_CONFIRM_REPLY,
3194 HCI_OP_USER_CONFIRM_REPLY, 0);
3197 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3198 void *data, u16 len)
3200 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3202 bt_dev_dbg(hdev, "sock %p", sk);
3204 return user_pairing_resp(sk, hdev, &cp->addr,
3205 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3206 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3209 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3212 struct mgmt_cp_user_passkey_reply *cp = data;
3214 bt_dev_dbg(hdev, "sock %p", sk);
3216 return user_pairing_resp(sk, hdev, &cp->addr,
3217 MGMT_OP_USER_PASSKEY_REPLY,
3218 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3221 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3222 void *data, u16 len)
3224 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3226 bt_dev_dbg(hdev, "sock %p", sk);
3228 return user_pairing_resp(sk, hdev, &cp->addr,
3229 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3230 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3233 static void adv_expire(struct hci_dev *hdev, u32 flags)
3235 struct adv_info *adv_instance;
3236 struct hci_request req;
3239 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3243 /* stop if current instance doesn't need to be changed */
3244 if (!(adv_instance->flags & flags))
3247 cancel_adv_timeout(hdev);
3249 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3253 hci_req_init(&req, hdev);
3254 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3259 hci_req_run(&req, NULL);
3262 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3264 struct mgmt_cp_set_local_name *cp;
3265 struct mgmt_pending_cmd *cmd;
3267 bt_dev_dbg(hdev, "status 0x%02x", status);
3271 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3278 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3279 mgmt_status(status));
3281 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3284 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3285 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3288 mgmt_pending_remove(cmd);
3291 hci_dev_unlock(hdev);
3294 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3297 struct mgmt_cp_set_local_name *cp = data;
3298 struct mgmt_pending_cmd *cmd;
3299 struct hci_request req;
3302 bt_dev_dbg(hdev, "sock %p", sk);
3306 /* If the old values are the same as the new ones just return a
3307 * direct command complete event.
3309 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3310 !memcmp(hdev->short_name, cp->short_name,
3311 sizeof(hdev->short_name))) {
3312 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3317 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3319 if (!hdev_is_powered(hdev)) {
3320 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3322 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3327 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3328 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3329 ext_info_changed(hdev, sk);
3334 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3340 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3342 hci_req_init(&req, hdev);
3344 if (lmp_bredr_capable(hdev)) {
3345 __hci_req_update_name(&req);
3346 __hci_req_update_eir(&req);
3349 /* The name is stored in the scan response data and so
3350 * no need to update the advertising data here.
3352 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3353 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3355 err = hci_req_run(&req, set_name_complete);
3357 mgmt_pending_remove(cmd);
3360 hci_dev_unlock(hdev);
3364 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3367 struct mgmt_cp_set_appearance *cp = data;
3371 bt_dev_dbg(hdev, "sock %p", sk);
3373 if (!lmp_le_capable(hdev))
3374 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3375 MGMT_STATUS_NOT_SUPPORTED);
3377 appearance = le16_to_cpu(cp->appearance);
3381 if (hdev->appearance != appearance) {
3382 hdev->appearance = appearance;
3384 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3385 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3387 ext_info_changed(hdev, sk);
3390 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3393 hci_dev_unlock(hdev);
3398 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3399 void *data, u16 len)
3401 struct mgmt_rp_get_phy_configuration rp;
3403 bt_dev_dbg(hdev, "sock %p", sk);
3407 memset(&rp, 0, sizeof(rp));
3409 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3410 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3411 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3413 hci_dev_unlock(hdev);
3415 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3419 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3421 struct mgmt_ev_phy_configuration_changed ev;
3423 memset(&ev, 0, sizeof(ev));
3425 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3427 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3431 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3432 u16 opcode, struct sk_buff *skb)
3434 struct mgmt_pending_cmd *cmd;
3436 bt_dev_dbg(hdev, "status 0x%02x", status);
3440 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3445 mgmt_cmd_status(cmd->sk, hdev->id,
3446 MGMT_OP_SET_PHY_CONFIGURATION,
3447 mgmt_status(status));
3449 mgmt_cmd_complete(cmd->sk, hdev->id,
3450 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3453 mgmt_phy_configuration_changed(hdev, cmd->sk);
3456 mgmt_pending_remove(cmd);
3459 hci_dev_unlock(hdev);
3462 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3463 void *data, u16 len)
3465 struct mgmt_cp_set_phy_configuration *cp = data;
3466 struct hci_cp_le_set_default_phy cp_phy;
3467 struct mgmt_pending_cmd *cmd;
3468 struct hci_request req;
3469 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3470 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3471 bool changed = false;
3474 bt_dev_dbg(hdev, "sock %p", sk);
3476 configurable_phys = get_configurable_phys(hdev);
3477 supported_phys = get_supported_phys(hdev);
3478 selected_phys = __le32_to_cpu(cp->selected_phys);
3480 if (selected_phys & ~supported_phys)
3481 return mgmt_cmd_status(sk, hdev->id,
3482 MGMT_OP_SET_PHY_CONFIGURATION,
3483 MGMT_STATUS_INVALID_PARAMS);
3485 unconfigure_phys = supported_phys & ~configurable_phys;
3487 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3488 return mgmt_cmd_status(sk, hdev->id,
3489 MGMT_OP_SET_PHY_CONFIGURATION,
3490 MGMT_STATUS_INVALID_PARAMS);
3492 if (selected_phys == get_selected_phys(hdev))
3493 return mgmt_cmd_complete(sk, hdev->id,
3494 MGMT_OP_SET_PHY_CONFIGURATION,
3499 if (!hdev_is_powered(hdev)) {
3500 err = mgmt_cmd_status(sk, hdev->id,
3501 MGMT_OP_SET_PHY_CONFIGURATION,
3502 MGMT_STATUS_REJECTED);
3506 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3507 err = mgmt_cmd_status(sk, hdev->id,
3508 MGMT_OP_SET_PHY_CONFIGURATION,
3513 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3514 pkt_type |= (HCI_DH3 | HCI_DM3);
3516 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3518 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3519 pkt_type |= (HCI_DH5 | HCI_DM5);
3521 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3523 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3524 pkt_type &= ~HCI_2DH1;
3526 pkt_type |= HCI_2DH1;
3528 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3529 pkt_type &= ~HCI_2DH3;
3531 pkt_type |= HCI_2DH3;
3533 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3534 pkt_type &= ~HCI_2DH5;
3536 pkt_type |= HCI_2DH5;
3538 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3539 pkt_type &= ~HCI_3DH1;
3541 pkt_type |= HCI_3DH1;
3543 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3544 pkt_type &= ~HCI_3DH3;
3546 pkt_type |= HCI_3DH3;
3548 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3549 pkt_type &= ~HCI_3DH5;
3551 pkt_type |= HCI_3DH5;
3553 if (pkt_type != hdev->pkt_type) {
3554 hdev->pkt_type = pkt_type;
3558 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3559 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3561 mgmt_phy_configuration_changed(hdev, sk);
3563 err = mgmt_cmd_complete(sk, hdev->id,
3564 MGMT_OP_SET_PHY_CONFIGURATION,
3570 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3577 hci_req_init(&req, hdev);
3579 memset(&cp_phy, 0, sizeof(cp_phy));
3581 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3582 cp_phy.all_phys |= 0x01;
3584 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3585 cp_phy.all_phys |= 0x02;
3587 if (selected_phys & MGMT_PHY_LE_1M_TX)
3588 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3590 if (selected_phys & MGMT_PHY_LE_2M_TX)
3591 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3593 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3594 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3596 if (selected_phys & MGMT_PHY_LE_1M_RX)
3597 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3599 if (selected_phys & MGMT_PHY_LE_2M_RX)
3600 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3602 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3603 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3605 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3607 err = hci_req_run_skb(&req, set_default_phy_complete);
3609 mgmt_pending_remove(cmd);
3612 hci_dev_unlock(hdev);
3617 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3620 int err = MGMT_STATUS_SUCCESS;
3621 struct mgmt_cp_set_blocked_keys *keys = data;
3622 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3623 sizeof(struct mgmt_blocked_key_info));
3624 u16 key_count, expected_len;
3627 bt_dev_dbg(hdev, "sock %p", sk);
3629 key_count = __le16_to_cpu(keys->key_count);
3630 if (key_count > max_key_count) {
3631 bt_dev_err(hdev, "too big key_count value %u", key_count);
3632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3633 MGMT_STATUS_INVALID_PARAMS);
3636 expected_len = struct_size(keys, keys, key_count);
3637 if (expected_len != len) {
3638 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3641 MGMT_STATUS_INVALID_PARAMS);
3646 hci_blocked_keys_clear(hdev);
3648 for (i = 0; i < keys->key_count; ++i) {
3649 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3652 err = MGMT_STATUS_NO_RESOURCES;
3656 b->type = keys->keys[i].type;
3657 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3658 list_add_rcu(&b->list, &hdev->blocked_keys);
3660 hci_dev_unlock(hdev);
3662 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3666 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3667 void *data, u16 len)
3669 struct mgmt_mode *cp = data;
3671 bool changed = false;
3673 bt_dev_dbg(hdev, "sock %p", sk);
3675 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3676 return mgmt_cmd_status(sk, hdev->id,
3677 MGMT_OP_SET_WIDEBAND_SPEECH,
3678 MGMT_STATUS_NOT_SUPPORTED);
3680 if (cp->val != 0x00 && cp->val != 0x01)
3681 return mgmt_cmd_status(sk, hdev->id,
3682 MGMT_OP_SET_WIDEBAND_SPEECH,
3683 MGMT_STATUS_INVALID_PARAMS);
3687 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3688 err = mgmt_cmd_status(sk, hdev->id,
3689 MGMT_OP_SET_WIDEBAND_SPEECH,
3694 if (hdev_is_powered(hdev) &&
3695 !!cp->val != hci_dev_test_flag(hdev,
3696 HCI_WIDEBAND_SPEECH_ENABLED)) {
3697 err = mgmt_cmd_status(sk, hdev->id,
3698 MGMT_OP_SET_WIDEBAND_SPEECH,
3699 MGMT_STATUS_REJECTED);
3704 changed = !hci_dev_test_and_set_flag(hdev,
3705 HCI_WIDEBAND_SPEECH_ENABLED);
3707 changed = hci_dev_test_and_clear_flag(hdev,
3708 HCI_WIDEBAND_SPEECH_ENABLED);
3710 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3715 err = new_settings(hdev, sk);
3718 hci_dev_unlock(hdev);
3722 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3723 void *data, u16 data_len)
3726 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3729 u8 tx_power_range[2];
3731 bt_dev_dbg(hdev, "sock %p", sk);
3733 memset(&buf, 0, sizeof(buf));
3737 /* When the Read Simple Pairing Options command is supported, then
3738 * the remote public key validation is supported.
3740 * Alternatively, when Microsoft extensions are available, they can
3741 * indicate support for public key validation as well.
3743 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3744 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3746 flags |= 0x02; /* Remote public key validation (LE) */
3748 /* When the Read Encryption Key Size command is supported, then the
3749 * encryption key size is enforced.
3751 if (hdev->commands[20] & 0x10)
3752 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3754 flags |= 0x08; /* Encryption key size enforcement (LE) */
3756 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3759 /* When the Read Simple Pairing Options command is supported, then
3760 * also max encryption key size information is provided.
3762 if (hdev->commands[41] & 0x08)
3763 cap_len = eir_append_le16(rp->cap, cap_len,
3764 MGMT_CAP_MAX_ENC_KEY_SIZE,
3765 hdev->max_enc_key_size);
3767 cap_len = eir_append_le16(rp->cap, cap_len,
3768 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3769 SMP_MAX_ENC_KEY_SIZE);
3771 /* Append the min/max LE tx power parameters if we were able to fetch
3772 * it from the controller
3774 if (hdev->commands[38] & 0x80) {
3775 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3776 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3777 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3781 rp->cap_len = cpu_to_le16(cap_len);
3783 hci_dev_unlock(hdev);
3785 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3786 rp, sizeof(*rp) + cap_len);
3789 #ifdef CONFIG_BT_FEATURE_DEBUG
3790 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3791 static const u8 debug_uuid[16] = {
3792 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3793 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3797 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3798 static const u8 simult_central_periph_uuid[16] = {
3799 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3800 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3803 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3804 static const u8 rpa_resolution_uuid[16] = {
3805 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3806 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3809 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3810 void *data, u16 data_len)
3812 char buf[62]; /* Enough space for 3 features */
3813 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3817 bt_dev_dbg(hdev, "sock %p", sk);
3819 memset(&buf, 0, sizeof(buf));
3821 #ifdef CONFIG_BT_FEATURE_DEBUG
3823 flags = bt_dbg_get() ? BIT(0) : 0;
3825 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3826 rp->features[idx].flags = cpu_to_le32(flags);
3832 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3833 (hdev->le_states[4] & 0x08) && /* Central */
3834 (hdev->le_states[4] & 0x40) && /* Peripheral */
3835 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3840 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3841 rp->features[idx].flags = cpu_to_le32(flags);
3845 if (hdev && use_ll_privacy(hdev)) {
3846 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3847 flags = BIT(0) | BIT(1);
3851 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3852 rp->features[idx].flags = cpu_to_le32(flags);
3856 rp->feature_count = cpu_to_le16(idx);
3858 /* After reading the experimental features information, enable
3859 * the events to update client on any future change.
3861 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3863 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3864 MGMT_OP_READ_EXP_FEATURES_INFO,
3865 0, rp, sizeof(*rp) + (20 * idx));
3868 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3871 struct mgmt_ev_exp_feature_changed ev;
3873 memset(&ev, 0, sizeof(ev));
3874 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3875 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3877 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3879 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3883 #ifdef CONFIG_BT_FEATURE_DEBUG
3884 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3886 struct mgmt_ev_exp_feature_changed ev;
3888 memset(&ev, 0, sizeof(ev));
3889 memcpy(ev.uuid, debug_uuid, 16);
3890 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3892 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3894 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3898 #define EXP_FEAT(_uuid, _set_func) \
3901 .set_func = _set_func, \
3904 /* The zero key uuid is special. Multiple exp features are set through it. */
3905 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
3906 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3908 struct mgmt_rp_set_exp_feature rp;
3910 memset(rp.uuid, 0, 16);
3911 rp.flags = cpu_to_le32(0);
3913 #ifdef CONFIG_BT_FEATURE_DEBUG
3915 bool changed = bt_dbg_get();
3920 exp_debug_feature_changed(false, sk);
3924 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3927 changed = hci_dev_test_and_clear_flag(hdev,
3928 HCI_ENABLE_LL_PRIVACY);
3930 exp_ll_privacy_feature_changed(false, hdev, sk);
3933 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3935 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3936 MGMT_OP_SET_EXP_FEATURE, 0,
3940 #ifdef CONFIG_BT_FEATURE_DEBUG
3941 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
3942 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3944 struct mgmt_rp_set_exp_feature rp;
3949 /* Command requires to use the non-controller index */
3951 return mgmt_cmd_status(sk, hdev->id,
3952 MGMT_OP_SET_EXP_FEATURE,
3953 MGMT_STATUS_INVALID_INDEX);
3955 /* Parameters are limited to a single octet */
3956 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3957 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3958 MGMT_OP_SET_EXP_FEATURE,
3959 MGMT_STATUS_INVALID_PARAMS);
3961 /* Only boolean on/off is supported */
3962 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3963 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3964 MGMT_OP_SET_EXP_FEATURE,
3965 MGMT_STATUS_INVALID_PARAMS);
3967 val = !!cp->param[0];
3968 changed = val ? !bt_dbg_get() : bt_dbg_get();
3971 memcpy(rp.uuid, debug_uuid, 16);
3972 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3974 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3976 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3977 MGMT_OP_SET_EXP_FEATURE, 0,
3981 exp_debug_feature_changed(val, sk);
3987 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
3988 struct mgmt_cp_set_exp_feature *cp,
3991 struct mgmt_rp_set_exp_feature rp;
3996 /* Command requires to use the controller index */
3998 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3999 MGMT_OP_SET_EXP_FEATURE,
4000 MGMT_STATUS_INVALID_INDEX);
4002 /* Changes can only be made when controller is powered down */
4003 if (hdev_is_powered(hdev))
4004 return mgmt_cmd_status(sk, hdev->id,
4005 MGMT_OP_SET_EXP_FEATURE,
4006 MGMT_STATUS_REJECTED);
4008 /* Parameters are limited to a single octet */
4009 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4010 return mgmt_cmd_status(sk, hdev->id,
4011 MGMT_OP_SET_EXP_FEATURE,
4012 MGMT_STATUS_INVALID_PARAMS);
4014 /* Only boolean on/off is supported */
4015 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4016 return mgmt_cmd_status(sk, hdev->id,
4017 MGMT_OP_SET_EXP_FEATURE,
4018 MGMT_STATUS_INVALID_PARAMS);
4020 val = !!cp->param[0];
4023 changed = !hci_dev_test_and_set_flag(hdev,
4024 HCI_ENABLE_LL_PRIVACY);
4025 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4027 /* Enable LL privacy + supported settings changed */
4028 flags = BIT(0) | BIT(1);
4030 changed = hci_dev_test_and_clear_flag(hdev,
4031 HCI_ENABLE_LL_PRIVACY);
4033 /* Disable LL privacy + supported settings changed */
4037 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4038 rp.flags = cpu_to_le32(flags);
4040 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4042 err = mgmt_cmd_complete(sk, hdev->id,
4043 MGMT_OP_SET_EXP_FEATURE, 0,
4047 exp_ll_privacy_feature_changed(val, hdev, sk);
4052 static const struct mgmt_exp_feature {
4054 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4055 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4056 } exp_features[] = {
4057 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4058 #ifdef CONFIG_BT_FEATURE_DEBUG
4059 EXP_FEAT(debug_uuid, set_debug_func),
4061 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4063 /* end with a null feature */
4064 EXP_FEAT(NULL, NULL)
4067 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4068 void *data, u16 data_len)
4070 struct mgmt_cp_set_exp_feature *cp = data;
4073 bt_dev_dbg(hdev, "sock %p", sk);
4075 for (i = 0; exp_features[i].uuid; i++) {
4076 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4077 return exp_features[i].set_func(sk, hdev, cp, data_len);
4080 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4081 MGMT_OP_SET_EXP_FEATURE,
4082 MGMT_STATUS_NOT_SUPPORTED);
4085 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4087 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4090 struct mgmt_cp_get_device_flags *cp = data;
4091 struct mgmt_rp_get_device_flags rp;
4092 struct bdaddr_list_with_flags *br_params;
4093 struct hci_conn_params *params;
4094 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4095 u32 current_flags = 0;
4096 u8 status = MGMT_STATUS_INVALID_PARAMS;
4098 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4099 &cp->addr.bdaddr, cp->addr.type);
4103 memset(&rp, 0, sizeof(rp));
4105 if (cp->addr.type == BDADDR_BREDR) {
4106 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4112 current_flags = br_params->current_flags;
4114 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4115 le_addr_type(cp->addr.type));
4120 current_flags = params->current_flags;
4123 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4124 rp.addr.type = cp->addr.type;
4125 rp.supported_flags = cpu_to_le32(supported_flags);
4126 rp.current_flags = cpu_to_le32(current_flags);
4128 status = MGMT_STATUS_SUCCESS;
4131 hci_dev_unlock(hdev);
4133 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4137 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4138 bdaddr_t *bdaddr, u8 bdaddr_type,
4139 u32 supported_flags, u32 current_flags)
4141 struct mgmt_ev_device_flags_changed ev;
4143 bacpy(&ev.addr.bdaddr, bdaddr);
4144 ev.addr.type = bdaddr_type;
4145 ev.supported_flags = cpu_to_le32(supported_flags);
4146 ev.current_flags = cpu_to_le32(current_flags);
4148 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4151 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4154 struct mgmt_cp_set_device_flags *cp = data;
4155 struct bdaddr_list_with_flags *br_params;
4156 struct hci_conn_params *params;
4157 u8 status = MGMT_STATUS_INVALID_PARAMS;
4158 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4159 u32 current_flags = __le32_to_cpu(cp->current_flags);
4161 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4162 &cp->addr.bdaddr, cp->addr.type,
4163 __le32_to_cpu(current_flags));
4165 if ((supported_flags | current_flags) != supported_flags) {
4166 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4167 current_flags, supported_flags);
4173 if (cp->addr.type == BDADDR_BREDR) {
4174 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4179 br_params->current_flags = current_flags;
4180 status = MGMT_STATUS_SUCCESS;
4182 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4183 &cp->addr.bdaddr, cp->addr.type);
4186 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4187 le_addr_type(cp->addr.type));
4189 params->current_flags = current_flags;
4190 status = MGMT_STATUS_SUCCESS;
4192 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4194 le_addr_type(cp->addr.type));
4199 hci_dev_unlock(hdev);
4201 if (status == MGMT_STATUS_SUCCESS)
4202 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4203 supported_flags, current_flags);
4205 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4206 &cp->addr, sizeof(cp->addr));
4209 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4212 struct mgmt_ev_adv_monitor_added ev;
4214 ev.monitor_handle = cpu_to_le16(handle);
4216 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4219 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4221 struct mgmt_ev_adv_monitor_removed ev;
4222 struct mgmt_pending_cmd *cmd;
4223 struct sock *sk_skip = NULL;
4224 struct mgmt_cp_remove_adv_monitor *cp;
4226 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4230 if (cp->monitor_handle)
4234 ev.monitor_handle = cpu_to_le16(handle);
4236 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4239 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4240 void *data, u16 len)
4242 struct adv_monitor *monitor = NULL;
4243 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4246 __u32 supported = 0;
4248 __u16 num_handles = 0;
4249 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4251 BT_DBG("request for %s", hdev->name);
4255 if (msft_monitor_supported(hdev))
4256 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4258 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4259 handles[num_handles++] = monitor->handle;
4261 hci_dev_unlock(hdev);
4263 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4264 rp = kmalloc(rp_size, GFP_KERNEL);
4268 /* All supported features are currently enabled */
4269 enabled = supported;
4271 rp->supported_features = cpu_to_le32(supported);
4272 rp->enabled_features = cpu_to_le32(enabled);
4273 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4274 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4275 rp->num_handles = cpu_to_le16(num_handles);
4277 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4279 err = mgmt_cmd_complete(sk, hdev->id,
4280 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4281 MGMT_STATUS_SUCCESS, rp, rp_size);
4288 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4290 struct mgmt_rp_add_adv_patterns_monitor rp;
4291 struct mgmt_pending_cmd *cmd;
4292 struct adv_monitor *monitor;
4297 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4299 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4304 monitor = cmd->user_data;
4305 rp.monitor_handle = cpu_to_le16(monitor->handle);
4308 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4309 hdev->adv_monitors_cnt++;
4310 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4311 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4312 hci_update_background_scan(hdev);
4315 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4316 mgmt_status(status), &rp, sizeof(rp));
4317 mgmt_pending_remove(cmd);
4320 hci_dev_unlock(hdev);
4321 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4322 rp.monitor_handle, status);
4327 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4328 struct adv_monitor *m, u8 status,
4329 void *data, u16 len, u16 op)
4331 struct mgmt_rp_add_adv_patterns_monitor rp;
4332 struct mgmt_pending_cmd *cmd;
4341 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4342 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4343 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4344 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4345 status = MGMT_STATUS_BUSY;
4349 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4351 status = MGMT_STATUS_NO_RESOURCES;
4356 pending = hci_add_adv_monitor(hdev, m, &err);
4358 if (err == -ENOSPC || err == -ENOMEM)
4359 status = MGMT_STATUS_NO_RESOURCES;
4360 else if (err == -EINVAL)
4361 status = MGMT_STATUS_INVALID_PARAMS;
4363 status = MGMT_STATUS_FAILED;
4365 mgmt_pending_remove(cmd);
4370 mgmt_pending_remove(cmd);
4371 rp.monitor_handle = cpu_to_le16(m->handle);
4372 mgmt_adv_monitor_added(sk, hdev, m->handle);
4373 m->state = ADV_MONITOR_STATE_REGISTERED;
4374 hdev->adv_monitors_cnt++;
4376 hci_dev_unlock(hdev);
4377 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4381 hci_dev_unlock(hdev);
4386 hci_free_adv_monitor(hdev, m);
4387 hci_dev_unlock(hdev);
4388 return mgmt_cmd_status(sk, hdev->id, op, status);
4391 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4392 struct mgmt_adv_rssi_thresholds *rssi)
4395 m->rssi.low_threshold = rssi->low_threshold;
4396 m->rssi.low_threshold_timeout =
4397 __le16_to_cpu(rssi->low_threshold_timeout);
4398 m->rssi.high_threshold = rssi->high_threshold;
4399 m->rssi.high_threshold_timeout =
4400 __le16_to_cpu(rssi->high_threshold_timeout);
4401 m->rssi.sampling_period = rssi->sampling_period;
4403 /* Default values. These numbers are the least constricting
4404 * parameters for MSFT API to work, so it behaves as if there
4405 * are no rssi parameter to consider. May need to be changed
4406 * if other API are to be supported.
4408 m->rssi.low_threshold = -127;
4409 m->rssi.low_threshold_timeout = 60;
4410 m->rssi.high_threshold = -127;
4411 m->rssi.high_threshold_timeout = 0;
4412 m->rssi.sampling_period = 0;
4416 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4417 struct mgmt_adv_pattern *patterns)
4419 u8 offset = 0, length = 0;
4420 struct adv_pattern *p = NULL;
4423 for (i = 0; i < pattern_count; i++) {
4424 offset = patterns[i].offset;
4425 length = patterns[i].length;
4426 if (offset >= HCI_MAX_AD_LENGTH ||
4427 length > HCI_MAX_AD_LENGTH ||
4428 (offset + length) > HCI_MAX_AD_LENGTH)
4429 return MGMT_STATUS_INVALID_PARAMS;
4431 p = kmalloc(sizeof(*p), GFP_KERNEL);
4433 return MGMT_STATUS_NO_RESOURCES;
4435 p->ad_type = patterns[i].ad_type;
4436 p->offset = patterns[i].offset;
4437 p->length = patterns[i].length;
4438 memcpy(p->value, patterns[i].value, p->length);
4440 INIT_LIST_HEAD(&p->list);
4441 list_add(&p->list, &m->patterns);
4444 return MGMT_STATUS_SUCCESS;
4447 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4448 void *data, u16 len)
4450 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4451 struct adv_monitor *m = NULL;
4452 u8 status = MGMT_STATUS_SUCCESS;
4453 size_t expected_size = sizeof(*cp);
4455 BT_DBG("request for %s", hdev->name);
4457 if (len <= sizeof(*cp)) {
4458 status = MGMT_STATUS_INVALID_PARAMS;
4462 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4463 if (len != expected_size) {
4464 status = MGMT_STATUS_INVALID_PARAMS;
4468 m = kzalloc(sizeof(*m), GFP_KERNEL);
4470 status = MGMT_STATUS_NO_RESOURCES;
4474 INIT_LIST_HEAD(&m->patterns);
4476 parse_adv_monitor_rssi(m, NULL);
4477 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4480 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4481 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4484 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4485 void *data, u16 len)
4487 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4488 struct adv_monitor *m = NULL;
4489 u8 status = MGMT_STATUS_SUCCESS;
4490 size_t expected_size = sizeof(*cp);
4492 BT_DBG("request for %s", hdev->name);
4494 if (len <= sizeof(*cp)) {
4495 status = MGMT_STATUS_INVALID_PARAMS;
4499 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4500 if (len != expected_size) {
4501 status = MGMT_STATUS_INVALID_PARAMS;
4505 m = kzalloc(sizeof(*m), GFP_KERNEL);
4507 status = MGMT_STATUS_NO_RESOURCES;
4511 INIT_LIST_HEAD(&m->patterns);
4513 parse_adv_monitor_rssi(m, &cp->rssi);
4514 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4517 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4518 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4521 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4523 struct mgmt_rp_remove_adv_monitor rp;
4524 struct mgmt_cp_remove_adv_monitor *cp;
4525 struct mgmt_pending_cmd *cmd;
4530 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4535 rp.monitor_handle = cp->monitor_handle;
4538 hci_update_background_scan(hdev);
4540 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4541 mgmt_status(status), &rp, sizeof(rp));
4542 mgmt_pending_remove(cmd);
4545 hci_dev_unlock(hdev);
4546 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4547 rp.monitor_handle, status);
4552 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4553 void *data, u16 len)
4555 struct mgmt_cp_remove_adv_monitor *cp = data;
4556 struct mgmt_rp_remove_adv_monitor rp;
4557 struct mgmt_pending_cmd *cmd;
4558 u16 handle = __le16_to_cpu(cp->monitor_handle);
4562 BT_DBG("request for %s", hdev->name);
4563 rp.monitor_handle = cp->monitor_handle;
4567 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4568 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4569 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4570 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4571 status = MGMT_STATUS_BUSY;
4575 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4577 status = MGMT_STATUS_NO_RESOURCES;
4582 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4584 pending = hci_remove_all_adv_monitor(hdev, &err);
4587 mgmt_pending_remove(cmd);
4590 status = MGMT_STATUS_INVALID_INDEX;
4592 status = MGMT_STATUS_FAILED;
4597 /* monitor can be removed without forwarding request to controller */
4599 mgmt_pending_remove(cmd);
4600 hci_dev_unlock(hdev);
4602 return mgmt_cmd_complete(sk, hdev->id,
4603 MGMT_OP_REMOVE_ADV_MONITOR,
4604 MGMT_STATUS_SUCCESS,
4608 hci_dev_unlock(hdev);
4612 hci_dev_unlock(hdev);
4613 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4617 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4618 u16 opcode, struct sk_buff *skb)
4620 struct mgmt_rp_read_local_oob_data mgmt_rp;
4621 size_t rp_size = sizeof(mgmt_rp);
4622 struct mgmt_pending_cmd *cmd;
4624 bt_dev_dbg(hdev, "status %u", status);
4626 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4630 if (status || !skb) {
4631 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4632 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4636 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4638 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4639 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
4641 if (skb->len < sizeof(*rp)) {
4642 mgmt_cmd_status(cmd->sk, hdev->id,
4643 MGMT_OP_READ_LOCAL_OOB_DATA,
4644 MGMT_STATUS_FAILED);
4648 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4649 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
4651 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4653 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4655 if (skb->len < sizeof(*rp)) {
4656 mgmt_cmd_status(cmd->sk, hdev->id,
4657 MGMT_OP_READ_LOCAL_OOB_DATA,
4658 MGMT_STATUS_FAILED);
4662 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4663 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4665 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4666 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4669 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4670 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4673 mgmt_pending_remove(cmd);
4676 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4677 void *data, u16 data_len)
4679 struct mgmt_pending_cmd *cmd;
4680 struct hci_request req;
4683 bt_dev_dbg(hdev, "sock %p", sk);
4687 if (!hdev_is_powered(hdev)) {
4688 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4689 MGMT_STATUS_NOT_POWERED);
4693 if (!lmp_ssp_capable(hdev)) {
4694 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4695 MGMT_STATUS_NOT_SUPPORTED);
4699 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4700 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4705 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4711 hci_req_init(&req, hdev);
4713 if (bredr_sc_enabled(hdev))
4714 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4716 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4718 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4720 mgmt_pending_remove(cmd);
4723 hci_dev_unlock(hdev);
4727 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4728 void *data, u16 len)
4730 struct mgmt_addr_info *addr = data;
4733 bt_dev_dbg(hdev, "sock %p", sk);
4735 if (!bdaddr_type_is_valid(addr->type))
4736 return mgmt_cmd_complete(sk, hdev->id,
4737 MGMT_OP_ADD_REMOTE_OOB_DATA,
4738 MGMT_STATUS_INVALID_PARAMS,
4739 addr, sizeof(*addr));
4743 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4744 struct mgmt_cp_add_remote_oob_data *cp = data;
4747 if (cp->addr.type != BDADDR_BREDR) {
4748 err = mgmt_cmd_complete(sk, hdev->id,
4749 MGMT_OP_ADD_REMOTE_OOB_DATA,
4750 MGMT_STATUS_INVALID_PARAMS,
4751 &cp->addr, sizeof(cp->addr));
4755 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4756 cp->addr.type, cp->hash,
4757 cp->rand, NULL, NULL);
4759 status = MGMT_STATUS_FAILED;
4761 status = MGMT_STATUS_SUCCESS;
4763 err = mgmt_cmd_complete(sk, hdev->id,
4764 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4765 &cp->addr, sizeof(cp->addr));
4766 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4767 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4768 u8 *rand192, *hash192, *rand256, *hash256;
4771 if (bdaddr_type_is_le(cp->addr.type)) {
4772 /* Enforce zero-valued 192-bit parameters as
4773 * long as legacy SMP OOB isn't implemented.
4775 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4776 memcmp(cp->hash192, ZERO_KEY, 16)) {
4777 err = mgmt_cmd_complete(sk, hdev->id,
4778 MGMT_OP_ADD_REMOTE_OOB_DATA,
4779 MGMT_STATUS_INVALID_PARAMS,
4780 addr, sizeof(*addr));
4787 /* In case one of the P-192 values is set to zero,
4788 * then just disable OOB data for P-192.
4790 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4791 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4795 rand192 = cp->rand192;
4796 hash192 = cp->hash192;
4800 /* In case one of the P-256 values is set to zero, then just
4801 * disable OOB data for P-256.
4803 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4804 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4808 rand256 = cp->rand256;
4809 hash256 = cp->hash256;
4812 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4813 cp->addr.type, hash192, rand192,
4816 status = MGMT_STATUS_FAILED;
4818 status = MGMT_STATUS_SUCCESS;
4820 err = mgmt_cmd_complete(sk, hdev->id,
4821 MGMT_OP_ADD_REMOTE_OOB_DATA,
4822 status, &cp->addr, sizeof(cp->addr));
4824 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4826 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4827 MGMT_STATUS_INVALID_PARAMS);
4831 hci_dev_unlock(hdev);
4835 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4836 void *data, u16 len)
4838 struct mgmt_cp_remove_remote_oob_data *cp = data;
4842 bt_dev_dbg(hdev, "sock %p", sk);
4844 if (cp->addr.type != BDADDR_BREDR)
4845 return mgmt_cmd_complete(sk, hdev->id,
4846 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4847 MGMT_STATUS_INVALID_PARAMS,
4848 &cp->addr, sizeof(cp->addr));
4852 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4853 hci_remote_oob_data_clear(hdev);
4854 status = MGMT_STATUS_SUCCESS;
4858 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4860 status = MGMT_STATUS_INVALID_PARAMS;
4862 status = MGMT_STATUS_SUCCESS;
4865 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4866 status, &cp->addr, sizeof(cp->addr));
4868 hci_dev_unlock(hdev);
4872 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4874 struct mgmt_pending_cmd *cmd;
4876 bt_dev_dbg(hdev, "status %u", status);
4880 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4882 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4885 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4888 cmd->cmd_complete(cmd, mgmt_status(status));
4889 mgmt_pending_remove(cmd);
4892 hci_dev_unlock(hdev);
4894 /* Handle suspend notifier */
4895 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4896 hdev->suspend_tasks)) {
4897 bt_dev_dbg(hdev, "Unpaused discovery");
4898 wake_up(&hdev->suspend_wait_q);
4902 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4903 uint8_t *mgmt_status)
4906 case DISCOV_TYPE_LE:
4907 *mgmt_status = mgmt_le_support(hdev);
4911 case DISCOV_TYPE_INTERLEAVED:
4912 *mgmt_status = mgmt_le_support(hdev);
4916 case DISCOV_TYPE_BREDR:
4917 *mgmt_status = mgmt_bredr_support(hdev);
4922 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4929 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4930 u16 op, void *data, u16 len)
4932 struct mgmt_cp_start_discovery *cp = data;
4933 struct mgmt_pending_cmd *cmd;
4937 bt_dev_dbg(hdev, "sock %p", sk);
4941 if (!hdev_is_powered(hdev)) {
4942 err = mgmt_cmd_complete(sk, hdev->id, op,
4943 MGMT_STATUS_NOT_POWERED,
4944 &cp->type, sizeof(cp->type));
4948 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4949 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4950 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4951 &cp->type, sizeof(cp->type));
4955 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4956 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4957 &cp->type, sizeof(cp->type));
4961 /* Can't start discovery when it is paused */
4962 if (hdev->discovery_paused) {
4963 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4964 &cp->type, sizeof(cp->type));
4968 /* Clear the discovery filter first to free any previously
4969 * allocated memory for the UUID list.
4971 hci_discovery_filter_clear(hdev);
4973 hdev->discovery.type = cp->type;
4974 hdev->discovery.report_invalid_rssi = false;
4975 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4976 hdev->discovery.limited = true;
4978 hdev->discovery.limited = false;
4980 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4986 cmd->cmd_complete = generic_cmd_complete;
4988 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4989 queue_work(hdev->req_workqueue, &hdev->discov_update);
4993 hci_dev_unlock(hdev);
4997 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4998 void *data, u16 len)
5000 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5004 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5005 void *data, u16 len)
5007 return start_discovery_internal(sk, hdev,
5008 MGMT_OP_START_LIMITED_DISCOVERY,
5012 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
5015 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
5019 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5020 void *data, u16 len)
5022 struct mgmt_cp_start_service_discovery *cp = data;
5023 struct mgmt_pending_cmd *cmd;
5024 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5025 u16 uuid_count, expected_len;
5029 bt_dev_dbg(hdev, "sock %p", sk);
5033 if (!hdev_is_powered(hdev)) {
5034 err = mgmt_cmd_complete(sk, hdev->id,
5035 MGMT_OP_START_SERVICE_DISCOVERY,
5036 MGMT_STATUS_NOT_POWERED,
5037 &cp->type, sizeof(cp->type));
5041 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5042 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5043 err = mgmt_cmd_complete(sk, hdev->id,
5044 MGMT_OP_START_SERVICE_DISCOVERY,
5045 MGMT_STATUS_BUSY, &cp->type,
5050 if (hdev->discovery_paused) {
5051 err = mgmt_cmd_complete(sk, hdev->id,
5052 MGMT_OP_START_SERVICE_DISCOVERY,
5053 MGMT_STATUS_BUSY, &cp->type,
5058 uuid_count = __le16_to_cpu(cp->uuid_count);
5059 if (uuid_count > max_uuid_count) {
5060 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5062 err = mgmt_cmd_complete(sk, hdev->id,
5063 MGMT_OP_START_SERVICE_DISCOVERY,
5064 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5069 expected_len = sizeof(*cp) + uuid_count * 16;
5070 if (expected_len != len) {
5071 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5073 err = mgmt_cmd_complete(sk, hdev->id,
5074 MGMT_OP_START_SERVICE_DISCOVERY,
5075 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5080 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5081 err = mgmt_cmd_complete(sk, hdev->id,
5082 MGMT_OP_START_SERVICE_DISCOVERY,
5083 status, &cp->type, sizeof(cp->type));
5087 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5094 cmd->cmd_complete = service_discovery_cmd_complete;
5096 /* Clear the discovery filter first to free any previously
5097 * allocated memory for the UUID list.
5099 hci_discovery_filter_clear(hdev);
5101 hdev->discovery.result_filtering = true;
5102 hdev->discovery.type = cp->type;
5103 hdev->discovery.rssi = cp->rssi;
5104 hdev->discovery.uuid_count = uuid_count;
5106 if (uuid_count > 0) {
5107 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5109 if (!hdev->discovery.uuids) {
5110 err = mgmt_cmd_complete(sk, hdev->id,
5111 MGMT_OP_START_SERVICE_DISCOVERY,
5113 &cp->type, sizeof(cp->type));
5114 mgmt_pending_remove(cmd);
5119 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5120 queue_work(hdev->req_workqueue, &hdev->discov_update);
5124 hci_dev_unlock(hdev);
5128 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5130 struct mgmt_pending_cmd *cmd;
5132 bt_dev_dbg(hdev, "status %u", status);
5136 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5138 cmd->cmd_complete(cmd, mgmt_status(status));
5139 mgmt_pending_remove(cmd);
5142 hci_dev_unlock(hdev);
5144 /* Handle suspend notifier */
5145 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5146 bt_dev_dbg(hdev, "Paused discovery");
5147 wake_up(&hdev->suspend_wait_q);
5151 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5154 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5155 struct mgmt_pending_cmd *cmd;
5158 bt_dev_dbg(hdev, "sock %p", sk);
5162 if (!hci_discovery_active(hdev)) {
5163 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5164 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5165 sizeof(mgmt_cp->type));
5169 if (hdev->discovery.type != mgmt_cp->type) {
5170 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5171 MGMT_STATUS_INVALID_PARAMS,
5172 &mgmt_cp->type, sizeof(mgmt_cp->type));
5176 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5182 cmd->cmd_complete = generic_cmd_complete;
5184 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5185 queue_work(hdev->req_workqueue, &hdev->discov_update);
5189 hci_dev_unlock(hdev);
5193 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5196 struct mgmt_cp_confirm_name *cp = data;
5197 struct inquiry_entry *e;
5200 bt_dev_dbg(hdev, "sock %p", sk);
5204 if (!hci_discovery_active(hdev)) {
5205 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5206 MGMT_STATUS_FAILED, &cp->addr,
5211 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5213 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5214 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5219 if (cp->name_known) {
5220 e->name_state = NAME_KNOWN;
5223 e->name_state = NAME_NEEDED;
5224 hci_inquiry_cache_update_resolve(hdev, e);
5227 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5228 &cp->addr, sizeof(cp->addr));
5231 hci_dev_unlock(hdev);
5235 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5238 struct mgmt_cp_block_device *cp = data;
5242 bt_dev_dbg(hdev, "sock %p", sk);
5244 if (!bdaddr_type_is_valid(cp->addr.type))
5245 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5246 MGMT_STATUS_INVALID_PARAMS,
5247 &cp->addr, sizeof(cp->addr));
5251 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5254 status = MGMT_STATUS_FAILED;
5258 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5260 status = MGMT_STATUS_SUCCESS;
5263 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5264 &cp->addr, sizeof(cp->addr));
5266 hci_dev_unlock(hdev);
5271 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5274 struct mgmt_cp_unblock_device *cp = data;
5278 bt_dev_dbg(hdev, "sock %p", sk);
5280 if (!bdaddr_type_is_valid(cp->addr.type))
5281 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5282 MGMT_STATUS_INVALID_PARAMS,
5283 &cp->addr, sizeof(cp->addr));
5287 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5290 status = MGMT_STATUS_INVALID_PARAMS;
5294 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5296 status = MGMT_STATUS_SUCCESS;
5299 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5300 &cp->addr, sizeof(cp->addr));
5302 hci_dev_unlock(hdev);
5307 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5310 struct mgmt_cp_set_device_id *cp = data;
5311 struct hci_request req;
5315 bt_dev_dbg(hdev, "sock %p", sk);
5317 source = __le16_to_cpu(cp->source);
5319 if (source > 0x0002)
5320 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5321 MGMT_STATUS_INVALID_PARAMS);
5325 hdev->devid_source = source;
5326 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5327 hdev->devid_product = __le16_to_cpu(cp->product);
5328 hdev->devid_version = __le16_to_cpu(cp->version);
5330 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5333 hci_req_init(&req, hdev);
5334 __hci_req_update_eir(&req);
5335 hci_req_run(&req, NULL);
5337 hci_dev_unlock(hdev);
5342 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5345 bt_dev_dbg(hdev, "status %u", status);
5348 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5351 struct cmd_lookup match = { NULL, hdev };
5352 struct hci_request req;
5354 struct adv_info *adv_instance;
5360 u8 mgmt_err = mgmt_status(status);
5362 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5363 cmd_status_rsp, &mgmt_err);
5367 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5368 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5370 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5372 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5375 new_settings(hdev, match.sk);
5380 /* Handle suspend notifier */
5381 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5382 hdev->suspend_tasks)) {
5383 bt_dev_dbg(hdev, "Paused advertising");
5384 wake_up(&hdev->suspend_wait_q);
5385 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5386 hdev->suspend_tasks)) {
5387 bt_dev_dbg(hdev, "Unpaused advertising");
5388 wake_up(&hdev->suspend_wait_q);
5391 /* If "Set Advertising" was just disabled and instance advertising was
5392 * set up earlier, then re-enable multi-instance advertising.
5394 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5395 list_empty(&hdev->adv_instances))
5398 instance = hdev->cur_adv_instance;
5400 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5401 struct adv_info, list);
5405 instance = adv_instance->instance;
5408 hci_req_init(&req, hdev);
5410 err = __hci_req_schedule_adv_instance(&req, instance, true);
5413 err = hci_req_run(&req, enable_advertising_instance);
5416 bt_dev_err(hdev, "failed to re-configure advertising");
5419 hci_dev_unlock(hdev);
5422 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5425 struct mgmt_mode *cp = data;
5426 struct mgmt_pending_cmd *cmd;
5427 struct hci_request req;
5431 bt_dev_dbg(hdev, "sock %p", sk);
5433 status = mgmt_le_support(hdev);
5435 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5438 /* Enabling the experimental LL Privay support disables support for
5441 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5443 MGMT_STATUS_NOT_SUPPORTED);
5445 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5446 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5447 MGMT_STATUS_INVALID_PARAMS);
5449 if (hdev->advertising_paused)
5450 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5457 /* The following conditions are ones which mean that we should
5458 * not do any HCI communication but directly send a mgmt
5459 * response to user space (after toggling the flag if
5462 if (!hdev_is_powered(hdev) ||
5463 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5464 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5465 hci_conn_num(hdev, LE_LINK) > 0 ||
5466 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5467 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5471 hdev->cur_adv_instance = 0x00;
5472 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5473 if (cp->val == 0x02)
5474 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5476 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5478 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5479 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5482 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5487 err = new_settings(hdev, sk);
5492 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5493 pending_find(MGMT_OP_SET_LE, hdev)) {
5494 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5499 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5505 hci_req_init(&req, hdev);
5507 if (cp->val == 0x02)
5508 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5510 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5512 cancel_adv_timeout(hdev);
5515 /* Switch to instance "0" for the Set Advertising setting.
5516 * We cannot use update_[adv|scan_rsp]_data() here as the
5517 * HCI_ADVERTISING flag is not yet set.
5519 hdev->cur_adv_instance = 0x00;
5521 if (ext_adv_capable(hdev)) {
5522 __hci_req_start_ext_adv(&req, 0x00);
5524 __hci_req_update_adv_data(&req, 0x00);
5525 __hci_req_update_scan_rsp_data(&req, 0x00);
5526 __hci_req_enable_advertising(&req);
5529 __hci_req_disable_advertising(&req);
5532 err = hci_req_run(&req, set_advertising_complete);
5534 mgmt_pending_remove(cmd);
5537 hci_dev_unlock(hdev);
5541 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5542 void *data, u16 len)
5544 struct mgmt_cp_set_static_address *cp = data;
5547 bt_dev_dbg(hdev, "sock %p", sk);
5549 if (!lmp_le_capable(hdev))
5550 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5551 MGMT_STATUS_NOT_SUPPORTED);
5553 if (hdev_is_powered(hdev))
5554 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5555 MGMT_STATUS_REJECTED);
5557 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5558 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5559 return mgmt_cmd_status(sk, hdev->id,
5560 MGMT_OP_SET_STATIC_ADDRESS,
5561 MGMT_STATUS_INVALID_PARAMS);
5563 /* Two most significant bits shall be set */
5564 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5565 return mgmt_cmd_status(sk, hdev->id,
5566 MGMT_OP_SET_STATIC_ADDRESS,
5567 MGMT_STATUS_INVALID_PARAMS);
5572 bacpy(&hdev->static_addr, &cp->bdaddr);
5574 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5578 err = new_settings(hdev, sk);
5581 hci_dev_unlock(hdev);
5585 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5586 void *data, u16 len)
5588 struct mgmt_cp_set_scan_params *cp = data;
5589 __u16 interval, window;
5592 bt_dev_dbg(hdev, "sock %p", sk);
5594 if (!lmp_le_capable(hdev))
5595 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5596 MGMT_STATUS_NOT_SUPPORTED);
5598 interval = __le16_to_cpu(cp->interval);
5600 if (interval < 0x0004 || interval > 0x4000)
5601 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5602 MGMT_STATUS_INVALID_PARAMS);
5604 window = __le16_to_cpu(cp->window);
5606 if (window < 0x0004 || window > 0x4000)
5607 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5608 MGMT_STATUS_INVALID_PARAMS);
5610 if (window > interval)
5611 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5612 MGMT_STATUS_INVALID_PARAMS);
5616 hdev->le_scan_interval = interval;
5617 hdev->le_scan_window = window;
5619 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5622 /* If background scan is running, restart it so new parameters are
5625 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5626 hdev->discovery.state == DISCOVERY_STOPPED) {
5627 struct hci_request req;
5629 hci_req_init(&req, hdev);
5631 hci_req_add_le_scan_disable(&req, false);
5632 hci_req_add_le_passive_scan(&req);
5634 hci_req_run(&req, NULL);
5637 hci_dev_unlock(hdev);
5642 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5645 struct mgmt_pending_cmd *cmd;
5647 bt_dev_dbg(hdev, "status 0x%02x", status);
5651 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5656 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5657 mgmt_status(status));
5659 struct mgmt_mode *cp = cmd->param;
5662 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5664 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5666 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5667 new_settings(hdev, cmd->sk);
5670 mgmt_pending_remove(cmd);
5673 hci_dev_unlock(hdev);
5676 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5677 void *data, u16 len)
5679 struct mgmt_mode *cp = data;
5680 struct mgmt_pending_cmd *cmd;
5681 struct hci_request req;
5684 bt_dev_dbg(hdev, "sock %p", sk);
5686 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5687 hdev->hci_ver < BLUETOOTH_VER_1_2)
5688 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5689 MGMT_STATUS_NOT_SUPPORTED);
5691 if (cp->val != 0x00 && cp->val != 0x01)
5692 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5693 MGMT_STATUS_INVALID_PARAMS);
5697 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5698 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5703 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5704 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5709 if (!hdev_is_powered(hdev)) {
5710 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5711 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5713 new_settings(hdev, sk);
5717 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5724 hci_req_init(&req, hdev);
5726 __hci_req_write_fast_connectable(&req, cp->val);
5728 err = hci_req_run(&req, fast_connectable_complete);
5730 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5731 MGMT_STATUS_FAILED);
5732 mgmt_pending_remove(cmd);
5736 hci_dev_unlock(hdev);
5741 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5743 struct mgmt_pending_cmd *cmd;
5745 bt_dev_dbg(hdev, "status 0x%02x", status);
5749 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5754 u8 mgmt_err = mgmt_status(status);
5756 /* We need to restore the flag if related HCI commands
5759 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5761 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5763 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5764 new_settings(hdev, cmd->sk);
5767 mgmt_pending_remove(cmd);
5770 hci_dev_unlock(hdev);
5773 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5775 struct mgmt_mode *cp = data;
5776 struct mgmt_pending_cmd *cmd;
5777 struct hci_request req;
5780 bt_dev_dbg(hdev, "sock %p", sk);
5782 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5783 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5784 MGMT_STATUS_NOT_SUPPORTED);
5786 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5787 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5788 MGMT_STATUS_REJECTED);
5790 if (cp->val != 0x00 && cp->val != 0x01)
5791 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5792 MGMT_STATUS_INVALID_PARAMS);
5796 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5797 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5801 if (!hdev_is_powered(hdev)) {
5803 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5804 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5805 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5806 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5807 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5810 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5812 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5816 err = new_settings(hdev, sk);
5820 /* Reject disabling when powered on */
5822 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5823 MGMT_STATUS_REJECTED);
5826 /* When configuring a dual-mode controller to operate
5827 * with LE only and using a static address, then switching
5828 * BR/EDR back on is not allowed.
5830 * Dual-mode controllers shall operate with the public
5831 * address as its identity address for BR/EDR and LE. So
5832 * reject the attempt to create an invalid configuration.
5834 * The same restrictions applies when secure connections
5835 * has been enabled. For BR/EDR this is a controller feature
5836 * while for LE it is a host stack feature. This means that
5837 * switching BR/EDR back on when secure connections has been
5838 * enabled is not a supported transaction.
5840 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5841 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5842 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5844 MGMT_STATUS_REJECTED);
5849 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5855 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5861 /* We need to flip the bit already here so that
5862 * hci_req_update_adv_data generates the correct flags.
5864 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5866 hci_req_init(&req, hdev);
5868 __hci_req_write_fast_connectable(&req, false);
5869 __hci_req_update_scan(&req);
5871 /* Since only the advertising data flags will change, there
5872 * is no need to update the scan response data.
5874 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5876 err = hci_req_run(&req, set_bredr_complete);
5878 mgmt_pending_remove(cmd);
5881 hci_dev_unlock(hdev);
5885 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5887 struct mgmt_pending_cmd *cmd;
5888 struct mgmt_mode *cp;
5890 bt_dev_dbg(hdev, "status %u", status);
5894 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5899 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5900 mgmt_status(status));
5908 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5909 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5912 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5913 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5916 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5917 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5921 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5922 new_settings(hdev, cmd->sk);
5925 mgmt_pending_remove(cmd);
5927 hci_dev_unlock(hdev);
5930 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5931 void *data, u16 len)
5933 struct mgmt_mode *cp = data;
5934 struct mgmt_pending_cmd *cmd;
5935 struct hci_request req;
5939 bt_dev_dbg(hdev, "sock %p", sk);
5941 if (!lmp_sc_capable(hdev) &&
5942 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5943 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5944 MGMT_STATUS_NOT_SUPPORTED);
5946 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5947 lmp_sc_capable(hdev) &&
5948 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5949 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5950 MGMT_STATUS_REJECTED);
5952 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5953 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5954 MGMT_STATUS_INVALID_PARAMS);
5958 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5959 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5963 changed = !hci_dev_test_and_set_flag(hdev,
5965 if (cp->val == 0x02)
5966 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5968 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5970 changed = hci_dev_test_and_clear_flag(hdev,
5972 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5975 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5980 err = new_settings(hdev, sk);
5985 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5986 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5993 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5994 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5995 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5999 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6005 hci_req_init(&req, hdev);
6006 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
6007 err = hci_req_run(&req, sc_enable_complete);
6009 mgmt_pending_remove(cmd);
6014 hci_dev_unlock(hdev);
6018 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6019 void *data, u16 len)
6021 struct mgmt_mode *cp = data;
6022 bool changed, use_changed;
6025 bt_dev_dbg(hdev, "sock %p", sk);
6027 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6028 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6029 MGMT_STATUS_INVALID_PARAMS);
6034 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6036 changed = hci_dev_test_and_clear_flag(hdev,
6037 HCI_KEEP_DEBUG_KEYS);
6039 if (cp->val == 0x02)
6040 use_changed = !hci_dev_test_and_set_flag(hdev,
6041 HCI_USE_DEBUG_KEYS);
6043 use_changed = hci_dev_test_and_clear_flag(hdev,
6044 HCI_USE_DEBUG_KEYS);
6046 if (hdev_is_powered(hdev) && use_changed &&
6047 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6048 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6049 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6050 sizeof(mode), &mode);
6053 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6058 err = new_settings(hdev, sk);
6061 hci_dev_unlock(hdev);
6065 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6068 struct mgmt_cp_set_privacy *cp = cp_data;
6072 bt_dev_dbg(hdev, "sock %p", sk);
6074 if (!lmp_le_capable(hdev))
6075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6076 MGMT_STATUS_NOT_SUPPORTED);
6078 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6079 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6080 MGMT_STATUS_INVALID_PARAMS);
6082 if (hdev_is_powered(hdev))
6083 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6084 MGMT_STATUS_REJECTED);
6088 /* If user space supports this command it is also expected to
6089 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6091 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6094 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6095 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6096 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6097 hci_adv_instances_set_rpa_expired(hdev, true);
6098 if (cp->privacy == 0x02)
6099 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6101 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6103 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6104 memset(hdev->irk, 0, sizeof(hdev->irk));
6105 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6106 hci_adv_instances_set_rpa_expired(hdev, false);
6107 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6110 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6115 err = new_settings(hdev, sk);
6118 hci_dev_unlock(hdev);
6122 static bool irk_is_valid(struct mgmt_irk_info *irk)
6124 switch (irk->addr.type) {
6125 case BDADDR_LE_PUBLIC:
6128 case BDADDR_LE_RANDOM:
6129 /* Two most significant bits shall be set */
6130 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6138 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6141 struct mgmt_cp_load_irks *cp = cp_data;
6142 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6143 sizeof(struct mgmt_irk_info));
6144 u16 irk_count, expected_len;
6147 bt_dev_dbg(hdev, "sock %p", sk);
6149 if (!lmp_le_capable(hdev))
6150 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6151 MGMT_STATUS_NOT_SUPPORTED);
6153 irk_count = __le16_to_cpu(cp->irk_count);
6154 if (irk_count > max_irk_count) {
6155 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6157 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6158 MGMT_STATUS_INVALID_PARAMS);
6161 expected_len = struct_size(cp, irks, irk_count);
6162 if (expected_len != len) {
6163 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6165 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6166 MGMT_STATUS_INVALID_PARAMS);
6169 bt_dev_dbg(hdev, "irk_count %u", irk_count);
6171 for (i = 0; i < irk_count; i++) {
6172 struct mgmt_irk_info *key = &cp->irks[i];
6174 if (!irk_is_valid(key))
6175 return mgmt_cmd_status(sk, hdev->id,
6177 MGMT_STATUS_INVALID_PARAMS);
6182 hci_smp_irks_clear(hdev);
6184 for (i = 0; i < irk_count; i++) {
6185 struct mgmt_irk_info *irk = &cp->irks[i];
6187 if (hci_is_blocked_key(hdev,
6188 HCI_BLOCKED_KEY_TYPE_IRK,
6190 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6195 hci_add_irk(hdev, &irk->addr.bdaddr,
6196 le_addr_type(irk->addr.type), irk->val,
6200 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6202 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6204 hci_dev_unlock(hdev);
6210 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
6211 void *data, u16 len)
6213 struct mgmt_cp_set_advertising_params *cp = data;
6218 BT_DBG("%s", hdev->name);
6220 if (!lmp_le_capable(hdev))
6221 return mgmt_cmd_status(sk, hdev->id,
6222 MGMT_OP_SET_ADVERTISING_PARAMS,
6223 MGMT_STATUS_NOT_SUPPORTED);
6225 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6226 return mgmt_cmd_status(sk, hdev->id,
6227 MGMT_OP_SET_ADVERTISING_PARAMS,
6230 min_interval = __le16_to_cpu(cp->interval_min);
6231 max_interval = __le16_to_cpu(cp->interval_max);
6233 if (min_interval > max_interval ||
6234 min_interval < 0x0020 || max_interval > 0x4000)
6235 return mgmt_cmd_status(sk, hdev->id,
6236 MGMT_OP_SET_ADVERTISING_PARAMS,
6237 MGMT_STATUS_INVALID_PARAMS);
6241 hdev->le_adv_min_interval = min_interval;
6242 hdev->le_adv_max_interval = max_interval;
6243 hdev->adv_filter_policy = cp->filter_policy;
6244 hdev->adv_type = cp->type;
6246 err = mgmt_cmd_complete(sk, hdev->id,
6247 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
6249 hci_dev_unlock(hdev);
6254 static void set_advertising_data_complete(struct hci_dev *hdev,
6255 u8 status, u16 opcode)
6257 struct mgmt_cp_set_advertising_data *cp;
6258 struct mgmt_pending_cmd *cmd;
6260 BT_DBG("status 0x%02x", status);
6264 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
6271 mgmt_cmd_status(cmd->sk, hdev->id,
6272 MGMT_OP_SET_ADVERTISING_DATA,
6273 mgmt_status(status));
6275 mgmt_cmd_complete(cmd->sk, hdev->id,
6276 MGMT_OP_SET_ADVERTISING_DATA, 0,
6279 mgmt_pending_remove(cmd);
6282 hci_dev_unlock(hdev);
6285 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6286 void *data, u16 len)
6288 struct mgmt_pending_cmd *cmd;
6289 struct hci_request req;
6290 struct mgmt_cp_set_advertising_data *cp = data;
6291 struct hci_cp_le_set_adv_data adv;
6294 BT_DBG("%s", hdev->name);
6296 if (!lmp_le_capable(hdev)) {
6297 return mgmt_cmd_status(sk, hdev->id,
6298 MGMT_OP_SET_ADVERTISING_DATA,
6299 MGMT_STATUS_NOT_SUPPORTED);
6304 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6305 err = mgmt_cmd_status(sk, hdev->id,
6306 MGMT_OP_SET_ADVERTISING_DATA,
6311 if (len > HCI_MAX_AD_LENGTH) {
6312 err = mgmt_cmd_status(sk, hdev->id,
6313 MGMT_OP_SET_ADVERTISING_DATA,
6314 MGMT_STATUS_INVALID_PARAMS);
6318 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6325 hci_req_init(&req, hdev);
6327 memset(&adv, 0, sizeof(adv));
6328 memcpy(adv.data, cp->data, len);
6331 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6333 err = hci_req_run(&req, set_advertising_data_complete);
6335 mgmt_pending_remove(cmd);
6338 hci_dev_unlock(hdev);
6343 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6346 struct mgmt_cp_set_scan_rsp_data *cp;
6347 struct mgmt_pending_cmd *cmd;
6349 BT_DBG("status 0x%02x", status);
6353 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6360 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6361 mgmt_status(status));
6363 mgmt_cmd_complete(cmd->sk, hdev->id,
6364 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6367 mgmt_pending_remove(cmd);
6370 hci_dev_unlock(hdev);
6373 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6376 struct mgmt_pending_cmd *cmd;
6377 struct hci_request req;
6378 struct mgmt_cp_set_scan_rsp_data *cp = data;
6379 struct hci_cp_le_set_scan_rsp_data rsp;
6382 BT_DBG("%s", hdev->name);
6384 if (!lmp_le_capable(hdev))
6385 return mgmt_cmd_status(sk, hdev->id,
6386 MGMT_OP_SET_SCAN_RSP_DATA,
6387 MGMT_STATUS_NOT_SUPPORTED);
6391 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6392 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6397 if (len > HCI_MAX_AD_LENGTH) {
6398 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6399 MGMT_STATUS_INVALID_PARAMS);
6403 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6409 hci_req_init(&req, hdev);
6411 memset(&rsp, 0, sizeof(rsp));
6412 memcpy(rsp.data, cp->data, len);
6415 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6417 err = hci_req_run(&req, set_scan_rsp_data_complete);
6419 mgmt_pending_remove(cmd);
6422 hci_dev_unlock(hdev);
6427 /* Adv White List feature */
6428 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6430 struct mgmt_cp_add_dev_white_list *cp;
6431 struct mgmt_pending_cmd *cmd;
6433 BT_DBG("status 0x%02x", status);
6437 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
6444 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6445 mgmt_status(status));
6447 mgmt_cmd_complete(cmd->sk, hdev->id,
6448 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
6450 mgmt_pending_remove(cmd);
6453 hci_dev_unlock(hdev);
6456 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
6457 void *data, u16 len)
6459 struct mgmt_pending_cmd *cmd;
6460 struct mgmt_cp_add_dev_white_list *cp = data;
6461 struct hci_request req;
6464 BT_DBG("%s", hdev->name);
6466 if (!lmp_le_capable(hdev))
6467 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6468 MGMT_STATUS_NOT_SUPPORTED);
6470 if (!hdev_is_powered(hdev))
6471 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6472 MGMT_STATUS_REJECTED);
6476 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
6477 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6482 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
6488 hci_req_init(&req, hdev);
6490 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
6492 err = hci_req_run(&req, add_white_list_complete);
6494 mgmt_pending_remove(cmd);
6499 hci_dev_unlock(hdev);
6504 static void remove_from_white_list_complete(struct hci_dev *hdev,
6505 u8 status, u16 opcode)
6507 struct mgmt_cp_remove_dev_from_white_list *cp;
6508 struct mgmt_pending_cmd *cmd;
6510 BT_DBG("status 0x%02x", status);
6514 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
6521 mgmt_cmd_status(cmd->sk, hdev->id,
6522 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6523 mgmt_status(status));
6525 mgmt_cmd_complete(cmd->sk, hdev->id,
6526 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
6529 mgmt_pending_remove(cmd);
6532 hci_dev_unlock(hdev);
6535 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
6536 void *data, u16 len)
6538 struct mgmt_pending_cmd *cmd;
6539 struct mgmt_cp_remove_dev_from_white_list *cp = data;
6540 struct hci_request req;
6543 BT_DBG("%s", hdev->name);
6545 if (!lmp_le_capable(hdev))
6546 return mgmt_cmd_status(sk, hdev->id,
6547 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6548 MGMT_STATUS_NOT_SUPPORTED);
6550 if (!hdev_is_powered(hdev))
6551 return mgmt_cmd_status(sk, hdev->id,
6552 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6553 MGMT_STATUS_REJECTED);
6557 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
6558 err = mgmt_cmd_status(sk, hdev->id,
6559 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6564 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6571 hci_req_init(&req, hdev);
6573 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
6575 err = hci_req_run(&req, remove_from_white_list_complete);
6577 mgmt_pending_remove(cmd);
6582 hci_dev_unlock(hdev);
6587 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
6590 struct mgmt_pending_cmd *cmd;
6592 BT_DBG("status 0x%02x", status);
6596 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
6601 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6602 mgmt_status(status));
6604 mgmt_cmd_complete(cmd->sk, hdev->id,
6605 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6608 mgmt_pending_remove(cmd);
6611 hci_dev_unlock(hdev);
6614 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
6615 void *data, u16 len)
6617 struct mgmt_pending_cmd *cmd;
6618 struct hci_request req;
6621 BT_DBG("%s", hdev->name);
6623 if (!lmp_le_capable(hdev))
6624 return mgmt_cmd_status(sk, hdev->id,
6625 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6626 MGMT_STATUS_NOT_SUPPORTED);
6628 if (!hdev_is_powered(hdev))
6629 return mgmt_cmd_status(sk, hdev->id,
6630 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6631 MGMT_STATUS_REJECTED);
6635 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
6636 err = mgmt_cmd_status(sk, hdev->id,
6637 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6642 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6649 hci_req_init(&req, hdev);
6651 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
6653 err = hci_req_run(&req, clear_white_list_complete);
6655 mgmt_pending_remove(cmd);
6660 hci_dev_unlock(hdev);
6665 static void set_rssi_threshold_complete(struct hci_dev *hdev,
6666 u8 status, u16 opcode)
6668 struct mgmt_pending_cmd *cmd;
6670 BT_DBG("status 0x%02x", status);
6674 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6679 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6680 mgmt_status(status));
6682 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6685 mgmt_pending_remove(cmd);
6688 hci_dev_unlock(hdev);
6691 static void set_rssi_disable_complete(struct hci_dev *hdev,
6692 u8 status, u16 opcode)
6694 struct mgmt_pending_cmd *cmd;
6696 BT_DBG("status 0x%02x", status);
6700 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6705 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6706 mgmt_status(status));
6708 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6711 mgmt_pending_remove(cmd);
6714 hci_dev_unlock(hdev);
6717 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6718 void *data, u16 len)
6721 struct hci_cp_set_rssi_threshold th = { 0, };
6722 struct mgmt_cp_set_enable_rssi *cp = data;
6723 struct hci_conn *conn;
6724 struct mgmt_pending_cmd *cmd;
6725 struct hci_request req;
6730 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6732 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6733 MGMT_STATUS_FAILED);
6737 if (!lmp_le_capable(hdev)) {
6738 mgmt_pending_remove(cmd);
6739 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6740 MGMT_STATUS_NOT_SUPPORTED);
6744 if (!hdev_is_powered(hdev)) {
6745 BT_DBG("%s", hdev->name);
6746 mgmt_pending_remove(cmd);
6747 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6748 MGMT_STATUS_NOT_POWERED);
6752 if (cp->link_type == 0x01)
6753 dest_type = LE_LINK;
6755 dest_type = ACL_LINK;
6757 /* Get LE/ACL link handle info */
6758 conn = hci_conn_hash_lookup_ba(hdev,
6759 dest_type, &cp->bdaddr);
6762 err = mgmt_cmd_complete(sk, hdev->id,
6763 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
6764 mgmt_pending_remove(cmd);
6768 hci_req_init(&req, hdev);
6770 th.hci_le_ext_opcode = 0x0B;
6772 th.conn_handle = conn->handle;
6773 th.alert_mask = 0x07;
6774 th.low_th = cp->low_th;
6775 th.in_range_th = cp->in_range_th;
6776 th.high_th = cp->high_th;
6778 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6779 err = hci_req_run(&req, set_rssi_threshold_complete);
6782 mgmt_pending_remove(cmd);
6783 BT_ERR("Error in requesting hci_req_run");
6788 hci_dev_unlock(hdev);
6792 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6793 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6795 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6796 struct mgmt_cp_set_enable_rssi *cp = data;
6797 struct mgmt_pending_cmd *cmd;
6802 mgmt_rp.status = rp->status;
6803 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6804 mgmt_rp.bt_address = cp->bdaddr;
6805 mgmt_rp.link_type = cp->link_type;
6807 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6808 MGMT_STATUS_SUCCESS, &mgmt_rp,
6809 sizeof(struct mgmt_cc_rsp_enable_rssi));
6811 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6812 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6814 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6815 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6816 &mgmt_rp.bt_address, true);
6820 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6822 mgmt_pending_remove(cmd);
6824 hci_dev_unlock(hdev);
6827 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6828 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6830 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6831 struct mgmt_cp_disable_rssi *cp = data;
6832 struct mgmt_pending_cmd *cmd;
6837 mgmt_rp.status = rp->status;
6838 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6839 mgmt_rp.bt_address = cp->bdaddr;
6840 mgmt_rp.link_type = cp->link_type;
6842 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6843 MGMT_STATUS_SUCCESS, &mgmt_rp,
6844 sizeof(struct mgmt_cc_rsp_enable_rssi));
6846 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6847 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6849 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6850 &mgmt_rp.bt_address, false);
6854 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6856 mgmt_pending_remove(cmd);
6858 hci_dev_unlock(hdev);
6861 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6862 void *data, u16 len)
6864 struct mgmt_pending_cmd *cmd;
6865 struct hci_request req;
6866 struct hci_cp_set_enable_rssi cp_en = { 0, };
6869 BT_DBG("Set Disable RSSI.");
6871 cp_en.hci_le_ext_opcode = 0x01;
6872 cp_en.le_enable_cs_Features = 0x00;
6873 cp_en.data[0] = 0x00;
6874 cp_en.data[1] = 0x00;
6875 cp_en.data[2] = 0x00;
6879 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6881 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6882 MGMT_STATUS_FAILED);
6886 if (!lmp_le_capable(hdev)) {
6887 mgmt_pending_remove(cmd);
6888 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6889 MGMT_STATUS_NOT_SUPPORTED);
6893 if (!hdev_is_powered(hdev)) {
6894 BT_DBG("%s", hdev->name);
6895 mgmt_pending_remove(cmd);
6896 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6897 MGMT_STATUS_NOT_POWERED);
6901 hci_req_init(&req, hdev);
6903 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6904 sizeof(struct hci_cp_set_enable_rssi),
6905 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6906 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6908 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6909 err = hci_req_run(&req, set_rssi_disable_complete);
6912 mgmt_pending_remove(cmd);
6913 BT_ERR("Error in requesting hci_req_run");
6918 hci_dev_unlock(hdev);
6922 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6924 struct hci_cc_rsp_enable_rssi *rp = response;
6925 struct mgmt_pending_cmd *cmd_enable = NULL;
6926 struct mgmt_pending_cmd *cmd_disable = NULL;
6927 struct mgmt_cp_set_enable_rssi *cp_en;
6928 struct mgmt_cp_disable_rssi *cp_dis;
6931 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6932 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6933 hci_dev_unlock(hdev);
6936 BT_DBG("Enable Request");
6939 BT_DBG("Disable Request");
6942 cp_en = cmd_enable->param;
6947 switch (rp->le_ext_opcode) {
6949 BT_DBG("RSSI enabled.. Setting Threshold...");
6950 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6951 cp_en, sizeof(*cp_en));
6955 BT_DBG("Sending RSSI enable success");
6956 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6957 cp_en, rp, rp->status);
6961 } else if (cmd_disable) {
6962 cp_dis = cmd_disable->param;
6967 switch (rp->le_ext_opcode) {
6969 BT_DBG("Sending RSSI disable success");
6970 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6971 cp_dis, rp, rp->status);
6976 * Only unset RSSI Threshold values for the Link if
6977 * RSSI is monitored for other BREDR or LE Links
6979 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6980 BT_DBG("Unset Threshold. Other links being monitored");
6981 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6982 cp_dis, rp, rp->status);
6984 BT_DBG("Unset Threshold. Disabling...");
6985 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6986 cp_dis, sizeof(*cp_dis));
6993 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
6996 struct mgmt_pending_cmd *cmd;
6998 BT_DBG("status 0x%02x", status);
7002 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7007 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7008 mgmt_status(status));
7010 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7013 mgmt_pending_remove(cmd);
7016 hci_dev_unlock(hdev);
7019 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7020 void *data, u16 len)
7022 struct mgmt_pending_cmd *cmd;
7023 struct hci_request req;
7024 struct mgmt_cp_set_enable_rssi *cp = data;
7025 struct hci_cp_set_enable_rssi cp_en = { 0, };
7028 BT_DBG("Set Enable RSSI.");
7030 cp_en.hci_le_ext_opcode = 0x01;
7031 cp_en.le_enable_cs_Features = 0x04;
7032 cp_en.data[0] = 0x00;
7033 cp_en.data[1] = 0x00;
7034 cp_en.data[2] = 0x00;
7038 if (!lmp_le_capable(hdev)) {
7039 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7040 MGMT_STATUS_NOT_SUPPORTED);
7044 if (!hdev_is_powered(hdev)) {
7045 BT_DBG("%s", hdev->name);
7046 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7047 MGMT_STATUS_NOT_POWERED);
7051 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
7052 BT_DBG("%s", hdev->name);
7053 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7058 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
7061 BT_DBG("%s", hdev->name);
7066 /* If RSSI is already enabled directly set Threshold values */
7067 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
7068 hci_dev_unlock(hdev);
7069 BT_DBG("RSSI Enabled. Directly set Threshold");
7070 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
7074 hci_req_init(&req, hdev);
7076 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7077 sizeof(struct hci_cp_set_enable_rssi),
7078 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7079 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7081 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7082 err = hci_req_run(&req, set_rssi_enable_complete);
7085 mgmt_pending_remove(cmd);
7086 BT_ERR("Error in requesting hci_req_run");
7091 hci_dev_unlock(hdev);
7096 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7098 struct mgmt_pending_cmd *cmd;
7100 BT_DBG("status 0x%02x", status);
7104 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
7108 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7109 MGMT_STATUS_SUCCESS, &status, 1);
7111 mgmt_pending_remove(cmd);
7114 hci_dev_unlock(hdev);
7117 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
7120 struct mgmt_pending_cmd *cmd;
7121 struct hci_request req;
7122 struct mgmt_cp_get_raw_rssi *cp = data;
7123 struct hci_cp_get_raw_rssi hci_cp;
7125 struct hci_conn *conn;
7129 BT_DBG("Get Raw RSSI.");
7133 if (!lmp_le_capable(hdev)) {
7134 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7135 MGMT_STATUS_NOT_SUPPORTED);
7139 if (cp->link_type == 0x01)
7140 dest_type = LE_LINK;
7142 dest_type = ACL_LINK;
7144 /* Get LE/BREDR link handle info */
7145 conn = hci_conn_hash_lookup_ba(hdev,
7146 dest_type, &cp->bt_address);
7148 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7149 MGMT_STATUS_NOT_CONNECTED);
7152 hci_cp.conn_handle = conn->handle;
7154 if (!hdev_is_powered(hdev)) {
7155 BT_DBG("%s", hdev->name);
7156 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7157 MGMT_STATUS_NOT_POWERED);
7161 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
7162 BT_DBG("%s", hdev->name);
7163 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7168 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
7170 BT_DBG("%s", hdev->name);
7175 hci_req_init(&req, hdev);
7177 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
7178 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
7179 err = hci_req_run(&req, get_raw_rssi_complete);
7182 mgmt_pending_remove(cmd);
7183 BT_ERR("Error in requesting hci_req_run");
7187 hci_dev_unlock(hdev);
7192 void mgmt_raw_rssi_response(struct hci_dev *hdev,
7193 struct hci_cc_rp_get_raw_rssi *rp, int success)
7195 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
7196 struct hci_conn *conn;
7198 mgmt_rp.status = rp->status;
7199 mgmt_rp.rssi_dbm = rp->rssi_dbm;
7201 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
7205 bacpy(&mgmt_rp.bt_address, &conn->dst);
7206 if (conn->type == LE_LINK)
7207 mgmt_rp.link_type = 0x01;
7209 mgmt_rp.link_type = 0x00;
7211 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
7212 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
7215 static void set_disable_threshold_complete(struct hci_dev *hdev,
7216 u8 status, u16 opcode)
7218 struct mgmt_pending_cmd *cmd;
7220 BT_DBG("status 0x%02x", status);
7224 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7228 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7229 MGMT_STATUS_SUCCESS, &status, 1);
7231 mgmt_pending_remove(cmd);
7234 hci_dev_unlock(hdev);
7237 /** Removes monitoring for a link*/
7238 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
7239 void *data, u16 len)
7242 struct hci_cp_set_rssi_threshold th = { 0, };
7243 struct mgmt_cp_disable_rssi *cp = data;
7244 struct hci_conn *conn;
7245 struct mgmt_pending_cmd *cmd;
7246 struct hci_request req;
7249 BT_DBG("Set Disable RSSI.");
7253 if (!lmp_le_capable(hdev)) {
7254 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7255 MGMT_STATUS_NOT_SUPPORTED);
7259 /* Get LE/ACL link handle info*/
7260 if (cp->link_type == 0x01)
7261 dest_type = LE_LINK;
7263 dest_type = ACL_LINK;
7265 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
7267 err = mgmt_cmd_complete(sk, hdev->id,
7268 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
7272 th.hci_le_ext_opcode = 0x0B;
7274 th.conn_handle = conn->handle;
7275 th.alert_mask = 0x00;
7277 th.in_range_th = 0x00;
7280 if (!hdev_is_powered(hdev)) {
7281 BT_DBG("%s", hdev->name);
7282 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7287 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
7288 BT_DBG("%s", hdev->name);
7289 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7294 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
7297 BT_DBG("%s", hdev->name);
7302 hci_req_init(&req, hdev);
7304 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7305 err = hci_req_run(&req, set_disable_threshold_complete);
7307 mgmt_pending_remove(cmd);
7308 BT_ERR("Error in requesting hci_req_run");
7313 hci_dev_unlock(hdev);
7318 void mgmt_rssi_alert_evt(struct hci_dev *hdev, struct sk_buff *skb)
7320 struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
7321 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
7322 struct hci_conn *conn;
7324 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
7325 ev->conn_handle, ev->alert_type, ev->rssi_dbm);
7327 conn = hci_conn_hash_lookup_handle(hdev, ev->conn_handle);
7330 BT_ERR("RSSI alert Error: Device not found for handle");
7333 bacpy(&mgmt_ev.bdaddr, &conn->dst);
7335 if (conn->type == LE_LINK)
7336 mgmt_ev.link_type = 0x01;
7338 mgmt_ev.link_type = 0x00;
7340 mgmt_ev.alert_type = ev->alert_type;
7341 mgmt_ev.rssi_dbm = ev->rssi_dbm;
7343 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
7344 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
7348 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
7350 struct mgmt_pending_cmd *cmd;
7354 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7356 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7360 type = hdev->le_discovery.type;
7362 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7363 mgmt_status(status), &type, sizeof(type));
7364 mgmt_pending_remove(cmd);
7369 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
7372 unsigned long timeout = 0;
7374 BT_DBG("status %d", status);
7378 mgmt_start_le_discovery_failed(hdev, status);
7379 hci_dev_unlock(hdev);
7384 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
7385 hci_dev_unlock(hdev);
7387 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
7388 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
7393 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
7396 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
7397 void *data, u16 len)
7399 struct mgmt_cp_start_le_discovery *cp = data;
7400 struct mgmt_pending_cmd *cmd;
7401 struct hci_cp_le_set_scan_param param_cp;
7402 struct hci_cp_le_set_scan_enable enable_cp;
7403 struct hci_request req;
7404 u8 status, own_addr_type;
7407 BT_DBG("%s", hdev->name);
7411 if (!hdev_is_powered(hdev)) {
7412 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7413 MGMT_STATUS_NOT_POWERED);
7417 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
7418 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7423 if (cp->type != DISCOV_TYPE_LE) {
7424 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7425 MGMT_STATUS_INVALID_PARAMS);
7429 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
7435 hdev->le_discovery.type = cp->type;
7437 hci_req_init(&req, hdev);
7439 status = mgmt_le_support(hdev);
7441 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7443 mgmt_pending_remove(cmd);
7447 /* If controller is scanning, it means the background scanning
7448 * is running. Thus, we should temporarily stop it in order to
7449 * set the discovery scanning parameters.
7451 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
7452 hci_req_add_le_scan_disable(&req, false);
7454 memset(¶m_cp, 0, sizeof(param_cp));
7456 /* All active scans will be done with either a resolvable
7457 * private address (when privacy feature has been enabled)
7458 * or unresolvable private address.
7460 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
7462 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7463 MGMT_STATUS_FAILED);
7464 mgmt_pending_remove(cmd);
7468 param_cp.type = hdev->le_scan_type;
7469 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
7470 param_cp.window = cpu_to_le16(hdev->le_scan_window);
7471 param_cp.own_address_type = own_addr_type;
7472 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
7475 memset(&enable_cp, 0, sizeof(enable_cp));
7476 enable_cp.enable = LE_SCAN_ENABLE;
7477 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
7479 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
7482 err = hci_req_run(&req, start_le_discovery_complete);
7484 mgmt_pending_remove(cmd);
7486 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
7489 hci_dev_unlock(hdev);
7493 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
7495 struct mgmt_pending_cmd *cmd;
7498 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7502 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7503 mgmt_status(status), &hdev->le_discovery.type,
7504 sizeof(hdev->le_discovery.type));
7505 mgmt_pending_remove(cmd);
7510 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
7513 BT_DBG("status %d", status);
7518 mgmt_stop_le_discovery_failed(hdev, status);
7522 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7525 hci_dev_unlock(hdev);
7528 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
7529 void *data, u16 len)
7531 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
7532 struct mgmt_pending_cmd *cmd;
7533 struct hci_request req;
7536 BT_DBG("%s", hdev->name);
7540 if (!hci_le_discovery_active(hdev)) {
7541 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7542 MGMT_STATUS_REJECTED, &mgmt_cp->type,
7543 sizeof(mgmt_cp->type));
7547 if (hdev->le_discovery.type != mgmt_cp->type) {
7548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7549 MGMT_STATUS_INVALID_PARAMS,
7550 &mgmt_cp->type, sizeof(mgmt_cp->type));
7554 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
7560 hci_req_init(&req, hdev);
7562 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
7563 BT_DBG("unknown le discovery state %u",
7564 hdev->le_discovery.state);
7566 mgmt_pending_remove(cmd);
7567 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7568 MGMT_STATUS_FAILED, &mgmt_cp->type,
7569 sizeof(mgmt_cp->type));
7573 cancel_delayed_work(&hdev->le_scan_disable);
7574 hci_req_add_le_scan_disable(&req, false);
7576 err = hci_req_run(&req, stop_le_discovery_complete);
7578 mgmt_pending_remove(cmd);
7580 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
7583 hci_dev_unlock(hdev);
7587 /* Separate LE discovery */
7588 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
7590 struct mgmt_ev_discovering ev;
7591 struct mgmt_pending_cmd *cmd;
7593 BT_DBG("%s le discovering %u", hdev->name, discovering);
7596 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7598 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7601 u8 type = hdev->le_discovery.type;
7603 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
7605 mgmt_pending_remove(cmd);
7608 memset(&ev, 0, sizeof(ev));
7609 ev.type = hdev->le_discovery.type;
7610 ev.discovering = discovering;
7612 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7615 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
7616 void *data, u16 len)
7620 BT_DBG("%s", hdev->name);
7624 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
7626 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
7628 hci_dev_unlock(hdev);
7633 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
7638 if (min > max || min < 6 || max > 3200)
7641 if (to_multiplier < 10 || to_multiplier > 3200)
7644 if (max >= to_multiplier * 8)
7647 max_latency = (to_multiplier * 8 / max) - 1;
7649 if (latency > 499 || latency > max_latency)
7655 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
7658 struct mgmt_cp_le_conn_update *cp = data;
7660 struct hci_conn *conn;
7661 u16 min, max, latency, supervision_timeout;
7664 if (!hdev_is_powered(hdev))
7665 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7666 MGMT_STATUS_NOT_POWERED);
7668 min = __le16_to_cpu(cp->conn_interval_min);
7669 max = __le16_to_cpu(cp->conn_interval_max);
7670 latency = __le16_to_cpu(cp->conn_latency);
7671 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
7673 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
7674 min, max, latency, supervision_timeout);
7676 err = check_le_conn_update_param(min, max, latency,
7677 supervision_timeout);
7680 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7681 MGMT_STATUS_INVALID_PARAMS);
7685 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
7687 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7688 MGMT_STATUS_NOT_CONNECTED);
7689 hci_dev_unlock(hdev);
7693 hci_dev_unlock(hdev);
7695 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
7697 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
7701 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
7704 struct mgmt_cp_set_manufacturer_data *cp;
7705 struct mgmt_pending_cmd *cmd;
7707 BT_DBG("status 0x%02x", status);
7711 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
7718 mgmt_cmd_status(cmd->sk, hdev->id,
7719 MGMT_OP_SET_MANUFACTURER_DATA,
7720 mgmt_status(status));
7722 mgmt_cmd_complete(cmd->sk, hdev->id,
7723 MGMT_OP_SET_MANUFACTURER_DATA, 0,
7726 mgmt_pending_remove(cmd);
7729 hci_dev_unlock(hdev);
7732 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
7733 void *data, u16 len)
7735 struct mgmt_pending_cmd *cmd;
7736 struct hci_request req;
7737 struct mgmt_cp_set_manufacturer_data *cp = data;
7738 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
7742 BT_DBG("%s", hdev->name);
7744 if (!lmp_bredr_capable(hdev))
7745 return mgmt_cmd_status(sk, hdev->id,
7746 MGMT_OP_SET_MANUFACTURER_DATA,
7747 MGMT_STATUS_NOT_SUPPORTED);
7749 if (cp->data[0] == 0 ||
7750 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
7751 return mgmt_cmd_status(sk, hdev->id,
7752 MGMT_OP_SET_MANUFACTURER_DATA,
7753 MGMT_STATUS_INVALID_PARAMS);
7755 if (cp->data[1] != 0xFF)
7756 return mgmt_cmd_status(sk, hdev->id,
7757 MGMT_OP_SET_MANUFACTURER_DATA,
7758 MGMT_STATUS_NOT_SUPPORTED);
7762 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
7763 err = mgmt_cmd_status(sk, hdev->id,
7764 MGMT_OP_SET_MANUFACTURER_DATA,
7769 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
7776 hci_req_init(&req, hdev);
7778 /* if new data is same as previous data then return command
7781 if (hdev->manufacturer_len == cp->data[0] - 1 &&
7782 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
7783 mgmt_pending_remove(cmd);
7784 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
7785 0, cp, sizeof(*cp));
7790 old_len = hdev->manufacturer_len;
7792 memcpy(old_data, hdev->manufacturer_data, old_len);
7794 hdev->manufacturer_len = cp->data[0] - 1;
7795 if (hdev->manufacturer_len > 0)
7796 memcpy(hdev->manufacturer_data, cp->data + 2,
7797 hdev->manufacturer_len);
7799 __hci_req_update_eir(&req);
7801 err = hci_req_run(&req, set_manufacturer_data_complete);
7803 mgmt_pending_remove(cmd);
7808 hci_dev_unlock(hdev);
7813 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
7814 hdev->manufacturer_len = old_len;
7815 if (hdev->manufacturer_len > 0)
7816 memcpy(hdev->manufacturer_data, old_data,
7817 hdev->manufacturer_len);
7818 hci_dev_unlock(hdev);
7822 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
7823 void *data, u16 len)
7825 struct mgmt_cp_le_set_scan_params *cp = data;
7826 __u16 interval, window;
7829 BT_DBG("%s", hdev->name);
7831 if (!lmp_le_capable(hdev))
7832 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7833 MGMT_STATUS_NOT_SUPPORTED);
7835 interval = __le16_to_cpu(cp->interval);
7837 if (interval < 0x0004 || interval > 0x4000)
7838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7839 MGMT_STATUS_INVALID_PARAMS);
7841 window = __le16_to_cpu(cp->window);
7843 if (window < 0x0004 || window > 0x4000)
7844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7845 MGMT_STATUS_INVALID_PARAMS);
7847 if (window > interval)
7848 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7849 MGMT_STATUS_INVALID_PARAMS);
7853 hdev->le_scan_type = cp->type;
7854 hdev->le_scan_interval = interval;
7855 hdev->le_scan_window = window;
7857 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
7860 /* If background scan is running, restart it so new parameters are
7863 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
7864 hdev->discovery.state == DISCOVERY_STOPPED) {
7865 struct hci_request req;
7867 hci_req_init(&req, hdev);
7869 hci_req_add_le_scan_disable(&req, false);
7870 hci_req_add_le_passive_scan(&req);
7872 hci_req_run(&req, NULL);
7875 hci_dev_unlock(hdev);
7880 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
7882 struct mgmt_ev_hardware_error ev;
7884 ev.error_code = err_code;
7885 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
7888 void mgmt_tx_timeout_error(struct hci_dev *hdev)
7890 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
7892 #endif /* TIZEN_BT */
7894 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7896 if (key->initiator != 0x00 && key->initiator != 0x01)
7899 switch (key->addr.type) {
7900 case BDADDR_LE_PUBLIC:
7903 case BDADDR_LE_RANDOM:
7904 /* Two most significant bits shall be set */
7905 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7913 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7914 void *cp_data, u16 len)
7916 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7917 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7918 sizeof(struct mgmt_ltk_info));
7919 u16 key_count, expected_len;
7922 bt_dev_dbg(hdev, "sock %p", sk);
7924 if (!lmp_le_capable(hdev))
7925 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7926 MGMT_STATUS_NOT_SUPPORTED);
7928 key_count = __le16_to_cpu(cp->key_count);
7929 if (key_count > max_key_count) {
7930 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7932 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7933 MGMT_STATUS_INVALID_PARAMS);
7936 expected_len = struct_size(cp, keys, key_count);
7937 if (expected_len != len) {
7938 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7940 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7941 MGMT_STATUS_INVALID_PARAMS);
7944 bt_dev_dbg(hdev, "key_count %u", key_count);
7946 for (i = 0; i < key_count; i++) {
7947 struct mgmt_ltk_info *key = &cp->keys[i];
7949 if (!ltk_is_valid(key))
7950 return mgmt_cmd_status(sk, hdev->id,
7951 MGMT_OP_LOAD_LONG_TERM_KEYS,
7952 MGMT_STATUS_INVALID_PARAMS);
7957 hci_smp_ltks_clear(hdev);
7959 for (i = 0; i < key_count; i++) {
7960 struct mgmt_ltk_info *key = &cp->keys[i];
7961 u8 type, authenticated;
7963 if (hci_is_blocked_key(hdev,
7964 HCI_BLOCKED_KEY_TYPE_LTK,
7966 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7971 switch (key->type) {
7972 case MGMT_LTK_UNAUTHENTICATED:
7973 authenticated = 0x00;
7974 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7976 case MGMT_LTK_AUTHENTICATED:
7977 authenticated = 0x01;
7978 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7980 case MGMT_LTK_P256_UNAUTH:
7981 authenticated = 0x00;
7982 type = SMP_LTK_P256;
7984 case MGMT_LTK_P256_AUTH:
7985 authenticated = 0x01;
7986 type = SMP_LTK_P256;
7988 case MGMT_LTK_P256_DEBUG:
7989 authenticated = 0x00;
7990 type = SMP_LTK_P256_DEBUG;
7996 hci_add_ltk(hdev, &key->addr.bdaddr,
7997 le_addr_type(key->addr.type), type, authenticated,
7998 key->val, key->enc_size, key->ediv, key->rand);
8001 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
8004 hci_dev_unlock(hdev);
8009 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8011 struct hci_conn *conn = cmd->user_data;
8012 struct mgmt_rp_get_conn_info rp;
8015 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8017 if (status == MGMT_STATUS_SUCCESS) {
8018 rp.rssi = conn->rssi;
8019 rp.tx_power = conn->tx_power;
8020 rp.max_tx_power = conn->max_tx_power;
8022 rp.rssi = HCI_RSSI_INVALID;
8023 rp.tx_power = HCI_TX_POWER_INVALID;
8024 rp.max_tx_power = HCI_TX_POWER_INVALID;
8027 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
8028 status, &rp, sizeof(rp));
8030 hci_conn_drop(conn);
8036 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
8039 struct hci_cp_read_rssi *cp;
8040 struct mgmt_pending_cmd *cmd;
8041 struct hci_conn *conn;
8045 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
8049 /* Commands sent in request are either Read RSSI or Read Transmit Power
8050 * Level so we check which one was last sent to retrieve connection
8051 * handle. Both commands have handle as first parameter so it's safe to
8052 * cast data on the same command struct.
8054 * First command sent is always Read RSSI and we fail only if it fails.
8055 * In other case we simply override error to indicate success as we
8056 * already remembered if TX power value is actually valid.
8058 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
8060 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
8061 status = MGMT_STATUS_SUCCESS;
8063 status = mgmt_status(hci_status);
8067 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
8071 handle = __le16_to_cpu(cp->handle);
8072 conn = hci_conn_hash_lookup_handle(hdev, handle);
8074 bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
8079 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
8083 cmd->cmd_complete(cmd, status);
8084 mgmt_pending_remove(cmd);
8087 hci_dev_unlock(hdev);
8090 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
8093 struct mgmt_cp_get_conn_info *cp = data;
8094 struct mgmt_rp_get_conn_info rp;
8095 struct hci_conn *conn;
8096 unsigned long conn_info_age;
8099 bt_dev_dbg(hdev, "sock %p", sk);
8101 memset(&rp, 0, sizeof(rp));
8102 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8103 rp.addr.type = cp->addr.type;
8105 if (!bdaddr_type_is_valid(cp->addr.type))
8106 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8107 MGMT_STATUS_INVALID_PARAMS,
8112 if (!hdev_is_powered(hdev)) {
8113 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8114 MGMT_STATUS_NOT_POWERED, &rp,
8119 if (cp->addr.type == BDADDR_BREDR)
8120 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8123 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8125 if (!conn || conn->state != BT_CONNECTED) {
8126 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8127 MGMT_STATUS_NOT_CONNECTED, &rp,
8132 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
8133 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8134 MGMT_STATUS_BUSY, &rp, sizeof(rp));
8138 /* To avoid client trying to guess when to poll again for information we
8139 * calculate conn info age as random value between min/max set in hdev.
8141 conn_info_age = hdev->conn_info_min_age +
8142 prandom_u32_max(hdev->conn_info_max_age -
8143 hdev->conn_info_min_age);
8145 /* Query controller to refresh cached values if they are too old or were
8148 if (time_after(jiffies, conn->conn_info_timestamp +
8149 msecs_to_jiffies(conn_info_age)) ||
8150 !conn->conn_info_timestamp) {
8151 struct hci_request req;
8152 struct hci_cp_read_tx_power req_txp_cp;
8153 struct hci_cp_read_rssi req_rssi_cp;
8154 struct mgmt_pending_cmd *cmd;
8156 hci_req_init(&req, hdev);
8157 req_rssi_cp.handle = cpu_to_le16(conn->handle);
8158 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
8161 /* For LE links TX power does not change thus we don't need to
8162 * query for it once value is known.
8164 if (!bdaddr_type_is_le(cp->addr.type) ||
8165 conn->tx_power == HCI_TX_POWER_INVALID) {
8166 req_txp_cp.handle = cpu_to_le16(conn->handle);
8167 req_txp_cp.type = 0x00;
8168 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8169 sizeof(req_txp_cp), &req_txp_cp);
8172 /* Max TX power needs to be read only once per connection */
8173 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
8174 req_txp_cp.handle = cpu_to_le16(conn->handle);
8175 req_txp_cp.type = 0x01;
8176 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8177 sizeof(req_txp_cp), &req_txp_cp);
8180 err = hci_req_run(&req, conn_info_refresh_complete);
8184 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
8191 hci_conn_hold(conn);
8192 cmd->user_data = hci_conn_get(conn);
8193 cmd->cmd_complete = conn_info_cmd_complete;
8195 conn->conn_info_timestamp = jiffies;
8197 /* Cache is valid, just reply with values cached in hci_conn */
8198 rp.rssi = conn->rssi;
8199 rp.tx_power = conn->tx_power;
8200 rp.max_tx_power = conn->max_tx_power;
8202 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8203 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8207 hci_dev_unlock(hdev);
8211 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8213 struct hci_conn *conn = cmd->user_data;
8214 struct mgmt_rp_get_clock_info rp;
8215 struct hci_dev *hdev;
8218 memset(&rp, 0, sizeof(rp));
8219 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8224 hdev = hci_dev_get(cmd->index);
8226 rp.local_clock = cpu_to_le32(hdev->clock);
8231 rp.piconet_clock = cpu_to_le32(conn->clock);
8232 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
8236 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
8240 hci_conn_drop(conn);
8247 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8249 struct hci_cp_read_clock *hci_cp;
8250 struct mgmt_pending_cmd *cmd;
8251 struct hci_conn *conn;
8253 bt_dev_dbg(hdev, "status %u", status);
8257 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
8261 if (hci_cp->which) {
8262 u16 handle = __le16_to_cpu(hci_cp->handle);
8263 conn = hci_conn_hash_lookup_handle(hdev, handle);
8268 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
8272 cmd->cmd_complete(cmd, mgmt_status(status));
8273 mgmt_pending_remove(cmd);
8276 hci_dev_unlock(hdev);
8279 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
8282 struct mgmt_cp_get_clock_info *cp = data;
8283 struct mgmt_rp_get_clock_info rp;
8284 struct hci_cp_read_clock hci_cp;
8285 struct mgmt_pending_cmd *cmd;
8286 struct hci_request req;
8287 struct hci_conn *conn;
8290 bt_dev_dbg(hdev, "sock %p", sk);
8292 memset(&rp, 0, sizeof(rp));
8293 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8294 rp.addr.type = cp->addr.type;
8296 if (cp->addr.type != BDADDR_BREDR)
8297 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8298 MGMT_STATUS_INVALID_PARAMS,
8303 if (!hdev_is_powered(hdev)) {
8304 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8305 MGMT_STATUS_NOT_POWERED, &rp,
8310 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8311 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8313 if (!conn || conn->state != BT_CONNECTED) {
8314 err = mgmt_cmd_complete(sk, hdev->id,
8315 MGMT_OP_GET_CLOCK_INFO,
8316 MGMT_STATUS_NOT_CONNECTED,
8324 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
8330 cmd->cmd_complete = clock_info_cmd_complete;
8332 hci_req_init(&req, hdev);
8334 memset(&hci_cp, 0, sizeof(hci_cp));
8335 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8338 hci_conn_hold(conn);
8339 cmd->user_data = hci_conn_get(conn);
8341 hci_cp.handle = cpu_to_le16(conn->handle);
8342 hci_cp.which = 0x01; /* Piconet clock */
8343 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8346 err = hci_req_run(&req, get_clock_info_complete);
8348 mgmt_pending_remove(cmd);
8351 hci_dev_unlock(hdev);
8355 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8357 struct hci_conn *conn;
8359 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8363 if (conn->dst_type != type)
8366 if (conn->state != BT_CONNECTED)
8372 /* This function requires the caller holds hdev->lock */
8373 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8374 u8 addr_type, u8 auto_connect)
8376 struct hci_conn_params *params;
8378 params = hci_conn_params_add(hdev, addr, addr_type);
8382 if (params->auto_connect == auto_connect)
8385 list_del_init(¶ms->action);
8387 switch (auto_connect) {
8388 case HCI_AUTO_CONN_DISABLED:
8389 case HCI_AUTO_CONN_LINK_LOSS:
8390 /* If auto connect is being disabled when we're trying to
8391 * connect to device, keep connecting.
8393 if (params->explicit_connect)
8394 list_add(¶ms->action, &hdev->pend_le_conns);
8396 case HCI_AUTO_CONN_REPORT:
8397 if (params->explicit_connect)
8398 list_add(¶ms->action, &hdev->pend_le_conns);
8400 list_add(¶ms->action, &hdev->pend_le_reports);
8402 case HCI_AUTO_CONN_DIRECT:
8403 case HCI_AUTO_CONN_ALWAYS:
8404 if (!is_connected(hdev, addr, addr_type))
8405 list_add(¶ms->action, &hdev->pend_le_conns);
8409 params->auto_connect = auto_connect;
8411 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
8412 addr, addr_type, auto_connect);
8417 static void device_added(struct sock *sk, struct hci_dev *hdev,
8418 bdaddr_t *bdaddr, u8 type, u8 action)
8420 struct mgmt_ev_device_added ev;
8422 bacpy(&ev.addr.bdaddr, bdaddr);
8423 ev.addr.type = type;
8426 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
8429 static int add_device(struct sock *sk, struct hci_dev *hdev,
8430 void *data, u16 len)
8432 struct mgmt_cp_add_device *cp = data;
8433 u8 auto_conn, addr_type;
8434 struct hci_conn_params *params;
8436 u32 current_flags = 0;
8438 bt_dev_dbg(hdev, "sock %p", sk);
8440 if (!bdaddr_type_is_valid(cp->addr.type) ||
8441 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
8442 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8443 MGMT_STATUS_INVALID_PARAMS,
8444 &cp->addr, sizeof(cp->addr));
8446 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
8447 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8448 MGMT_STATUS_INVALID_PARAMS,
8449 &cp->addr, sizeof(cp->addr));
8453 if (cp->addr.type == BDADDR_BREDR) {
8454 /* Only incoming connections action is supported for now */
8455 if (cp->action != 0x01) {
8456 err = mgmt_cmd_complete(sk, hdev->id,
8458 MGMT_STATUS_INVALID_PARAMS,
8459 &cp->addr, sizeof(cp->addr));
8463 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
8469 hci_req_update_scan(hdev);
8474 addr_type = le_addr_type(cp->addr.type);
8476 if (cp->action == 0x02)
8477 auto_conn = HCI_AUTO_CONN_ALWAYS;
8478 else if (cp->action == 0x01)
8479 auto_conn = HCI_AUTO_CONN_DIRECT;
8481 auto_conn = HCI_AUTO_CONN_REPORT;
8483 /* Kernel internally uses conn_params with resolvable private
8484 * address, but Add Device allows only identity addresses.
8485 * Make sure it is enforced before calling
8486 * hci_conn_params_lookup.
8488 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8489 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8490 MGMT_STATUS_INVALID_PARAMS,
8491 &cp->addr, sizeof(cp->addr));
8495 /* If the connection parameters don't exist for this device,
8496 * they will be created and configured with defaults.
8498 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
8500 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8501 MGMT_STATUS_FAILED, &cp->addr,
8505 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8508 current_flags = params->current_flags;
8511 hci_update_background_scan(hdev);
8514 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
8515 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
8516 SUPPORTED_DEVICE_FLAGS(), current_flags);
8518 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8519 MGMT_STATUS_SUCCESS, &cp->addr,
8523 hci_dev_unlock(hdev);
8527 static void device_removed(struct sock *sk, struct hci_dev *hdev,
8528 bdaddr_t *bdaddr, u8 type)
8530 struct mgmt_ev_device_removed ev;
8532 bacpy(&ev.addr.bdaddr, bdaddr);
8533 ev.addr.type = type;
8535 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
8538 static int remove_device(struct sock *sk, struct hci_dev *hdev,
8539 void *data, u16 len)
8541 struct mgmt_cp_remove_device *cp = data;
8544 bt_dev_dbg(hdev, "sock %p", sk);
8548 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8549 struct hci_conn_params *params;
8552 if (!bdaddr_type_is_valid(cp->addr.type)) {
8553 err = mgmt_cmd_complete(sk, hdev->id,
8554 MGMT_OP_REMOVE_DEVICE,
8555 MGMT_STATUS_INVALID_PARAMS,
8556 &cp->addr, sizeof(cp->addr));
8560 if (cp->addr.type == BDADDR_BREDR) {
8561 err = hci_bdaddr_list_del(&hdev->accept_list,
8565 err = mgmt_cmd_complete(sk, hdev->id,
8566 MGMT_OP_REMOVE_DEVICE,
8567 MGMT_STATUS_INVALID_PARAMS,
8573 hci_req_update_scan(hdev);
8575 device_removed(sk, hdev, &cp->addr.bdaddr,
8580 addr_type = le_addr_type(cp->addr.type);
8582 /* Kernel internally uses conn_params with resolvable private
8583 * address, but Remove Device allows only identity addresses.
8584 * Make sure it is enforced before calling
8585 * hci_conn_params_lookup.
8587 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8588 err = mgmt_cmd_complete(sk, hdev->id,
8589 MGMT_OP_REMOVE_DEVICE,
8590 MGMT_STATUS_INVALID_PARAMS,
8591 &cp->addr, sizeof(cp->addr));
8595 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8598 err = mgmt_cmd_complete(sk, hdev->id,
8599 MGMT_OP_REMOVE_DEVICE,
8600 MGMT_STATUS_INVALID_PARAMS,
8601 &cp->addr, sizeof(cp->addr));
8605 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
8606 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
8607 err = mgmt_cmd_complete(sk, hdev->id,
8608 MGMT_OP_REMOVE_DEVICE,
8609 MGMT_STATUS_INVALID_PARAMS,
8610 &cp->addr, sizeof(cp->addr));
8614 list_del(¶ms->action);
8615 list_del(¶ms->list);
8617 hci_update_background_scan(hdev);
8619 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
8621 struct hci_conn_params *p, *tmp;
8622 struct bdaddr_list *b, *btmp;
8624 if (cp->addr.type) {
8625 err = mgmt_cmd_complete(sk, hdev->id,
8626 MGMT_OP_REMOVE_DEVICE,
8627 MGMT_STATUS_INVALID_PARAMS,
8628 &cp->addr, sizeof(cp->addr));
8632 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
8633 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
8638 hci_req_update_scan(hdev);
8640 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
8641 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
8643 device_removed(sk, hdev, &p->addr, p->addr_type);
8644 if (p->explicit_connect) {
8645 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
8648 list_del(&p->action);
8653 bt_dev_dbg(hdev, "All LE connection parameters were removed");
8655 hci_update_background_scan(hdev);
8659 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
8660 MGMT_STATUS_SUCCESS, &cp->addr,
8663 hci_dev_unlock(hdev);
8667 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
8670 struct mgmt_cp_load_conn_param *cp = data;
8671 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
8672 sizeof(struct mgmt_conn_param));
8673 u16 param_count, expected_len;
8676 if (!lmp_le_capable(hdev))
8677 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8678 MGMT_STATUS_NOT_SUPPORTED);
8680 param_count = __le16_to_cpu(cp->param_count);
8681 if (param_count > max_param_count) {
8682 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
8684 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8685 MGMT_STATUS_INVALID_PARAMS);
8688 expected_len = struct_size(cp, params, param_count);
8689 if (expected_len != len) {
8690 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
8692 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8693 MGMT_STATUS_INVALID_PARAMS);
8696 bt_dev_dbg(hdev, "param_count %u", param_count);
8700 hci_conn_params_clear_disabled(hdev);
8702 for (i = 0; i < param_count; i++) {
8703 struct mgmt_conn_param *param = &cp->params[i];
8704 struct hci_conn_params *hci_param;
8705 u16 min, max, latency, timeout;
8708 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
8711 if (param->addr.type == BDADDR_LE_PUBLIC) {
8712 addr_type = ADDR_LE_DEV_PUBLIC;
8713 } else if (param->addr.type == BDADDR_LE_RANDOM) {
8714 addr_type = ADDR_LE_DEV_RANDOM;
8716 bt_dev_err(hdev, "ignoring invalid connection parameters");
8720 min = le16_to_cpu(param->min_interval);
8721 max = le16_to_cpu(param->max_interval);
8722 latency = le16_to_cpu(param->latency);
8723 timeout = le16_to_cpu(param->timeout);
8725 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
8726 min, max, latency, timeout);
8728 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
8729 bt_dev_err(hdev, "ignoring invalid connection parameters");
8733 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
8736 bt_dev_err(hdev, "failed to add connection parameters");
8740 hci_param->conn_min_interval = min;
8741 hci_param->conn_max_interval = max;
8742 hci_param->conn_latency = latency;
8743 hci_param->supervision_timeout = timeout;
8746 hci_dev_unlock(hdev);
8748 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
8752 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8753 void *data, u16 len)
8755 struct mgmt_cp_set_external_config *cp = data;
8759 bt_dev_dbg(hdev, "sock %p", sk);
8761 if (hdev_is_powered(hdev))
8762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8763 MGMT_STATUS_REJECTED);
8765 if (cp->config != 0x00 && cp->config != 0x01)
8766 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8767 MGMT_STATUS_INVALID_PARAMS);
8769 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8770 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8771 MGMT_STATUS_NOT_SUPPORTED);
8776 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8778 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8780 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8787 err = new_options(hdev, sk);
8789 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8790 mgmt_index_removed(hdev);
8792 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8793 hci_dev_set_flag(hdev, HCI_CONFIG);
8794 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8796 queue_work(hdev->req_workqueue, &hdev->power_on);
8798 set_bit(HCI_RAW, &hdev->flags);
8799 mgmt_index_added(hdev);
8804 hci_dev_unlock(hdev);
8808 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8809 void *data, u16 len)
8811 struct mgmt_cp_set_public_address *cp = data;
8815 bt_dev_dbg(hdev, "sock %p", sk);
8817 if (hdev_is_powered(hdev))
8818 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8819 MGMT_STATUS_REJECTED);
8821 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8823 MGMT_STATUS_INVALID_PARAMS);
8825 if (!hdev->set_bdaddr)
8826 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8827 MGMT_STATUS_NOT_SUPPORTED);
8831 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8832 bacpy(&hdev->public_addr, &cp->bdaddr);
8834 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8841 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8842 err = new_options(hdev, sk);
8844 if (is_configured(hdev)) {
8845 mgmt_index_removed(hdev);
8847 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8849 hci_dev_set_flag(hdev, HCI_CONFIG);
8850 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8852 queue_work(hdev->req_workqueue, &hdev->power_on);
8856 hci_dev_unlock(hdev);
8861 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
8865 struct mgmt_ev_device_name_update *ev = (void *)buf;
8871 bacpy(&ev->addr.bdaddr, bdaddr);
8872 ev->addr.type = BDADDR_BREDR;
8874 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8877 ev->eir_len = cpu_to_le16(eir_len);
8879 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
8880 sizeof(*ev) + eir_len, NULL);
8883 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8884 u8 link_type, u8 addr_type, u8 status)
8886 struct mgmt_ev_conn_update_failed ev;
8888 bacpy(&ev.addr.bdaddr, bdaddr);
8889 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8892 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
8893 &ev, sizeof(ev), NULL);
8896 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
8897 u8 link_type, u8 addr_type, u16 conn_interval,
8898 u16 conn_latency, u16 supervision_timeout)
8900 struct mgmt_ev_conn_updated ev;
8902 bacpy(&ev.addr.bdaddr, bdaddr);
8903 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8904 ev.conn_interval = cpu_to_le16(conn_interval);
8905 ev.conn_latency = cpu_to_le16(conn_latency);
8906 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
8908 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
8909 &ev, sizeof(ev), NULL);
8913 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
8914 u16 opcode, struct sk_buff *skb)
8916 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8917 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8918 u8 *h192, *r192, *h256, *r256;
8919 struct mgmt_pending_cmd *cmd;
8923 bt_dev_dbg(hdev, "status %u", status);
8925 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
8929 mgmt_cp = cmd->param;
8932 status = mgmt_status(status);
8939 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
8940 struct hci_rp_read_local_oob_data *rp;
8942 if (skb->len != sizeof(*rp)) {
8943 status = MGMT_STATUS_FAILED;
8946 status = MGMT_STATUS_SUCCESS;
8947 rp = (void *)skb->data;
8949 eir_len = 5 + 18 + 18;
8956 struct hci_rp_read_local_oob_ext_data *rp;
8958 if (skb->len != sizeof(*rp)) {
8959 status = MGMT_STATUS_FAILED;
8962 status = MGMT_STATUS_SUCCESS;
8963 rp = (void *)skb->data;
8965 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8966 eir_len = 5 + 18 + 18;
8970 eir_len = 5 + 18 + 18 + 18 + 18;
8980 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8987 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8988 hdev->dev_class, 3);
8991 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8992 EIR_SSP_HASH_C192, h192, 16);
8993 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8994 EIR_SSP_RAND_R192, r192, 16);
8998 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8999 EIR_SSP_HASH_C256, h256, 16);
9000 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9001 EIR_SSP_RAND_R256, r256, 16);
9005 mgmt_rp->type = mgmt_cp->type;
9006 mgmt_rp->eir_len = cpu_to_le16(eir_len);
9008 err = mgmt_cmd_complete(cmd->sk, hdev->id,
9009 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
9010 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
9011 if (err < 0 || status)
9014 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
9016 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9017 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
9018 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
9021 mgmt_pending_remove(cmd);
9024 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
9025 struct mgmt_cp_read_local_oob_ext_data *cp)
9027 struct mgmt_pending_cmd *cmd;
9028 struct hci_request req;
9031 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
9036 hci_req_init(&req, hdev);
9038 if (bredr_sc_enabled(hdev))
9039 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
9041 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
9043 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
9045 mgmt_pending_remove(cmd);
9052 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
9053 void *data, u16 data_len)
9055 struct mgmt_cp_read_local_oob_ext_data *cp = data;
9056 struct mgmt_rp_read_local_oob_ext_data *rp;
9059 u8 status, flags, role, addr[7], hash[16], rand[16];
9062 bt_dev_dbg(hdev, "sock %p", sk);
9064 if (hdev_is_powered(hdev)) {
9066 case BIT(BDADDR_BREDR):
9067 status = mgmt_bredr_support(hdev);
9073 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9074 status = mgmt_le_support(hdev);
9078 eir_len = 9 + 3 + 18 + 18 + 3;
9081 status = MGMT_STATUS_INVALID_PARAMS;
9086 status = MGMT_STATUS_NOT_POWERED;
9090 rp_len = sizeof(*rp) + eir_len;
9091 rp = kmalloc(rp_len, GFP_ATOMIC);
9102 case BIT(BDADDR_BREDR):
9103 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9104 err = read_local_ssp_oob_req(hdev, sk, cp);
9105 hci_dev_unlock(hdev);
9109 status = MGMT_STATUS_FAILED;
9112 eir_len = eir_append_data(rp->eir, eir_len,
9114 hdev->dev_class, 3);
9117 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9118 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
9119 smp_generate_oob(hdev, hash, rand) < 0) {
9120 hci_dev_unlock(hdev);
9121 status = MGMT_STATUS_FAILED;
9125 /* This should return the active RPA, but since the RPA
9126 * is only programmed on demand, it is really hard to fill
9127 * this in at the moment. For now disallow retrieving
9128 * local out-of-band data when privacy is in use.
9130 * Returning the identity address will not help here since
9131 * pairing happens before the identity resolving key is
9132 * known and thus the connection establishment happens
9133 * based on the RPA and not the identity address.
9135 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
9136 hci_dev_unlock(hdev);
9137 status = MGMT_STATUS_REJECTED;
9141 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
9142 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
9143 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
9144 bacmp(&hdev->static_addr, BDADDR_ANY))) {
9145 memcpy(addr, &hdev->static_addr, 6);
9148 memcpy(addr, &hdev->bdaddr, 6);
9152 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
9153 addr, sizeof(addr));
9155 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
9160 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
9161 &role, sizeof(role));
9163 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
9164 eir_len = eir_append_data(rp->eir, eir_len,
9166 hash, sizeof(hash));
9168 eir_len = eir_append_data(rp->eir, eir_len,
9170 rand, sizeof(rand));
9173 flags = mgmt_get_adv_discov_flags(hdev);
9175 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
9176 flags |= LE_AD_NO_BREDR;
9178 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
9179 &flags, sizeof(flags));
9183 hci_dev_unlock(hdev);
9185 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
9187 status = MGMT_STATUS_SUCCESS;
9190 rp->type = cp->type;
9191 rp->eir_len = cpu_to_le16(eir_len);
9193 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
9194 status, rp, sizeof(*rp) + eir_len);
9195 if (err < 0 || status)
9198 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9199 rp, sizeof(*rp) + eir_len,
9200 HCI_MGMT_OOB_DATA_EVENTS, sk);
9208 static u32 get_supported_adv_flags(struct hci_dev *hdev)
9212 flags |= MGMT_ADV_FLAG_CONNECTABLE;
9213 flags |= MGMT_ADV_FLAG_DISCOV;
9214 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
9215 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
9216 flags |= MGMT_ADV_FLAG_APPEARANCE;
9217 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
9218 flags |= MGMT_ADV_PARAM_DURATION;
9219 flags |= MGMT_ADV_PARAM_TIMEOUT;
9220 flags |= MGMT_ADV_PARAM_INTERVALS;
9221 flags |= MGMT_ADV_PARAM_TX_POWER;
9222 flags |= MGMT_ADV_PARAM_SCAN_RSP;
9224 /* In extended adv TX_POWER returned from Set Adv Param
9225 * will be always valid.
9227 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
9228 ext_adv_capable(hdev))
9229 flags |= MGMT_ADV_FLAG_TX_POWER;
9231 if (ext_adv_capable(hdev)) {
9232 flags |= MGMT_ADV_FLAG_SEC_1M;
9233 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
9234 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
9236 if (hdev->le_features[1] & HCI_LE_PHY_2M)
9237 flags |= MGMT_ADV_FLAG_SEC_2M;
9239 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
9240 flags |= MGMT_ADV_FLAG_SEC_CODED;
9246 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
9247 void *data, u16 data_len)
9249 struct mgmt_rp_read_adv_features *rp;
9252 struct adv_info *adv_instance;
9253 u32 supported_flags;
9256 bt_dev_dbg(hdev, "sock %p", sk);
9258 if (!lmp_le_capable(hdev))
9259 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9260 MGMT_STATUS_REJECTED);
9262 /* Enabling the experimental LL Privay support disables support for
9265 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9266 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9267 MGMT_STATUS_NOT_SUPPORTED);
9271 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
9272 rp = kmalloc(rp_len, GFP_ATOMIC);
9274 hci_dev_unlock(hdev);
9278 supported_flags = get_supported_adv_flags(hdev);
9280 rp->supported_flags = cpu_to_le32(supported_flags);
9281 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
9282 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
9283 rp->max_instances = hdev->le_num_of_adv_sets;
9284 rp->num_instances = hdev->adv_instance_cnt;
9286 instance = rp->instance;
9287 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
9288 *instance = adv_instance->instance;
9292 hci_dev_unlock(hdev);
9294 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9295 MGMT_STATUS_SUCCESS, rp, rp_len);
9302 static u8 calculate_name_len(struct hci_dev *hdev)
9304 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
9306 return append_local_name(hdev, buf, 0);
9309 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
9312 u8 max_len = HCI_MAX_AD_LENGTH;
9315 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
9316 MGMT_ADV_FLAG_LIMITED_DISCOV |
9317 MGMT_ADV_FLAG_MANAGED_FLAGS))
9320 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
9323 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
9324 max_len -= calculate_name_len(hdev);
9326 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
9333 static bool flags_managed(u32 adv_flags)
9335 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
9336 MGMT_ADV_FLAG_LIMITED_DISCOV |
9337 MGMT_ADV_FLAG_MANAGED_FLAGS);
9340 static bool tx_power_managed(u32 adv_flags)
9342 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
9345 static bool name_managed(u32 adv_flags)
9347 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
9350 static bool appearance_managed(u32 adv_flags)
9352 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
9355 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
9356 u8 len, bool is_adv_data)
9361 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
9366 /* Make sure that the data is correctly formatted. */
9367 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
9373 if (data[i + 1] == EIR_FLAGS &&
9374 (!is_adv_data || flags_managed(adv_flags)))
9377 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
9380 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
9383 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
9386 if (data[i + 1] == EIR_APPEARANCE &&
9387 appearance_managed(adv_flags))
9390 /* If the current field length would exceed the total data
9391 * length, then it's invalid.
9393 if (i + cur_len >= len)
9400 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
9402 u32 supported_flags, phy_flags;
9404 /* The current implementation only supports a subset of the specified
9405 * flags. Also need to check mutual exclusiveness of sec flags.
9407 supported_flags = get_supported_adv_flags(hdev);
9408 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
9409 if (adv_flags & ~supported_flags ||
9410 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
9416 static bool adv_busy(struct hci_dev *hdev)
9418 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9419 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9420 pending_find(MGMT_OP_SET_LE, hdev) ||
9421 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
9422 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
9425 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
9428 struct mgmt_pending_cmd *cmd;
9429 struct mgmt_cp_add_advertising *cp;
9430 struct mgmt_rp_add_advertising rp;
9431 struct adv_info *adv_instance, *n;
9434 bt_dev_dbg(hdev, "status %u", status);
9438 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
9440 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
9442 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
9443 if (!adv_instance->pending)
9447 adv_instance->pending = false;
9451 instance = adv_instance->instance;
9453 if (hdev->cur_adv_instance == instance)
9454 cancel_adv_timeout(hdev);
9456 hci_remove_adv_instance(hdev, instance);
9457 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
9464 rp.instance = cp->instance;
9467 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9468 mgmt_status(status));
9470 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9471 mgmt_status(status), &rp, sizeof(rp));
9473 mgmt_pending_remove(cmd);
9476 hci_dev_unlock(hdev);
9479 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
9480 void *data, u16 data_len)
9482 struct mgmt_cp_add_advertising *cp = data;
9483 struct mgmt_rp_add_advertising rp;
9486 u16 timeout, duration;
9487 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
9488 u8 schedule_instance = 0;
9489 struct adv_info *next_instance;
9491 struct mgmt_pending_cmd *cmd;
9492 struct hci_request req;
9494 bt_dev_dbg(hdev, "sock %p", sk);
9496 status = mgmt_le_support(hdev);
9498 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9501 /* Enabling the experimental LL Privay support disables support for
9504 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9505 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9506 MGMT_STATUS_NOT_SUPPORTED);
9508 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9509 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9510 MGMT_STATUS_INVALID_PARAMS);
9512 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
9513 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9514 MGMT_STATUS_INVALID_PARAMS);
9516 flags = __le32_to_cpu(cp->flags);
9517 timeout = __le16_to_cpu(cp->timeout);
9518 duration = __le16_to_cpu(cp->duration);
9520 if (!requested_adv_flags_are_valid(hdev, flags))
9521 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9522 MGMT_STATUS_INVALID_PARAMS);
9526 if (timeout && !hdev_is_powered(hdev)) {
9527 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9528 MGMT_STATUS_REJECTED);
9532 if (adv_busy(hdev)) {
9533 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9538 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
9539 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
9540 cp->scan_rsp_len, false)) {
9541 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9542 MGMT_STATUS_INVALID_PARAMS);
9546 err = hci_add_adv_instance(hdev, cp->instance, flags,
9547 cp->adv_data_len, cp->data,
9549 cp->data + cp->adv_data_len,
9551 HCI_ADV_TX_POWER_NO_PREFERENCE,
9552 hdev->le_adv_min_interval,
9553 hdev->le_adv_max_interval);
9555 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9556 MGMT_STATUS_FAILED);
9560 /* Only trigger an advertising added event if a new instance was
9563 if (hdev->adv_instance_cnt > prev_instance_cnt)
9564 mgmt_advertising_added(sk, hdev, cp->instance);
9566 if (hdev->cur_adv_instance == cp->instance) {
9567 /* If the currently advertised instance is being changed then
9568 * cancel the current advertising and schedule the next
9569 * instance. If there is only one instance then the overridden
9570 * advertising data will be visible right away.
9572 cancel_adv_timeout(hdev);
9574 next_instance = hci_get_next_instance(hdev, cp->instance);
9576 schedule_instance = next_instance->instance;
9577 } else if (!hdev->adv_instance_timeout) {
9578 /* Immediately advertise the new instance if no other
9579 * instance is currently being advertised.
9581 schedule_instance = cp->instance;
9584 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
9585 * there is no instance to be advertised then we have no HCI
9586 * communication to make. Simply return.
9588 if (!hdev_is_powered(hdev) ||
9589 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9590 !schedule_instance) {
9591 rp.instance = cp->instance;
9592 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9593 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9597 /* We're good to go, update advertising data, parameters, and start
9600 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
9607 hci_req_init(&req, hdev);
9609 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
9612 err = hci_req_run(&req, add_advertising_complete);
9615 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9616 MGMT_STATUS_FAILED);
9617 mgmt_pending_remove(cmd);
9621 hci_dev_unlock(hdev);
9626 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
9629 struct mgmt_pending_cmd *cmd;
9630 struct mgmt_cp_add_ext_adv_params *cp;
9631 struct mgmt_rp_add_ext_adv_params rp;
9632 struct adv_info *adv_instance;
9635 BT_DBG("%s", hdev->name);
9639 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
9644 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9648 rp.instance = cp->instance;
9649 rp.tx_power = adv_instance->tx_power;
9651 /* While we're at it, inform userspace of the available space for this
9652 * advertisement, given the flags that will be used.
9654 flags = __le32_to_cpu(cp->flags);
9655 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9656 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9659 /* If this advertisement was previously advertising and we
9660 * failed to update it, we signal that it has been removed and
9661 * delete its structure
9663 if (!adv_instance->pending)
9664 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
9666 hci_remove_adv_instance(hdev, cp->instance);
9668 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9669 mgmt_status(status));
9672 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9673 mgmt_status(status), &rp, sizeof(rp));
9678 mgmt_pending_remove(cmd);
9680 hci_dev_unlock(hdev);
9683 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
9684 void *data, u16 data_len)
9686 struct mgmt_cp_add_ext_adv_params *cp = data;
9687 struct mgmt_rp_add_ext_adv_params rp;
9688 struct mgmt_pending_cmd *cmd = NULL;
9689 struct adv_info *adv_instance;
9690 struct hci_request req;
9691 u32 flags, min_interval, max_interval;
9692 u16 timeout, duration;
9697 BT_DBG("%s", hdev->name);
9699 status = mgmt_le_support(hdev);
9701 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9704 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9705 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9706 MGMT_STATUS_INVALID_PARAMS);
9708 /* The purpose of breaking add_advertising into two separate MGMT calls
9709 * for params and data is to allow more parameters to be added to this
9710 * structure in the future. For this reason, we verify that we have the
9711 * bare minimum structure we know of when the interface was defined. Any
9712 * extra parameters we don't know about will be ignored in this request.
9714 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
9715 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9716 MGMT_STATUS_INVALID_PARAMS);
9718 flags = __le32_to_cpu(cp->flags);
9720 if (!requested_adv_flags_are_valid(hdev, flags))
9721 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9722 MGMT_STATUS_INVALID_PARAMS);
9726 /* In new interface, we require that we are powered to register */
9727 if (!hdev_is_powered(hdev)) {
9728 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9729 MGMT_STATUS_REJECTED);
9733 if (adv_busy(hdev)) {
9734 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9739 /* Parse defined parameters from request, use defaults otherwise */
9740 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
9741 __le16_to_cpu(cp->timeout) : 0;
9743 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
9744 __le16_to_cpu(cp->duration) :
9745 hdev->def_multi_adv_rotation_duration;
9747 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9748 __le32_to_cpu(cp->min_interval) :
9749 hdev->le_adv_min_interval;
9751 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9752 __le32_to_cpu(cp->max_interval) :
9753 hdev->le_adv_max_interval;
9755 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
9757 HCI_ADV_TX_POWER_NO_PREFERENCE;
9759 /* Create advertising instance with no advertising or response data */
9760 err = hci_add_adv_instance(hdev, cp->instance, flags,
9761 0, NULL, 0, NULL, timeout, duration,
9762 tx_power, min_interval, max_interval);
9765 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9766 MGMT_STATUS_FAILED);
9770 /* Submit request for advertising params if ext adv available */
9771 if (ext_adv_capable(hdev)) {
9772 hci_req_init(&req, hdev);
9773 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9775 /* Updating parameters of an active instance will return a
9776 * Command Disallowed error, so we must first disable the
9777 * instance if it is active.
9779 if (!adv_instance->pending)
9780 __hci_req_disable_ext_adv_instance(&req, cp->instance);
9782 __hci_req_setup_ext_adv_instance(&req, cp->instance);
9784 err = hci_req_run(&req, add_ext_adv_params_complete);
9787 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
9788 hdev, data, data_len);
9791 hci_remove_adv_instance(hdev, cp->instance);
9796 rp.instance = cp->instance;
9797 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9798 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9799 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9800 err = mgmt_cmd_complete(sk, hdev->id,
9801 MGMT_OP_ADD_EXT_ADV_PARAMS,
9802 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9806 hci_dev_unlock(hdev);
9811 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9814 struct mgmt_cp_add_ext_adv_data *cp = data;
9815 struct mgmt_rp_add_ext_adv_data rp;
9816 u8 schedule_instance = 0;
9817 struct adv_info *next_instance;
9818 struct adv_info *adv_instance;
9820 struct mgmt_pending_cmd *cmd;
9821 struct hci_request req;
9823 BT_DBG("%s", hdev->name);
9827 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9829 if (!adv_instance) {
9830 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9831 MGMT_STATUS_INVALID_PARAMS);
9835 /* In new interface, we require that we are powered to register */
9836 if (!hdev_is_powered(hdev)) {
9837 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9838 MGMT_STATUS_REJECTED);
9839 goto clear_new_instance;
9842 if (adv_busy(hdev)) {
9843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9845 goto clear_new_instance;
9848 /* Validate new data */
9849 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9850 cp->adv_data_len, true) ||
9851 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9852 cp->adv_data_len, cp->scan_rsp_len, false)) {
9853 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9854 MGMT_STATUS_INVALID_PARAMS);
9855 goto clear_new_instance;
9858 /* Set the data in the advertising instance */
9859 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9860 cp->data, cp->scan_rsp_len,
9861 cp->data + cp->adv_data_len);
9863 /* We're good to go, update advertising data, parameters, and start
9867 hci_req_init(&req, hdev);
9869 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
9871 if (ext_adv_capable(hdev)) {
9872 __hci_req_update_adv_data(&req, cp->instance);
9873 __hci_req_update_scan_rsp_data(&req, cp->instance);
9874 __hci_req_enable_ext_advertising(&req, cp->instance);
9877 /* If using software rotation, determine next instance to use */
9879 if (hdev->cur_adv_instance == cp->instance) {
9880 /* If the currently advertised instance is being changed
9881 * then cancel the current advertising and schedule the
9882 * next instance. If there is only one instance then the
9883 * overridden advertising data will be visible right
9886 cancel_adv_timeout(hdev);
9888 next_instance = hci_get_next_instance(hdev,
9891 schedule_instance = next_instance->instance;
9892 } else if (!hdev->adv_instance_timeout) {
9893 /* Immediately advertise the new instance if no other
9894 * instance is currently being advertised.
9896 schedule_instance = cp->instance;
9899 /* If the HCI_ADVERTISING flag is set or there is no instance to
9900 * be advertised then we have no HCI communication to make.
9903 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9904 !schedule_instance) {
9905 if (adv_instance->pending) {
9906 mgmt_advertising_added(sk, hdev, cp->instance);
9907 adv_instance->pending = false;
9909 rp.instance = cp->instance;
9910 err = mgmt_cmd_complete(sk, hdev->id,
9911 MGMT_OP_ADD_EXT_ADV_DATA,
9912 MGMT_STATUS_SUCCESS, &rp,
9917 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
9921 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9925 goto clear_new_instance;
9929 err = hci_req_run(&req, add_advertising_complete);
9932 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9933 MGMT_STATUS_FAILED);
9934 mgmt_pending_remove(cmd);
9935 goto clear_new_instance;
9938 /* We were successful in updating data, so trigger advertising_added
9939 * event if this is an instance that wasn't previously advertising. If
9940 * a failure occurs in the requests we initiated, we will remove the
9941 * instance again in add_advertising_complete
9943 if (adv_instance->pending)
9944 mgmt_advertising_added(sk, hdev, cp->instance);
9949 hci_remove_adv_instance(hdev, cp->instance);
9952 hci_dev_unlock(hdev);
9957 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
9960 struct mgmt_pending_cmd *cmd;
9961 struct mgmt_cp_remove_advertising *cp;
9962 struct mgmt_rp_remove_advertising rp;
9964 bt_dev_dbg(hdev, "status %u", status);
9968 /* A failure status here only means that we failed to disable
9969 * advertising. Otherwise, the advertising instance has been removed,
9970 * so report success.
9972 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
9977 rp.instance = cp->instance;
9979 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
9981 mgmt_pending_remove(cmd);
9984 hci_dev_unlock(hdev);
9987 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9988 void *data, u16 data_len)
9990 struct mgmt_cp_remove_advertising *cp = data;
9991 struct mgmt_rp_remove_advertising rp;
9992 struct mgmt_pending_cmd *cmd;
9993 struct hci_request req;
9996 bt_dev_dbg(hdev, "sock %p", sk);
9998 /* Enabling the experimental LL Privay support disables support for
10001 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
10002 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
10003 MGMT_STATUS_NOT_SUPPORTED);
10005 hci_dev_lock(hdev);
10007 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
10008 err = mgmt_cmd_status(sk, hdev->id,
10009 MGMT_OP_REMOVE_ADVERTISING,
10010 MGMT_STATUS_INVALID_PARAMS);
10014 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
10015 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
10016 pending_find(MGMT_OP_SET_LE, hdev)) {
10017 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10022 if (list_empty(&hdev->adv_instances)) {
10023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10024 MGMT_STATUS_INVALID_PARAMS);
10028 hci_req_init(&req, hdev);
10030 /* If we use extended advertising, instance is disabled and removed */
10031 if (ext_adv_capable(hdev)) {
10032 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10033 __hci_req_remove_ext_adv_instance(&req, cp->instance);
10036 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
10038 if (list_empty(&hdev->adv_instances))
10039 __hci_req_disable_advertising(&req);
10041 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
10042 * flag is set or the device isn't powered then we have no HCI
10043 * communication to make. Simply return.
10045 if (skb_queue_empty(&req.cmd_q) ||
10046 !hdev_is_powered(hdev) ||
10047 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
10048 hci_req_purge(&req);
10049 rp.instance = cp->instance;
10050 err = mgmt_cmd_complete(sk, hdev->id,
10051 MGMT_OP_REMOVE_ADVERTISING,
10052 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10056 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
10063 err = hci_req_run(&req, remove_advertising_complete);
10065 mgmt_pending_remove(cmd);
10068 hci_dev_unlock(hdev);
10073 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
10074 void *data, u16 data_len)
10076 struct mgmt_cp_get_adv_size_info *cp = data;
10077 struct mgmt_rp_get_adv_size_info rp;
10078 u32 flags, supported_flags;
10081 bt_dev_dbg(hdev, "sock %p", sk);
10083 if (!lmp_le_capable(hdev))
10084 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10085 MGMT_STATUS_REJECTED);
10087 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10088 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10089 MGMT_STATUS_INVALID_PARAMS);
10091 flags = __le32_to_cpu(cp->flags);
10093 /* The current implementation only supports a subset of the specified
10096 supported_flags = get_supported_adv_flags(hdev);
10097 if (flags & ~supported_flags)
10098 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10099 MGMT_STATUS_INVALID_PARAMS);
10101 rp.instance = cp->instance;
10102 rp.flags = cp->flags;
10103 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10104 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10106 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10107 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10112 static const struct hci_mgmt_handler mgmt_handlers[] = {
10113 { NULL }, /* 0x0000 (no command) */
10114 { read_version, MGMT_READ_VERSION_SIZE,
10116 HCI_MGMT_UNTRUSTED },
10117 { read_commands, MGMT_READ_COMMANDS_SIZE,
10119 HCI_MGMT_UNTRUSTED },
10120 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
10122 HCI_MGMT_UNTRUSTED },
10123 { read_controller_info, MGMT_READ_INFO_SIZE,
10124 HCI_MGMT_UNTRUSTED },
10125 { set_powered, MGMT_SETTING_SIZE },
10126 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
10127 { set_connectable, MGMT_SETTING_SIZE },
10128 { set_fast_connectable, MGMT_SETTING_SIZE },
10129 { set_bondable, MGMT_SETTING_SIZE },
10130 { set_link_security, MGMT_SETTING_SIZE },
10131 { set_ssp, MGMT_SETTING_SIZE },
10132 { set_hs, MGMT_SETTING_SIZE },
10133 { set_le, MGMT_SETTING_SIZE },
10134 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
10135 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
10136 { add_uuid, MGMT_ADD_UUID_SIZE },
10137 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
10138 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
10139 HCI_MGMT_VAR_LEN },
10140 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
10141 HCI_MGMT_VAR_LEN },
10142 { disconnect, MGMT_DISCONNECT_SIZE },
10143 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
10144 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
10145 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
10146 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
10147 { pair_device, MGMT_PAIR_DEVICE_SIZE },
10148 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
10149 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
10150 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
10151 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
10152 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
10153 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
10154 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
10155 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
10156 HCI_MGMT_VAR_LEN },
10157 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
10158 { start_discovery, MGMT_START_DISCOVERY_SIZE },
10159 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
10160 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
10161 { block_device, MGMT_BLOCK_DEVICE_SIZE },
10162 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
10163 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
10164 { set_advertising, MGMT_SETTING_SIZE },
10165 { set_bredr, MGMT_SETTING_SIZE },
10166 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
10167 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
10168 { set_secure_conn, MGMT_SETTING_SIZE },
10169 { set_debug_keys, MGMT_SETTING_SIZE },
10170 { set_privacy, MGMT_SET_PRIVACY_SIZE },
10171 { load_irks, MGMT_LOAD_IRKS_SIZE,
10172 HCI_MGMT_VAR_LEN },
10173 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
10174 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
10175 { add_device, MGMT_ADD_DEVICE_SIZE },
10176 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
10177 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
10178 HCI_MGMT_VAR_LEN },
10179 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
10181 HCI_MGMT_UNTRUSTED },
10182 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
10183 HCI_MGMT_UNCONFIGURED |
10184 HCI_MGMT_UNTRUSTED },
10185 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
10186 HCI_MGMT_UNCONFIGURED },
10187 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
10188 HCI_MGMT_UNCONFIGURED },
10189 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
10190 HCI_MGMT_VAR_LEN },
10191 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
10192 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
10194 HCI_MGMT_UNTRUSTED },
10195 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
10196 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
10197 HCI_MGMT_VAR_LEN },
10198 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
10199 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
10200 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
10201 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
10202 HCI_MGMT_UNTRUSTED },
10203 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
10204 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
10205 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
10206 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
10207 HCI_MGMT_VAR_LEN },
10208 { set_wideband_speech, MGMT_SETTING_SIZE },
10209 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
10210 HCI_MGMT_UNTRUSTED },
10211 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
10212 HCI_MGMT_UNTRUSTED |
10213 HCI_MGMT_HDEV_OPTIONAL },
10214 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
10216 HCI_MGMT_HDEV_OPTIONAL },
10217 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
10218 HCI_MGMT_UNTRUSTED },
10219 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
10220 HCI_MGMT_VAR_LEN },
10221 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
10222 HCI_MGMT_UNTRUSTED },
10223 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
10224 HCI_MGMT_VAR_LEN },
10225 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
10226 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
10227 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
10228 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
10229 HCI_MGMT_VAR_LEN },
10230 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
10231 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
10232 HCI_MGMT_VAR_LEN },
10233 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
10234 HCI_MGMT_VAR_LEN },
10235 { add_adv_patterns_monitor_rssi,
10236 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
10237 HCI_MGMT_VAR_LEN },
10241 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
10242 { NULL }, /* 0x0000 (no command) */
10243 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
10244 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
10245 HCI_MGMT_VAR_LEN },
10246 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
10247 HCI_MGMT_VAR_LEN },
10248 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
10249 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
10250 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
10251 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
10252 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
10253 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
10254 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
10255 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
10256 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
10257 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
10258 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
10259 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
10263 void mgmt_index_added(struct hci_dev *hdev)
10265 struct mgmt_ev_ext_index ev;
10267 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10270 switch (hdev->dev_type) {
10272 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10273 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
10274 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10277 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
10278 HCI_MGMT_INDEX_EVENTS);
10289 ev.bus = hdev->bus;
10291 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
10292 HCI_MGMT_EXT_INDEX_EVENTS);
10295 void mgmt_index_removed(struct hci_dev *hdev)
10297 struct mgmt_ev_ext_index ev;
10298 u8 status = MGMT_STATUS_INVALID_INDEX;
10300 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10303 switch (hdev->dev_type) {
10305 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10307 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10308 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
10309 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10312 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
10313 HCI_MGMT_INDEX_EVENTS);
10324 ev.bus = hdev->bus;
10326 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
10327 HCI_MGMT_EXT_INDEX_EVENTS);
10330 /* This function requires the caller holds hdev->lock */
10331 static void restart_le_actions(struct hci_dev *hdev)
10333 struct hci_conn_params *p;
10335 list_for_each_entry(p, &hdev->le_conn_params, list) {
10336 /* Needed for AUTO_OFF case where might not "really"
10337 * have been powered off.
10339 list_del_init(&p->action);
10341 switch (p->auto_connect) {
10342 case HCI_AUTO_CONN_DIRECT:
10343 case HCI_AUTO_CONN_ALWAYS:
10344 list_add(&p->action, &hdev->pend_le_conns);
10346 case HCI_AUTO_CONN_REPORT:
10347 list_add(&p->action, &hdev->pend_le_reports);
10355 void mgmt_power_on(struct hci_dev *hdev, int err)
10357 struct cmd_lookup match = { NULL, hdev };
10359 bt_dev_dbg(hdev, "err %d", err);
10361 hci_dev_lock(hdev);
10364 restart_le_actions(hdev);
10365 hci_update_background_scan(hdev);
10368 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10370 new_settings(hdev, match.sk);
10373 sock_put(match.sk);
10375 hci_dev_unlock(hdev);
10378 void __mgmt_power_off(struct hci_dev *hdev)
10380 struct cmd_lookup match = { NULL, hdev };
10381 u8 status, zero_cod[] = { 0, 0, 0 };
10383 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10385 /* If the power off is because of hdev unregistration let
10386 * use the appropriate INVALID_INDEX status. Otherwise use
10387 * NOT_POWERED. We cover both scenarios here since later in
10388 * mgmt_index_removed() any hci_conn callbacks will have already
10389 * been triggered, potentially causing misleading DISCONNECTED
10390 * status responses.
10392 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
10393 status = MGMT_STATUS_INVALID_INDEX;
10395 status = MGMT_STATUS_NOT_POWERED;
10397 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10399 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
10400 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
10401 zero_cod, sizeof(zero_cod),
10402 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10403 ext_info_changed(hdev, NULL);
10406 new_settings(hdev, match.sk);
10409 sock_put(match.sk);
10412 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
10414 struct mgmt_pending_cmd *cmd;
10417 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10421 if (err == -ERFKILL)
10422 status = MGMT_STATUS_RFKILLED;
10424 status = MGMT_STATUS_FAILED;
10426 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
10428 mgmt_pending_remove(cmd);
10431 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
10434 struct mgmt_ev_new_link_key ev;
10436 memset(&ev, 0, sizeof(ev));
10438 ev.store_hint = persistent;
10439 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10440 ev.key.addr.type = BDADDR_BREDR;
10441 ev.key.type = key->type;
10442 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
10443 ev.key.pin_len = key->pin_len;
10445 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
10448 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
10450 switch (ltk->type) {
10452 case SMP_LTK_RESPONDER:
10453 if (ltk->authenticated)
10454 return MGMT_LTK_AUTHENTICATED;
10455 return MGMT_LTK_UNAUTHENTICATED;
10457 if (ltk->authenticated)
10458 return MGMT_LTK_P256_AUTH;
10459 return MGMT_LTK_P256_UNAUTH;
10460 case SMP_LTK_P256_DEBUG:
10461 return MGMT_LTK_P256_DEBUG;
10464 return MGMT_LTK_UNAUTHENTICATED;
10467 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
10469 struct mgmt_ev_new_long_term_key ev;
10471 memset(&ev, 0, sizeof(ev));
10473 /* Devices using resolvable or non-resolvable random addresses
10474 * without providing an identity resolving key don't require
10475 * to store long term keys. Their addresses will change the
10476 * next time around.
10478 * Only when a remote device provides an identity address
10479 * make sure the long term key is stored. If the remote
10480 * identity is known, the long term keys are internally
10481 * mapped to the identity address. So allow static random
10482 * and public addresses here.
10484 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10485 (key->bdaddr.b[5] & 0xc0) != 0xc0)
10486 ev.store_hint = 0x00;
10488 ev.store_hint = persistent;
10490 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10491 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
10492 ev.key.type = mgmt_ltk_type(key);
10493 ev.key.enc_size = key->enc_size;
10494 ev.key.ediv = key->ediv;
10495 ev.key.rand = key->rand;
10497 if (key->type == SMP_LTK)
10498 ev.key.initiator = 1;
10500 /* Make sure we copy only the significant bytes based on the
10501 * encryption key size, and set the rest of the value to zeroes.
10503 memcpy(ev.key.val, key->val, key->enc_size);
10504 memset(ev.key.val + key->enc_size, 0,
10505 sizeof(ev.key.val) - key->enc_size);
10507 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
10510 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
10512 struct mgmt_ev_new_irk ev;
10514 memset(&ev, 0, sizeof(ev));
10516 ev.store_hint = persistent;
10518 bacpy(&ev.rpa, &irk->rpa);
10519 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
10520 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
10521 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
10523 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
10526 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
10529 struct mgmt_ev_new_csrk ev;
10531 memset(&ev, 0, sizeof(ev));
10533 /* Devices using resolvable or non-resolvable random addresses
10534 * without providing an identity resolving key don't require
10535 * to store signature resolving keys. Their addresses will change
10536 * the next time around.
10538 * Only when a remote device provides an identity address
10539 * make sure the signature resolving key is stored. So allow
10540 * static random and public addresses here.
10542 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10543 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
10544 ev.store_hint = 0x00;
10546 ev.store_hint = persistent;
10548 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
10549 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
10550 ev.key.type = csrk->type;
10551 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
10553 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
10556 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
10557 u8 bdaddr_type, u8 store_hint, u16 min_interval,
10558 u16 max_interval, u16 latency, u16 timeout)
10560 struct mgmt_ev_new_conn_param ev;
10562 if (!hci_is_identity_address(bdaddr, bdaddr_type))
10565 memset(&ev, 0, sizeof(ev));
10566 bacpy(&ev.addr.bdaddr, bdaddr);
10567 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
10568 ev.store_hint = store_hint;
10569 ev.min_interval = cpu_to_le16(min_interval);
10570 ev.max_interval = cpu_to_le16(max_interval);
10571 ev.latency = cpu_to_le16(latency);
10572 ev.timeout = cpu_to_le16(timeout);
10574 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
10577 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
10578 u8 *name, u8 name_len)
10581 struct mgmt_ev_device_connected *ev = (void *) buf;
10585 bacpy(&ev->addr.bdaddr, &conn->dst);
10586 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10589 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
10591 ev->flags = __cpu_to_le32(flags);
10593 /* We must ensure that the EIR Data fields are ordered and
10594 * unique. Keep it simple for now and avoid the problem by not
10595 * adding any BR/EDR data to the LE adv.
10597 if (conn->le_adv_data_len > 0) {
10598 memcpy(&ev->eir[eir_len],
10599 conn->le_adv_data, conn->le_adv_data_len);
10600 eir_len = conn->le_adv_data_len;
10603 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
10606 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
10607 eir_len = eir_append_data(ev->eir, eir_len,
10609 conn->dev_class, 3);
10612 ev->eir_len = cpu_to_le16(eir_len);
10614 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
10615 sizeof(*ev) + eir_len, NULL);
10618 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
10620 struct sock **sk = data;
10622 cmd->cmd_complete(cmd, 0);
10627 mgmt_pending_remove(cmd);
10630 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
10632 struct hci_dev *hdev = data;
10633 struct mgmt_cp_unpair_device *cp = cmd->param;
10635 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
10637 cmd->cmd_complete(cmd, 0);
10638 mgmt_pending_remove(cmd);
10641 bool mgmt_powering_down(struct hci_dev *hdev)
10643 struct mgmt_pending_cmd *cmd;
10644 struct mgmt_mode *cp;
10646 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10657 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
10658 u8 link_type, u8 addr_type, u8 reason,
10659 bool mgmt_connected)
10661 struct mgmt_ev_device_disconnected ev;
10662 struct sock *sk = NULL;
10664 /* The connection is still in hci_conn_hash so test for 1
10665 * instead of 0 to know if this is the last one.
10667 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10668 cancel_delayed_work(&hdev->power_off);
10669 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10672 if (!mgmt_connected)
10675 if (link_type != ACL_LINK && link_type != LE_LINK)
10678 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
10680 bacpy(&ev.addr.bdaddr, bdaddr);
10681 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10682 ev.reason = reason;
10684 /* Report disconnects due to suspend */
10685 if (hdev->suspended)
10686 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
10688 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
10693 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10697 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10698 u8 link_type, u8 addr_type, u8 status)
10700 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
10701 struct mgmt_cp_disconnect *cp;
10702 struct mgmt_pending_cmd *cmd;
10704 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10707 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
10713 if (bacmp(bdaddr, &cp->addr.bdaddr))
10716 if (cp->addr.type != bdaddr_type)
10719 cmd->cmd_complete(cmd, mgmt_status(status));
10720 mgmt_pending_remove(cmd);
10723 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10724 u8 addr_type, u8 status)
10726 struct mgmt_ev_connect_failed ev;
10728 /* The connection is still in hci_conn_hash so test for 1
10729 * instead of 0 to know if this is the last one.
10731 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10732 cancel_delayed_work(&hdev->power_off);
10733 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10736 bacpy(&ev.addr.bdaddr, bdaddr);
10737 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10738 ev.status = mgmt_status(status);
10740 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
10743 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
10745 struct mgmt_ev_pin_code_request ev;
10747 bacpy(&ev.addr.bdaddr, bdaddr);
10748 ev.addr.type = BDADDR_BREDR;
10749 ev.secure = secure;
10751 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
10754 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10757 struct mgmt_pending_cmd *cmd;
10759 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
10763 cmd->cmd_complete(cmd, mgmt_status(status));
10764 mgmt_pending_remove(cmd);
10767 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10770 struct mgmt_pending_cmd *cmd;
10772 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
10776 cmd->cmd_complete(cmd, mgmt_status(status));
10777 mgmt_pending_remove(cmd);
10780 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10781 u8 link_type, u8 addr_type, u32 value,
10784 struct mgmt_ev_user_confirm_request ev;
10786 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10788 bacpy(&ev.addr.bdaddr, bdaddr);
10789 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10790 ev.confirm_hint = confirm_hint;
10791 ev.value = cpu_to_le32(value);
10793 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
10797 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10798 u8 link_type, u8 addr_type)
10800 struct mgmt_ev_user_passkey_request ev;
10802 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10804 bacpy(&ev.addr.bdaddr, bdaddr);
10805 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10807 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
10811 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10812 u8 link_type, u8 addr_type, u8 status,
10815 struct mgmt_pending_cmd *cmd;
10817 cmd = pending_find(opcode, hdev);
10821 cmd->cmd_complete(cmd, mgmt_status(status));
10822 mgmt_pending_remove(cmd);
10827 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10828 u8 link_type, u8 addr_type, u8 status)
10830 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10831 status, MGMT_OP_USER_CONFIRM_REPLY);
10834 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10835 u8 link_type, u8 addr_type, u8 status)
10837 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10839 MGMT_OP_USER_CONFIRM_NEG_REPLY);
10842 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10843 u8 link_type, u8 addr_type, u8 status)
10845 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10846 status, MGMT_OP_USER_PASSKEY_REPLY);
10849 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10850 u8 link_type, u8 addr_type, u8 status)
10852 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10854 MGMT_OP_USER_PASSKEY_NEG_REPLY);
10857 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
10858 u8 link_type, u8 addr_type, u32 passkey,
10861 struct mgmt_ev_passkey_notify ev;
10863 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10865 bacpy(&ev.addr.bdaddr, bdaddr);
10866 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10867 ev.passkey = __cpu_to_le32(passkey);
10868 ev.entered = entered;
10870 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
10873 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
10875 struct mgmt_ev_auth_failed ev;
10876 struct mgmt_pending_cmd *cmd;
10877 u8 status = mgmt_status(hci_status);
10879 bacpy(&ev.addr.bdaddr, &conn->dst);
10880 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10881 ev.status = status;
10883 cmd = find_pairing(conn);
10885 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
10886 cmd ? cmd->sk : NULL);
10889 cmd->cmd_complete(cmd, status);
10890 mgmt_pending_remove(cmd);
10894 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
10896 struct cmd_lookup match = { NULL, hdev };
10900 u8 mgmt_err = mgmt_status(status);
10901 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
10902 cmd_status_rsp, &mgmt_err);
10906 if (test_bit(HCI_AUTH, &hdev->flags))
10907 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10909 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10911 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10915 new_settings(hdev, match.sk);
10918 sock_put(match.sk);
10921 static void clear_eir(struct hci_request *req)
10923 struct hci_dev *hdev = req->hdev;
10924 struct hci_cp_write_eir cp;
10926 if (!lmp_ext_inq_capable(hdev))
10929 memset(hdev->eir, 0, sizeof(hdev->eir));
10931 memset(&cp, 0, sizeof(cp));
10933 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
10936 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
10938 struct cmd_lookup match = { NULL, hdev };
10939 struct hci_request req;
10940 bool changed = false;
10943 u8 mgmt_err = mgmt_status(status);
10945 if (enable && hci_dev_test_and_clear_flag(hdev,
10946 HCI_SSP_ENABLED)) {
10947 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
10948 new_settings(hdev, NULL);
10951 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
10957 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
10959 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
10961 changed = hci_dev_test_and_clear_flag(hdev,
10964 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
10967 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
10970 new_settings(hdev, match.sk);
10973 sock_put(match.sk);
10975 hci_req_init(&req, hdev);
10977 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10978 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
10979 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
10980 sizeof(enable), &enable);
10981 __hci_req_update_eir(&req);
10986 hci_req_run(&req, NULL);
10989 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10991 struct cmd_lookup *match = data;
10993 if (match->sk == NULL) {
10994 match->sk = cmd->sk;
10995 sock_hold(match->sk);
10999 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11002 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11004 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11005 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11006 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11009 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11010 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11011 ext_info_changed(hdev, NULL);
11015 sock_put(match.sk);
11018 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11020 struct mgmt_cp_set_local_name ev;
11021 struct mgmt_pending_cmd *cmd;
11026 memset(&ev, 0, sizeof(ev));
11027 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11028 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11030 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11032 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11034 /* If this is a HCI command related to powering on the
11035 * HCI dev don't send any mgmt signals.
11037 if (pending_find(MGMT_OP_SET_POWERED, hdev))
11041 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11042 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11043 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
11046 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11050 for (i = 0; i < uuid_count; i++) {
11051 if (!memcmp(uuid, uuids[i], 16))
11058 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
11062 while (parsed < eir_len) {
11063 u8 field_len = eir[0];
11067 if (field_len == 0)
11070 if (eir_len - parsed < field_len + 1)
11074 case EIR_UUID16_ALL:
11075 case EIR_UUID16_SOME:
11076 for (i = 0; i + 3 <= field_len; i += 2) {
11077 memcpy(uuid, bluetooth_base_uuid, 16);
11078 uuid[13] = eir[i + 3];
11079 uuid[12] = eir[i + 2];
11080 if (has_uuid(uuid, uuid_count, uuids))
11084 case EIR_UUID32_ALL:
11085 case EIR_UUID32_SOME:
11086 for (i = 0; i + 5 <= field_len; i += 4) {
11087 memcpy(uuid, bluetooth_base_uuid, 16);
11088 uuid[15] = eir[i + 5];
11089 uuid[14] = eir[i + 4];
11090 uuid[13] = eir[i + 3];
11091 uuid[12] = eir[i + 2];
11092 if (has_uuid(uuid, uuid_count, uuids))
11096 case EIR_UUID128_ALL:
11097 case EIR_UUID128_SOME:
11098 for (i = 0; i + 17 <= field_len; i += 16) {
11099 memcpy(uuid, eir + i + 2, 16);
11100 if (has_uuid(uuid, uuid_count, uuids))
11106 parsed += field_len + 1;
11107 eir += field_len + 1;
11113 static void restart_le_scan(struct hci_dev *hdev)
11115 /* If controller is not scanning we are done. */
11116 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
11119 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
11120 hdev->discovery.scan_start +
11121 hdev->discovery.scan_duration))
11124 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
11125 DISCOV_LE_RESTART_DELAY);
11128 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
11129 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11131 /* If a RSSI threshold has been specified, and
11132 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
11133 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
11134 * is set, let it through for further processing, as we might need to
11135 * restart the scan.
11137 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
11138 * the results are also dropped.
11140 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11141 (rssi == HCI_RSSI_INVALID ||
11142 (rssi < hdev->discovery.rssi &&
11143 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
11146 if (hdev->discovery.uuid_count != 0) {
11147 /* If a list of UUIDs is provided in filter, results with no
11148 * matching UUID should be dropped.
11150 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
11151 hdev->discovery.uuids) &&
11152 !eir_has_uuids(scan_rsp, scan_rsp_len,
11153 hdev->discovery.uuid_count,
11154 hdev->discovery.uuids))
11158 /* If duplicate filtering does not report RSSI changes, then restart
11159 * scanning to ensure updated result with updated RSSI values.
11161 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
11162 restart_le_scan(hdev);
11164 /* Validate RSSI value against the RSSI threshold once more. */
11165 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11166 rssi < hdev->discovery.rssi)
11173 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11174 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
11175 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11178 struct mgmt_ev_device_found *ev = (void *)buf;
11181 /* Don't send events for a non-kernel initiated discovery. With
11182 * LE one exception is if we have pend_le_reports > 0 in which
11183 * case we're doing passive scanning and want these events.
11185 if (!hci_discovery_active(hdev)) {
11186 if (link_type == ACL_LINK)
11188 if (link_type == LE_LINK &&
11189 list_empty(&hdev->pend_le_reports) &&
11190 !hci_is_adv_monitoring(hdev)) {
11195 if (hdev->discovery.result_filtering) {
11196 /* We are using service discovery */
11197 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
11202 if (hdev->discovery.limited) {
11203 /* Check for limited discoverable bit */
11205 if (!(dev_class[1] & 0x20))
11208 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
11209 if (!flags || !(flags[0] & LE_AD_LIMITED))
11214 /* Make sure that the buffer is big enough. The 5 extra bytes
11215 * are for the potential CoD field.
11217 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
11220 memset(buf, 0, sizeof(buf));
11222 /* In case of device discovery with BR/EDR devices (pre 1.2), the
11223 * RSSI value was reported as 0 when not available. This behavior
11224 * is kept when using device discovery. This is required for full
11225 * backwards compatibility with the API.
11227 * However when using service discovery, the value 127 will be
11228 * returned when the RSSI is not available.
11230 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
11231 link_type == ACL_LINK)
11234 bacpy(&ev->addr.bdaddr, bdaddr);
11235 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11237 ev->flags = cpu_to_le32(flags);
11240 /* Copy EIR or advertising data into event */
11241 memcpy(ev->eir, eir, eir_len);
11243 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11245 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11248 if (scan_rsp_len > 0)
11249 /* Append scan response data to event */
11250 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
11252 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
11253 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
11255 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
11258 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11259 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
11261 struct mgmt_ev_device_found *ev;
11262 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
11265 ev = (struct mgmt_ev_device_found *) buf;
11267 memset(buf, 0, sizeof(buf));
11269 bacpy(&ev->addr.bdaddr, bdaddr);
11270 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11273 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
11276 ev->eir_len = cpu_to_le16(eir_len);
11278 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
11281 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
11283 struct mgmt_ev_discovering ev;
11285 bt_dev_dbg(hdev, "discovering %u", discovering);
11287 memset(&ev, 0, sizeof(ev));
11288 ev.type = hdev->discovery.type;
11289 ev.discovering = discovering;
11291 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
11294 void mgmt_suspending(struct hci_dev *hdev, u8 state)
11296 struct mgmt_ev_controller_suspend ev;
11298 ev.suspend_state = state;
11299 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
11302 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
11305 struct mgmt_ev_controller_resume ev;
11307 ev.wake_reason = reason;
11309 bacpy(&ev.addr.bdaddr, bdaddr);
11310 ev.addr.type = addr_type;
11312 memset(&ev.addr, 0, sizeof(ev.addr));
11315 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
11318 static struct hci_mgmt_chan chan = {
11319 .channel = HCI_CHANNEL_CONTROL,
11320 .handler_count = ARRAY_SIZE(mgmt_handlers),
11321 .handlers = mgmt_handlers,
11323 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
11324 .tizen_handlers = tizen_mgmt_handlers,
11326 .hdev_init = mgmt_init_hdev,
11329 int mgmt_init(void)
11331 return hci_mgmt_chan_register(&chan);
11334 void mgmt_exit(void)
11336 hci_mgmt_chan_unregister(&chan);