2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
39 #include "hci_request.h"
41 #include "mgmt_util.h"
42 #include "mgmt_config.h"
/* Management interface version/revision reported via MGMT_OP_READ_VERSION
 * (see mgmt_fill_version_info() below).
 */
45 #define MGMT_VERSION 1
46 #define MGMT_REVISION 21
/* Opcodes accepted from trusted (privileged) management sockets.
 * This table is what read_commands() serializes back to userspace.
 * NOTE(review): extraction lost several lines of this array (the gaps
 * in the residual numbering) including its closing brace — confirm the
 * full entry list against the original file.
 */
48 static const u16 mgmt_commands[] = {
49 MGMT_OP_READ_INDEX_LIST,
52 MGMT_OP_SET_DISCOVERABLE,
53 MGMT_OP_SET_CONNECTABLE,
54 MGMT_OP_SET_FAST_CONNECTABLE,
56 MGMT_OP_SET_LINK_SECURITY,
60 MGMT_OP_SET_DEV_CLASS,
61 MGMT_OP_SET_LOCAL_NAME,
64 MGMT_OP_LOAD_LINK_KEYS,
65 MGMT_OP_LOAD_LONG_TERM_KEYS,
67 MGMT_OP_GET_CONNECTIONS,
68 MGMT_OP_PIN_CODE_REPLY,
69 MGMT_OP_PIN_CODE_NEG_REPLY,
70 MGMT_OP_SET_IO_CAPABILITY,
72 MGMT_OP_CANCEL_PAIR_DEVICE,
73 MGMT_OP_UNPAIR_DEVICE,
74 MGMT_OP_USER_CONFIRM_REPLY,
75 MGMT_OP_USER_CONFIRM_NEG_REPLY,
76 MGMT_OP_USER_PASSKEY_REPLY,
77 MGMT_OP_USER_PASSKEY_NEG_REPLY,
78 MGMT_OP_READ_LOCAL_OOB_DATA,
79 MGMT_OP_ADD_REMOTE_OOB_DATA,
80 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
81 MGMT_OP_START_DISCOVERY,
82 MGMT_OP_STOP_DISCOVERY,
85 MGMT_OP_UNBLOCK_DEVICE,
86 MGMT_OP_SET_DEVICE_ID,
87 MGMT_OP_SET_ADVERTISING,
89 MGMT_OP_SET_STATIC_ADDRESS,
90 MGMT_OP_SET_SCAN_PARAMS,
91 MGMT_OP_SET_SECURE_CONN,
92 MGMT_OP_SET_DEBUG_KEYS,
95 MGMT_OP_GET_CONN_INFO,
96 MGMT_OP_GET_CLOCK_INFO,
98 MGMT_OP_REMOVE_DEVICE,
99 MGMT_OP_LOAD_CONN_PARAM,
100 MGMT_OP_READ_UNCONF_INDEX_LIST,
101 MGMT_OP_READ_CONFIG_INFO,
102 MGMT_OP_SET_EXTERNAL_CONFIG,
103 MGMT_OP_SET_PUBLIC_ADDRESS,
104 MGMT_OP_START_SERVICE_DISCOVERY,
105 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
106 MGMT_OP_READ_EXT_INDEX_LIST,
107 MGMT_OP_READ_ADV_FEATURES,
108 MGMT_OP_ADD_ADVERTISING,
109 MGMT_OP_REMOVE_ADVERTISING,
110 MGMT_OP_GET_ADV_SIZE_INFO,
111 MGMT_OP_START_LIMITED_DISCOVERY,
112 MGMT_OP_READ_EXT_INFO,
113 MGMT_OP_SET_APPEARANCE,
114 MGMT_OP_GET_PHY_CONFIGURATION,
115 MGMT_OP_SET_PHY_CONFIGURATION,
116 MGMT_OP_SET_BLOCKED_KEYS,
117 MGMT_OP_SET_WIDEBAND_SPEECH,
118 MGMT_OP_READ_CONTROLLER_CAP,
119 MGMT_OP_READ_EXP_FEATURES_INFO,
120 MGMT_OP_SET_EXP_FEATURE,
121 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
122 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
123 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
124 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
125 MGMT_OP_GET_DEVICE_FLAGS,
126 MGMT_OP_SET_DEVICE_FLAGS,
127 MGMT_OP_READ_ADV_MONITOR_FEATURES,
128 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
129 MGMT_OP_REMOVE_ADV_MONITOR,
130 MGMT_OP_ADD_EXT_ADV_PARAMS,
131 MGMT_OP_ADD_EXT_ADV_DATA,
132 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
/* Events that may be delivered to trusted management sockets; reported
 * alongside mgmt_commands[] by read_commands().
 */
135 static const u16 mgmt_events[] = {
136 MGMT_EV_CONTROLLER_ERROR,
138 MGMT_EV_INDEX_REMOVED,
139 MGMT_EV_NEW_SETTINGS,
140 MGMT_EV_CLASS_OF_DEV_CHANGED,
141 MGMT_EV_LOCAL_NAME_CHANGED,
142 MGMT_EV_NEW_LINK_KEY,
143 MGMT_EV_NEW_LONG_TERM_KEY,
144 MGMT_EV_DEVICE_CONNECTED,
145 MGMT_EV_DEVICE_DISCONNECTED,
146 MGMT_EV_CONNECT_FAILED,
147 MGMT_EV_PIN_CODE_REQUEST,
148 MGMT_EV_USER_CONFIRM_REQUEST,
149 MGMT_EV_USER_PASSKEY_REQUEST,
151 MGMT_EV_DEVICE_FOUND,
153 MGMT_EV_DEVICE_BLOCKED,
154 MGMT_EV_DEVICE_UNBLOCKED,
155 MGMT_EV_DEVICE_UNPAIRED,
156 MGMT_EV_PASSKEY_NOTIFY,
159 MGMT_EV_DEVICE_ADDED,
160 MGMT_EV_DEVICE_REMOVED,
161 MGMT_EV_NEW_CONN_PARAM,
162 MGMT_EV_UNCONF_INDEX_ADDED,
163 MGMT_EV_UNCONF_INDEX_REMOVED,
164 MGMT_EV_NEW_CONFIG_OPTIONS,
165 MGMT_EV_EXT_INDEX_ADDED,
166 MGMT_EV_EXT_INDEX_REMOVED,
167 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
168 MGMT_EV_ADVERTISING_ADDED,
169 MGMT_EV_ADVERTISING_REMOVED,
170 MGMT_EV_EXT_INFO_CHANGED,
171 MGMT_EV_PHY_CONFIGURATION_CHANGED,
172 MGMT_EV_EXP_FEATURE_CHANGED,
173 MGMT_EV_DEVICE_FLAGS_CHANGED,
174 MGMT_EV_ADV_MONITOR_ADDED,
175 MGMT_EV_ADV_MONITOR_REMOVED,
176 MGMT_EV_CONTROLLER_SUSPEND,
177 MGMT_EV_CONTROLLER_RESUME,
/* Read-only opcode subset permitted on untrusted (unprivileged)
 * management sockets.
 */
180 static const u16 mgmt_untrusted_commands[] = {
181 MGMT_OP_READ_INDEX_LIST,
183 MGMT_OP_READ_UNCONF_INDEX_LIST,
184 MGMT_OP_READ_CONFIG_INFO,
185 MGMT_OP_READ_EXT_INDEX_LIST,
186 MGMT_OP_READ_EXT_INFO,
187 MGMT_OP_READ_CONTROLLER_CAP,
188 MGMT_OP_READ_EXP_FEATURES_INFO,
189 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
190 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Event subset delivered to untrusted management sockets (no key
 * material or per-device pairing events).
 */
193 static const u16 mgmt_untrusted_events[] = {
195 MGMT_EV_INDEX_REMOVED,
196 MGMT_EV_NEW_SETTINGS,
197 MGMT_EV_CLASS_OF_DEV_CHANGED,
198 MGMT_EV_LOCAL_NAME_CHANGED,
199 MGMT_EV_UNCONF_INDEX_ADDED,
200 MGMT_EV_UNCONF_INDEX_REMOVED,
201 MGMT_EV_NEW_CONFIG_OPTIONS,
202 MGMT_EV_EXT_INDEX_ADDED,
203 MGMT_EV_EXT_INDEX_REMOVED,
204 MGMT_EV_EXT_INFO_CHANGED,
205 MGMT_EV_EXP_FEATURE_CHANGED,
/* Service/EIR cache lifetime: 2 seconds expressed in jiffies. */
208 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* 16-byte all-zero key, used to detect blank/invalid key material. */
210 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
211 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code (0x00..); consulted by
 * mgmt_status() below, which falls back to MGMT_STATUS_FAILED for
 * any code past the end of the table.
 */
214 static const u8 mgmt_status_table[] = {
216 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
217 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
218 MGMT_STATUS_FAILED, /* Hardware Failure */
219 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
220 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
221 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
222 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
223 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
224 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
226 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
227 MGMT_STATUS_BUSY, /* Command Disallowed */
228 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
229 MGMT_STATUS_REJECTED, /* Rejected Security */
230 MGMT_STATUS_REJECTED, /* Rejected Personal */
231 MGMT_STATUS_TIMEOUT, /* Host Timeout */
232 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
233 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
234 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
235 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
236 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
237 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
238 MGMT_STATUS_BUSY, /* Repeated Attempts */
239 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
240 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
241 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
242 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
243 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
244 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
245 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
246 MGMT_STATUS_FAILED, /* Unspecified Error */
247 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
248 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
249 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
250 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
251 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
252 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
253 MGMT_STATUS_FAILED, /* Unit Link Key Used */
254 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
255 MGMT_STATUS_TIMEOUT, /* Instant Passed */
256 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
257 MGMT_STATUS_FAILED, /* Transaction Collision */
258 MGMT_STATUS_FAILED, /* Reserved for future use */
259 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
260 MGMT_STATUS_REJECTED, /* QoS Rejected */
261 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
262 MGMT_STATUS_REJECTED, /* Insufficient Security */
263 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
264 MGMT_STATUS_FAILED, /* Reserved for future use */
265 MGMT_STATUS_BUSY, /* Role Switch Pending */
266 MGMT_STATUS_FAILED, /* Reserved for future use */
267 MGMT_STATUS_FAILED, /* Slot Violation */
268 MGMT_STATUS_FAILED, /* Role Switch Failed */
269 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
270 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
271 MGMT_STATUS_BUSY, /* Host Busy Pairing */
272 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
273 MGMT_STATUS_BUSY, /* Controller Busy */
274 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
275 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
276 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
277 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
278 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
281 static u8 mgmt_status(u8 hci_status)
283 if (hci_status < ARRAY_SIZE(mgmt_status_table))
284 return mgmt_status_table[hci_status];
286 return MGMT_STATUS_FAILED;
289 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
292 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
296 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
297 u16 len, int flag, struct sock *skip_sk)
299 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
303 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
304 struct sock *skip_sk)
306 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
307 HCI_SOCK_TRUSTED, skip_sk);
310 static u8 le_addr_type(u8 mgmt_addr_type)
312 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
313 return ADDR_LE_DEV_PUBLIC;
315 return ADDR_LE_DEV_RANDOM;
318 void mgmt_fill_version_info(void *ver)
320 struct mgmt_rp_read_version *rp = ver;
322 rp->version = MGMT_VERSION;
323 rp->revision = cpu_to_le16(MGMT_REVISION);
326 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
329 struct mgmt_rp_read_version rp;
331 bt_dev_dbg(hdev, "sock %p", sk);
333 mgmt_fill_version_info(&rp);
335 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* Handler for MGMT_OP_READ_COMMANDS: reply with the list of supported
 * opcodes and events. Trusted sockets get the full tables, untrusted
 * sockets only the read-only subsets. The reply is a heap buffer sized
 * for both lists; opcodes are written little-endian and unaligned.
 * NOTE(review): extraction dropped several lines here (local decls for
 * rp_size/i/err, else branches, the kmalloc NULL check, kfree and the
 * return) — confirm against the original file.
 */
339 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
342 struct mgmt_rp_read_commands *rp;
343 u16 num_commands, num_events;
347 bt_dev_dbg(hdev, "sock %p", sk);
349 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
350 num_commands = ARRAY_SIZE(mgmt_commands);
351 num_events = ARRAY_SIZE(mgmt_events);
353 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
354 num_events = ARRAY_SIZE(mgmt_untrusted_events);
/* Header plus one u16 per opcode and per event. */
357 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
359 rp = kmalloc(rp_size, GFP_KERNEL);
363 rp->num_commands = cpu_to_le16(num_commands);
364 rp->num_events = cpu_to_le16(num_events);
366 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
367 __le16 *opcode = rp->opcodes;
369 for (i = 0; i < num_commands; i++, opcode++)
370 put_unaligned_le16(mgmt_commands[i], opcode)
372 for (i = 0; i < num_events; i++, opcode++)
373 put_unaligned_le16(mgmt_events[i], opcode);
375 __le16 *opcode = rp->opcodes;
377 for (i = 0; i < num_commands; i++, opcode++)
378 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
380 for (i = 0; i < num_events; i++, opcode++)
381 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
384 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the ids of all
 * configured primary controllers. Controllers still in SETUP/CONFIG,
 * bound to a user channel, raw-only, or unconfigured are excluded.
 * Counting and filling happen under hci_dev_list_lock (hence the
 * GFP_ATOMIC allocation); rp_len is recomputed after filling because
 * the second pass applies stricter filters than the first count.
 */
391 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
394 struct mgmt_rp_read_index_list *rp;
400 bt_dev_dbg(hdev, "sock %p", sk);
402 read_lock(&hci_dev_list_lock);
/* First pass: upper-bound count for the allocation. */
405 list_for_each_entry(d, &hci_dev_list, list) {
406 if (d->dev_type == HCI_PRIMARY &&
407 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
/* 2 bytes (one __le16 index) per controller. */
411 rp_len = sizeof(*rp) + (2 * count);
412 rp = kmalloc(rp_len, GFP_ATOMIC);
414 read_unlock(&hci_dev_list_lock);
/* Second pass: fill indices, skipping transient states. */
419 list_for_each_entry(d, &hci_dev_list, list) {
420 if (hci_dev_test_flag(d, HCI_SETUP) ||
421 hci_dev_test_flag(d, HCI_CONFIG) ||
422 hci_dev_test_flag(d, HCI_USER_CHANNEL))
425 /* Devices marked as raw-only are neither configured
426 * nor unconfigured controllers.
428 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
431 if (d->dev_type == HCI_PRIMARY &&
432 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
433 rp->index[count++] = cpu_to_le16(d->id);
434 bt_dev_dbg(hdev, "Added hci%u", d->id);
438 rp->num_controllers = cpu_to_le16(count);
439 rp_len = sizeof(*rp) + (2 * count);
441 read_unlock(&hci_dev_list_lock);
443 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list()
 * but reporting only primary controllers that still carry the
 * HCI_UNCONFIGURED flag. Same two-pass count/fill pattern under
 * hci_dev_list_lock with a GFP_ATOMIC reply buffer.
 */
451 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
452 void *data, u16 data_len)
454 struct mgmt_rp_read_unconf_index_list *rp;
460 bt_dev_dbg(hdev, "sock %p", sk);
462 read_lock(&hci_dev_list_lock);
465 list_for_each_entry(d, &hci_dev_list, list) {
466 if (d->dev_type == HCI_PRIMARY &&
467 hci_dev_test_flag(d, HCI_UNCONFIGURED))
471 rp_len = sizeof(*rp) + (2 * count);
472 rp = kmalloc(rp_len, GFP_ATOMIC);
474 read_unlock(&hci_dev_list_lock);
479 list_for_each_entry(d, &hci_dev_list, list) {
480 if (hci_dev_test_flag(d, HCI_SETUP) ||
481 hci_dev_test_flag(d, HCI_CONFIG) ||
482 hci_dev_test_flag(d, HCI_USER_CHANNEL))
485 /* Devices marked as raw-only are neither configured
486 * nor unconfigured controllers.
488 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
491 if (d->dev_type == HCI_PRIMARY &&
492 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
493 rp->index[count++] = cpu_to_le16(d->id);
494 bt_dev_dbg(hdev, "Added hci%u", d->id);
498 rp->num_controllers = cpu_to_le16(count);
499 rp_len = sizeof(*rp) + (2 * count);
501 read_unlock(&hci_dev_list_lock);
503 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
504 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with all primary and
 * AMP controllers, each entry tagged with a type (0x00 configured
 * primary, 0x01 unconfigured primary, 0x02 AMP) and its bus. As a side
 * effect, switches the calling socket to extended index events only.
 */
511 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
512 void *data, u16 data_len)
514 struct mgmt_rp_read_ext_index_list *rp;
519 bt_dev_dbg(hdev, "sock %p", sk);
521 read_lock(&hci_dev_list_lock);
524 list_for_each_entry(d, &hci_dev_list, list) {
525 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
/* struct_size() guards the entry-array multiplication. */
529 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
531 read_unlock(&hci_dev_list_lock);
536 list_for_each_entry(d, &hci_dev_list, list) {
537 if (hci_dev_test_flag(d, HCI_SETUP) ||
538 hci_dev_test_flag(d, HCI_CONFIG) ||
539 hci_dev_test_flag(d, HCI_USER_CHANNEL))
542 /* Devices marked as raw-only are neither configured
543 * nor unconfigured controllers.
545 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
548 if (d->dev_type == HCI_PRIMARY) {
549 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
550 rp->entry[count].type = 0x01;
552 rp->entry[count].type = 0x00;
553 } else if (d->dev_type == HCI_AMP) {
554 rp->entry[count].type = 0x02;
559 rp->entry[count].bus = d->bus;
560 rp->entry[count++].index = cpu_to_le16(d->id);
561 bt_dev_dbg(hdev, "Added hci%u", d->id);
564 rp->num_controllers = cpu_to_le16(count);
566 read_unlock(&hci_dev_list_lock);
568 /* If this command is called at least once, then all the
569 * default index and unconfigured index events are disabled
570 * and from now on only extended index events are used.
572 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
573 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
574 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
576 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
577 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
578 struct_size(rp, entry, count));
585 static bool is_configured(struct hci_dev *hdev)
587 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
588 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
591 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
592 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
593 !bacmp(&hdev->public_addr, BDADDR_ANY))
599 static __le32 get_missing_options(struct hci_dev *hdev)
603 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
604 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
605 options |= MGMT_OPTION_EXTERNAL_CONFIG;
607 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
608 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
609 !bacmp(&hdev->public_addr, BDADDR_ANY))
610 options |= MGMT_OPTION_PUBLIC_ADDRESS;
612 return cpu_to_le32(options);
615 static int new_options(struct hci_dev *hdev, struct sock *skip)
617 __le32 options = get_missing_options(hdev);
619 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
620 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
623 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
625 __le32 options = get_missing_options(hdev);
627 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* Handler for MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer id
 * plus which configuration options the controller supports and which
 * are still missing. Runs under the hdev lock (the matching
 * hci_dev_lock() line was lost in extraction; the unlock is visible).
 */
631 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
632 void *data, u16 data_len)
634 struct mgmt_rp_read_config_info rp;
637 bt_dev_dbg(hdev, "sock %p", sk);
641 memset(&rp, 0, sizeof(rp));
642 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
/* External config is supported only when the quirk declares it. */
644 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
645 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* A settable public address requires a driver set_bdaddr hook. */
647 if (hdev->set_bdaddr)
648 options |= MGMT_OPTION_PUBLIC_ADDRESS;
650 rp.supported_options = cpu_to_le32(options);
651 rp.missing_options = get_missing_options(hdev);
653 hci_dev_unlock(hdev);
655 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the bitmask of PHYs this controller can support, derived from
 * its BR/EDR LMP feature bits and LE feature bits. BR 1M 1-slot is
 * implied by BR/EDR support; LE 1M TX/RX by LE support.
 */
659 static u32 get_supported_phys(struct hci_dev *hdev)
661 u32 supported_phys = 0;
663 if (lmp_bredr_capable(hdev)) {
664 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
666 if (hdev->features[0][0] & LMP_3SLOT)
667 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
669 if (hdev->features[0][0] & LMP_5SLOT)
670 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
672 if (lmp_edr_2m_capable(hdev)) {
673 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
675 if (lmp_edr_3slot_capable(hdev))
676 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
678 if (lmp_edr_5slot_capable(hdev))
679 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
681 if (lmp_edr_3m_capable(hdev)) {
682 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
684 if (lmp_edr_3slot_capable(hdev))
685 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
687 if (lmp_edr_5slot_capable(hdev))
688 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
693 if (lmp_le_capable(hdev)) {
694 supported_phys |= MGMT_PHY_LE_1M_TX;
695 supported_phys |= MGMT_PHY_LE_1M_RX;
697 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
698 supported_phys |= MGMT_PHY_LE_2M_TX;
699 supported_phys |= MGMT_PHY_LE_2M_RX;
702 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
703 supported_phys |= MGMT_PHY_LE_CODED_TX;
704 supported_phys |= MGMT_PHY_LE_CODED_RX;
708 return supported_phys;
/* Build the bitmask of PHYs currently in effect. For BR/EDR, EDR
 * packet types use inverted logic: the HCI_2DHx/HCI_3DHx bits in
 * hdev->pkt_type mark packet types that are *disabled*, hence the
 * negated tests below. LE selection reads the default TX/RX PHY
 * preference masks.
 */
711 static u32 get_selected_phys(struct hci_dev *hdev)
713 u32 selected_phys = 0;
715 if (lmp_bredr_capable(hdev)) {
716 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
718 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
719 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
721 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
722 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
724 if (lmp_edr_2m_capable(hdev)) {
725 if (!(hdev->pkt_type & HCI_2DH1))
726 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
728 if (lmp_edr_3slot_capable(hdev) &&
729 !(hdev->pkt_type & HCI_2DH3))
730 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
732 if (lmp_edr_5slot_capable(hdev) &&
733 !(hdev->pkt_type & HCI_2DH5))
734 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
736 if (lmp_edr_3m_capable(hdev)) {
737 if (!(hdev->pkt_type & HCI_3DH1))
738 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
740 if (lmp_edr_3slot_capable(hdev) &&
741 !(hdev->pkt_type & HCI_3DH3))
742 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
744 if (lmp_edr_5slot_capable(hdev) &&
745 !(hdev->pkt_type & HCI_3DH5))
746 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
751 if (lmp_le_capable(hdev)) {
752 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
753 selected_phys |= MGMT_PHY_LE_1M_TX;
755 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
756 selected_phys |= MGMT_PHY_LE_1M_RX;
758 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
759 selected_phys |= MGMT_PHY_LE_2M_TX;
761 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
762 selected_phys |= MGMT_PHY_LE_2M_RX;
764 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
765 selected_phys |= MGMT_PHY_LE_CODED_TX;
767 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
768 selected_phys |= MGMT_PHY_LE_CODED_RX;
771 return selected_phys;
774 static u32 get_configurable_phys(struct hci_dev *hdev)
776 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
777 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the bitmask of settings this controller could support, based
 * on its capabilities (not its current state — see
 * get_current_settings() for that).
 */
780 static u32 get_supported_settings(struct hci_dev *hdev)
784 settings |= MGMT_SETTING_POWERED;
785 settings |= MGMT_SETTING_BONDABLE;
786 settings |= MGMT_SETTING_DEBUG_KEYS;
787 settings |= MGMT_SETTING_CONNECTABLE;
788 settings |= MGMT_SETTING_DISCOVERABLE;
790 if (lmp_bredr_capable(hdev)) {
/* Fast connectable needs interlaced scan, i.e. >= BT 1.2. */
791 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
792 settings |= MGMT_SETTING_FAST_CONNECTABLE;
793 settings |= MGMT_SETTING_BREDR;
794 settings |= MGMT_SETTING_LINK_SECURITY;
796 if (lmp_ssp_capable(hdev)) {
797 settings |= MGMT_SETTING_SSP;
798 if (IS_ENABLED(CONFIG_BT_HS))
799 settings |= MGMT_SETTING_HS;
802 if (lmp_sc_capable(hdev))
803 settings |= MGMT_SETTING_SECURE_CONN;
805 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
807 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
810 if (lmp_le_capable(hdev)) {
811 settings |= MGMT_SETTING_LE;
812 settings |= MGMT_SETTING_SECURE_CONN;
813 settings |= MGMT_SETTING_PRIVACY;
814 settings |= MGMT_SETTING_STATIC_ADDRESS;
816 /* When the experimental feature for LL Privacy support is
817 * enabled, then advertising is no longer supported.
819 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
820 settings |= MGMT_SETTING_ADVERTISING;
823 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
825 settings |= MGMT_SETTING_CONFIGURATION;
827 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the bitmask of settings currently active on the controller,
 * derived from power state and the hdev dev_flags.
 */
832 static u32 get_current_settings(struct hci_dev *hdev)
836 if (hdev_is_powered(hdev))
837 settings |= MGMT_SETTING_POWERED;
839 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
840 settings |= MGMT_SETTING_CONNECTABLE;
842 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
843 settings |= MGMT_SETTING_FAST_CONNECTABLE;
845 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
846 settings |= MGMT_SETTING_DISCOVERABLE;
848 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
849 settings |= MGMT_SETTING_BONDABLE;
851 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
852 settings |= MGMT_SETTING_BREDR;
854 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
855 settings |= MGMT_SETTING_LE;
857 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
858 settings |= MGMT_SETTING_LINK_SECURITY;
860 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
861 settings |= MGMT_SETTING_SSP;
863 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
864 settings |= MGMT_SETTING_HS;
866 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
867 settings |= MGMT_SETTING_ADVERTISING;
869 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
870 settings |= MGMT_SETTING_SECURE_CONN;
872 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
873 settings |= MGMT_SETTING_DEBUG_KEYS;
875 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
876 settings |= MGMT_SETTING_PRIVACY;
878 /* The current setting for static address has two purposes. The
879 * first is to indicate if the static address will be used and
880 * the second is to indicate if it is actually set.
882 * This means if the static address is not configured, this flag
883 * will never be set. If the address is configured, then if the
884 * address is actually used decides if the flag is set or not.
886 * For single mode LE only controllers and dual-mode controllers
887 * with BR/EDR disabled, the existence of the static address will
890 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
891 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
892 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
893 if (bacmp(&hdev->static_addr, BDADDR_ANY))
894 settings |= MGMT_SETTING_STATIC_ADDRESS;
897 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
898 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
903 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
905 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
908 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
909 struct hci_dev *hdev,
912 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Return the advertising discoverability flags (LE_AD_GENERAL /
 * LE_AD_LIMITED) that should be advertised right now. A pending
 * Set Discoverable command takes precedence over the current flags,
 * since those have not reached their final values yet.
 */
915 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
917 struct mgmt_pending_cmd *cmd;
919 /* If there's a pending mgmt command the flags will not yet have
920 * their final values, so check for this first.
922 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
924 struct mgmt_mode *cp = cmd->param;
/* cp->val 0x01 = general discoverable, 0x02 = limited. */
926 return LE_AD_GENERAL;
927 else if (cp->val == 0x02)
928 return LE_AD_LIMITED;
930 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
931 return LE_AD_LIMITED;
932 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
933 return LE_AD_GENERAL;
/* Return whether the controller should currently be connectable.
 * A pending Set Connectable command overrides the HCI_CONNECTABLE
 * flag, which has not yet been updated.
 */
939 bool mgmt_get_connectable(struct hci_dev *hdev)
941 struct mgmt_pending_cmd *cmd;
943 /* If there's a pending mgmt command the flag will not yet have
944 * it's final value, so check for this first.
946 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
948 struct mgmt_mode *cp = cmd->param;
/* NOTE(review): the "return cp->val;" line appears lost here. */
953 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Delayed-work handler that expires the service cache: once
 * HCI_SERVICE_CACHE is cleared, refresh the EIR data and class of
 * device under the hdev lock, then run the queued HCI requests.
 */
956 static void service_cache_off(struct work_struct *work)
958 struct hci_dev *hdev = container_of(work, struct hci_dev,
960 struct hci_request req;
/* Only one expiry per cache period: bail if already cleared. */
962 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
965 hci_req_init(&req, hdev);
969 __hci_req_update_eir(&req);
970 __hci_req_update_class(&req);
972 hci_dev_unlock(hdev);
974 hci_req_run(&req, NULL);
/* Delayed-work handler for resolvable-private-address expiry: mark the
 * RPA as expired and, if advertising is active, re-enable advertising
 * so a fresh RPA gets generated and programmed.
 */
977 static void rpa_expired(struct work_struct *work)
979 struct hci_dev *hdev = container_of(work, struct hci_dev,
981 struct hci_request req;
983 bt_dev_dbg(hdev, "");
985 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
987 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
990 /* The generation of a new RPA and programming it into the
991 * controller happens in the hci_req_enable_advertising()
994 hci_req_init(&req, hdev);
995 if (ext_adv_capable(hdev))
996 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance)
998 __hci_req_enable_advertising(&req);
999 hci_req_run(&req, NULL);
/* One-time per-controller initialization when the first management
 * socket touches it: set HCI_MGMT (idempotent via test-and-set),
 * install the cache/RPA delayed work, and clear the implicit
 * bondable default so userspace must opt in explicitly.
 */
1002 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1004 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1007 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1008 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1010 /* Non-mgmt controlled devices get this bit set
1011 * implicitly so that pairing works for them, however
1012 * for mgmt we require user-space to explicitly enable
1015 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* Handler for MGMT_OP_READ_INFO: reply with the controller's address,
 * HCI version, manufacturer, supported/current settings, class of
 * device and names. Snapshot is taken under the hdev lock (the
 * hci_dev_lock() line was lost in extraction; the unlock is visible).
 */
1018 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1019 void *data, u16 data_len)
1021 struct mgmt_rp_read_info rp;
1023 bt_dev_dbg(hdev, "sock %p", sk);
1027 memset(&rp, 0, sizeof(rp));
1029 bacpy(&rp.bdaddr, &hdev->bdaddr);
1031 rp.version = hdev->hci_ver;
1032 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1034 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1035 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1037 memcpy(rp.dev_class, hdev->dev_class, 3);
1039 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1040 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1042 hci_dev_unlock(hdev);
1044 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Serialize the controller's EIR-style data into @eir: class of device
 * (if BR/EDR enabled), appearance (if LE enabled), complete and short
 * local names. Returns the number of bytes written.
 */
1048 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1053 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1054 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1055 hdev->dev_class, 3);
1057 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1058 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1061 name_len = strlen(hdev->dev_name);
1062 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1063 hdev->dev_name, name_len);
1065 name_len = strlen(hdev->short_name);
1066 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1067 hdev->short_name, name_len);
/* Handler for MGMT_OP_READ_EXT_INFO: like read_controller_info() but
 * with names/class/appearance packed as EIR data. As a side effect,
 * switches the calling socket from the legacy class/name events to the
 * extended info changed event.
 */
1072 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1073 void *data, u16 data_len)
/* rp aliases a stack buffer (declaration lost in extraction). */
1076 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1079 bt_dev_dbg(hdev, "sock %p", sk);
1081 memset(&buf, 0, sizeof(buf));
1085 bacpy(&rp->bdaddr, &hdev->bdaddr);
1087 rp->version = hdev->hci_ver;
1088 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1090 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1091 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1094 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1095 rp->eir_len = cpu_to_le16(eir_len);
1097 hci_dev_unlock(hdev);
1099 /* If this command is called at least once, then the events
1100 * for class of device and local name changes are disabled
1101 * and only the new extended controller information event
1104 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1105 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1106 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1108 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1109 sizeof(*rp) + eir_len);
/* Broadcast an Extended Info Changed event with the controller's
 * current EIR data to sockets that opted into extended info events,
 * skipping @skip.
 */
1112 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
/* ev aliases a stack buffer (declaration lost in extraction). */
1115 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1118 memset(buf, 0, sizeof(buf));
1120 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1121 ev->eir_len = cpu_to_le16(eir_len);
1123 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1124 sizeof(*ev) + eir_len,
1125 HCI_MGMT_EXT_INFO_EVENTS, skip);
1128 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1130 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1132 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for the power-off cleanup request: once no
 * connections remain, fast-forward the pending power-off work instead
 * of waiting for its timeout.
 */
1136 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1138 bt_dev_dbg(hdev, "status 0x%02x", status);
1140 if (hci_conn_count(hdev) == 0) {
1141 cancel_delayed_work(&hdev->power_off);
1142 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1146 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1148 struct mgmt_ev_advertising_added ev;
1150 ev.instance = instance;
1152 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1155 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1158 struct mgmt_ev_advertising_removed ev;
1160 ev.instance = instance;
1162 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1165 static void cancel_adv_timeout(struct hci_dev *hdev)
1167 if (hdev->adv_instance_timeout) {
1168 hdev->adv_instance_timeout = 0;
1169 cancel_delayed_work(&hdev->adv_instance_expire);
/* Build and run one HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, clear advertising instances,
 * stop advertising and discovery, and abort every connection. Returns
 * the hci_req_run() result (-ENODATA if nothing needed doing).
 */
1173 static int clean_up_hci_state(struct hci_dev *hdev)
1175 struct hci_request req;
1176 struct hci_conn *conn;
1177 bool discov_stopped;
1180 hci_req_init(&req, hdev);
1182 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1183 test_bit(HCI_PSCAN, &hdev->flags)) {
/* scan = SCAN_DISABLED (the assignment line was lost in extraction). */
1185 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1188 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1190 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1191 __hci_req_disable_advertising(&req);
1193 discov_stopped = hci_req_stop_discovery(&req);
1195 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1196 /* 0x15 == Terminated due to Power Off */
1197 __hci_abort_conn(&req, conn, 0x15);
1200 err = hci_req_run(&req, clean_up_hci_complete);
1201 if (!err && discov_stopped)
1202 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off.
 * Validates cp->val (0x00/0x01), rejects a second concurrent request,
 * short-circuits when already in the requested state, then queues
 * power_on work or runs the power-off cleanup sequence. Runs under the
 * hdev lock (the hci_dev_lock() line was lost in extraction; the
 * unlock is visible).
 */
1207 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1210 struct mgmt_mode *cp = data;
1211 struct mgmt_pending_cmd *cmd;
1214 bt_dev_dbg(hdev, "sock %p", sk);
1216 if (cp->val != 0x00 && cp->val != 0x01)
1217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1218 MGMT_STATUS_INVALID_PARAMS);
/* Only one Set Powered may be in flight at a time. */
1222 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1223 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Already in the requested state: just echo current settings. */
1228 if (!!cp->val == hdev_is_powered(hdev)) {
1229 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1233 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1240 queue_work(hdev->req_workqueue, &hdev->power_on);
1243 /* Disconnect connections, stop scans, etc */
1244 err = clean_up_hci_state(hdev);
1246 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1247 HCI_POWER_OFF_TIMEOUT);
1249 /* ENODATA means there were no HCI commands queued */
1250 if (err == -ENODATA) {
1251 cancel_delayed_work(&hdev->power_off);
1252 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1258 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * sockets registered for setting events, skipping @skip.
 */
1262 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1264 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1266 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1267 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast new settings to every listener. */
1270 int mgmt_new_settings(struct hci_dev *hdev)
1272 return new_settings(hdev, NULL);
1277 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with a
 * settings response and free it. The first command's socket is stashed
 * (with an extra reference) in the cmd_lookup so the caller can later
 * skip it when broadcasting new_settings.
 */
1281 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1283 struct cmd_lookup *match = data;
1285 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1287 list_del(&cmd->list);
1289 if (match->sk == NULL) {
1290 match->sk = cmd->sk;
1291 sock_hold(match->sk);
1294 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status pointed to by @data, then remove it.
 */
1297 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1301 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1302 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: prefer the command's own
 * cmd_complete handler when set; otherwise fall back to a plain
 * command-status response via cmd_status_rsp().
 */
1305 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1307 if (cmd->cmd_complete) {
1310 cmd->cmd_complete(cmd, *status);
1311 mgmt_pending_remove(cmd);
1316 cmd_status_rsp(cmd, data);
/* Generic completion: echo the command's original parameters back as
 * the response payload.
 */
1319 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1321 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1322 cmd->param, cmd->param_len);
/* Completion for address-based commands: respond with just the leading
 * mgmt_addr_info portion of the original parameters.
 */
1325 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1327 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1328 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled by the
 * host, SUCCESS otherwise.
 */
1331 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1333 if (!lmp_bredr_capable(hdev))
1334 return MGMT_STATUS_NOT_SUPPORTED;
1335 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1336 return MGMT_STATUS_REJECTED;
1338 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an mgmt status, mirroring mgmt_bredr_support(). */
1341 static u8 mgmt_le_support(struct hci_dev *hdev)
1343 if (!lmp_le_capable(hdev))
1344 return MGMT_STATUS_NOT_SUPPORTED;
1345 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1346 return MGMT_STATUS_REJECTED;
1348 return MGMT_STATUS_SUCCESS;
/* Completion for the Set Discoverable HCI work: on failure, report the
 * error and drop the limited-discoverable flag; on success, (re)arm the
 * discoverable timeout if one is configured, then reply and broadcast
 * the new settings.
 */
1351 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1353 struct mgmt_pending_cmd *cmd;
1355 bt_dev_dbg(hdev, "status 0x%02x", status);
1359 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1364 u8 mgmt_err = mgmt_status(status);
1365 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1366 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* discov_timeout is stored in seconds; convert to jiffies */
1370 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1371 hdev->discov_timeout > 0) {
1372 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1373 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1376 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1377 new_settings(hdev, cmd->sk);
1380 mgmt_pending_remove(cmd);
1383 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general
 * discoverable, 0x02 limited discoverable (which requires a timeout).
 * Validates parameters, handles the powered-off case by flipping flags
 * only, short-circuits when only the timeout changes, and otherwise
 * registers a pending command and queues the discoverable update work.
 */
1386 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1389 struct mgmt_cp_set_discoverable *cp = data;
1390 struct mgmt_pending_cmd *cmd;
1394 bt_dev_dbg(hdev, "sock %p", sk);
1396 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1397 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1398 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1399 MGMT_STATUS_REJECTED);
1401 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1402 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1403 MGMT_STATUS_INVALID_PARAMS);
1405 timeout = __le16_to_cpu(cp->timeout);
1407 /* Disabling discoverable requires that no timeout is set,
1408 * and enabling limited discoverable requires a timeout.
1410 if ((cp->val == 0x00 && timeout > 0) ||
1411 (cp->val == 0x02 && timeout == 0))
1412 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1413 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while powered off */
1417 if (!hdev_is_powered(hdev) && timeout > 0) {
1418 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1419 MGMT_STATUS_NOT_POWERED);
1423 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1424 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1425 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable mode to be active first */
1430 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1431 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1432 MGMT_STATUS_REJECTED);
1436 if (hdev->advertising_paused) {
1437 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Powered off: just toggle the flag; the setting takes effect on
 * the next power on.
 */
1442 if (!hdev_is_powered(hdev)) {
1443 bool changed = false;
1445 /* Setting limited discoverable when powered off is
1446 * not a valid operation since it requires a timeout
1447 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1449 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1450 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1454 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1459 err = new_settings(hdev, sk);
1464 /* If the current mode is the same, then just update the timeout
1465 * value with the new value. And if only the timeout gets updated,
1466 * then no need for any HCI transactions.
1468 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1469 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1470 HCI_LIMITED_DISCOVERABLE)) {
1471 cancel_delayed_work(&hdev->discov_off);
1472 hdev->discov_timeout = timeout;
1474 if (cp->val && hdev->discov_timeout > 0) {
1475 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1476 queue_delayed_work(hdev->req_workqueue,
1477 &hdev->discov_off, to);
1480 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1484 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1490 /* Cancel any potential discoverable timeout that might be
1491 * still active and store new timeout value. The arming of
1492 * the timeout happens in the complete handler.
1494 cancel_delayed_work(&hdev->discov_off);
1495 hdev->discov_timeout = timeout;
1498 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1500 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1502 /* Limited discoverable mode */
1503 if (cp->val == 0x02)
1504 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1506 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1508 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1512 hci_dev_unlock(hdev);
/* Completion for the Set Connectable HCI work: report failure, or on
 * success send the settings response and broadcast new settings.
 */
1516 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1518 struct mgmt_pending_cmd *cmd;
1520 bt_dev_dbg(hdev, "status 0x%02x", status);
1524 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1529 u8 mgmt_err = mgmt_status(status);
1530 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1534 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1535 new_settings(hdev, cmd->sk);
1538 mgmt_pending_remove(cmd);
1541 hci_dev_unlock(hdev);
/* Flag-only path for Set Connectable (used when powered off, per the
 * caller): toggle HCI_CONNECTABLE, and when disabling also clear
 * HCI_DISCOVERABLE since discoverable depends on connectable. Then
 * reply and, on a change, refresh scanning and broadcast new settings.
 */
1544 static int set_connectable_update_settings(struct hci_dev *hdev,
1545 struct sock *sk, u8 val)
1547 bool changed = false;
1550 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1554 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1556 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1557 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1560 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1565 hci_req_update_scan(hdev);
1566 hci_update_background_scan(hdev);
1567 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: validate the mode, take the
 * flag-only path when powered off, reject when a conflicting
 * discoverable/connectable command is pending, otherwise update the
 * flags and queue the connectable update work.
 */
1573 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1576 struct mgmt_mode *cp = data;
1577 struct mgmt_pending_cmd *cmd;
1580 bt_dev_dbg(hdev, "sock %p", sk);
1582 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1583 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1585 MGMT_STATUS_REJECTED);
1587 if (cp->val != 0x00 && cp->val != 0x01)
1588 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1589 MGMT_STATUS_INVALID_PARAMS);
1593 if (!hdev_is_powered(hdev)) {
1594 err = set_connectable_update_settings(hdev, sk, cp->val);
1598 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1599 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1600 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1605 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1612 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Disabling connectable also tears down discoverable state */
1614 if (hdev->discov_timeout > 0)
1615 cancel_delayed_work(&hdev->discov_off);
1617 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1618 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1619 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1622 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1626 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. This is
 * a host-side setting, so no HCI commands are needed; but in limited
 * privacy mode with active advertising, a discoverable update is
 * queued because the bondable state can change the advertising address.
 */
1630 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1633 struct mgmt_mode *cp = data;
1637 bt_dev_dbg(hdev, "sock %p", sk);
1639 if (cp->val != 0x00 && cp->val != 0x01)
1640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1641 MGMT_STATUS_INVALID_PARAMS);
1646 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1648 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1650 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1655 /* In limited privacy mode the change of bondable mode
1656 * may affect the local advertising address.
1658 if (hdev_is_powered(hdev) &&
1659 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1660 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1661 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1662 queue_work(hdev->req_workqueue,
1663 &hdev->discoverable_update);
1665 err = new_settings(hdev, sk);
1669 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: enable/disable BR/EDR link-level
 * security (authentication). When powered off only the flag is
 * toggled; when powered, HCI_OP_WRITE_AUTH_ENABLE is sent and the
 * response is deferred via a pending command.
 */
1673 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1676 struct mgmt_mode *cp = data;
1677 struct mgmt_pending_cmd *cmd;
1681 bt_dev_dbg(hdev, "sock %p", sk);
1683 status = mgmt_bredr_support(hdev);
1685 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1688 if (cp->val != 0x00 && cp->val != 0x01)
1689 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1690 MGMT_STATUS_INVALID_PARAMS);
1694 if (!hdev_is_powered(hdev)) {
1695 bool changed = false;
1697 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1698 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1702 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1707 err = new_settings(hdev, sk);
1712 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1713 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: nothing to send */
1720 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1721 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1725 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1731 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1733 mgmt_pending_remove(cmd);
1738 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * Powered off: flag-only toggle (disabling SSP also clears High Speed
 * support, which depends on it). Powered on: send
 * HCI_OP_WRITE_SSP_MODE (after turning off SSP debug mode if it was
 * active) and defer the reply via a pending command.
 */
1742 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1744 struct mgmt_mode *cp = data;
1745 struct mgmt_pending_cmd *cmd;
1749 bt_dev_dbg(hdev, "sock %p", sk);
1751 status = mgmt_bredr_support(hdev);
1753 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1755 if (!lmp_ssp_capable(hdev))
1756 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1757 MGMT_STATUS_NOT_SUPPORTED);
1759 if (cp->val != 0x00 && cp->val != 0x01)
1760 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1761 MGMT_STATUS_INVALID_PARAMS);
1765 if (!hdev_is_powered(hdev)) {
1769 changed = !hci_dev_test_and_set_flag(hdev,
1772 changed = hci_dev_test_and_clear_flag(hdev,
1775 changed = hci_dev_test_and_clear_flag(hdev,
/* HS depends on SSP, so disabling SSP disables HS too */
1778 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1781 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1786 err = new_settings(hdev, sk);
1791 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1792 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1797 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1798 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1802 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turning SSP off while debug keys are in use: disable debug mode */
1808 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1809 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1810 sizeof(cp->val), &cp->val);
1812 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1814 mgmt_pending_remove(cmd);
1819 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: enable/disable High Speed (AMP) support.
 * Purely a host-side flag: requires CONFIG_BT_HS, BR/EDR, SSP
 * capability and SSP enabled. Disabling while powered is rejected.
 */
1823 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1825 struct mgmt_mode *cp = data;
1830 bt_dev_dbg(hdev, "sock %p", sk);
1832 if (!IS_ENABLED(CONFIG_BT_HS))
1833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1834 MGMT_STATUS_NOT_SUPPORTED);
1836 status = mgmt_bredr_support(hdev);
1838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1840 if (!lmp_ssp_capable(hdev))
1841 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1842 MGMT_STATUS_NOT_SUPPORTED);
1844 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1845 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1846 MGMT_STATUS_REJECTED);
1848 if (cp->val != 0x00 && cp->val != 0x01)
1849 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1850 MGMT_STATUS_INVALID_PARAMS);
/* An SSP change in flight could invalidate the checks above */
1854 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1855 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1861 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1863 if (hdev_is_powered(hdev)) {
1864 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1865 MGMT_STATUS_REJECTED);
1869 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1872 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1877 err = new_settings(hdev, sk);
1880 hci_dev_unlock(hdev);
/* HCI completion for the Set LE request: on failure, fail all pending
 * SET_LE commands; on success, answer them, broadcast new settings,
 * and — if LE ended up enabled — refresh the default advertising and
 * scan-response data and the background scan.
 */
1884 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1886 struct cmd_lookup match = { NULL, hdev };
1891 u8 mgmt_err = mgmt_status(status);
1893 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1898 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1900 new_settings(hdev, match.sk);
1905 /* Make sure the controller has a good default for
1906 * advertising data. Restrict the update to when LE
1907 * has actually been enabled. During power on, the
1908 * update in powered_update_hci will take care of it.
1910 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1911 struct hci_request req;
1912 hci_req_init(&req, hdev);
1913 if (ext_adv_capable(hdev)) {
/* Extended advertising path: (re)create adv set 0x00 */
1916 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1918 __hci_req_update_scan_rsp_data(&req, 0x00);
1920 __hci_req_update_adv_data(&req, 0x00);
1921 __hci_req_update_scan_rsp_data(&req, 0x00);
1923 hci_req_run(&req, NULL);
1924 hci_update_background_scan(hdev);
1928 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: enable/disable LE host support. LE-only
 * controllers may not switch LE off. When powered off or already in
 * the requested state only flags change; otherwise a
 * Write LE Host Supported request is issued (disabling advertising and
 * clearing extended advertising sets first when turning LE off) with
 * completion handled by le_enable_complete().
 */
1931 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1933 struct mgmt_mode *cp = data;
1934 struct hci_cp_write_le_host_supported hci_cp;
1935 struct mgmt_pending_cmd *cmd;
1936 struct hci_request req;
1940 bt_dev_dbg(hdev, "sock %p", sk);
1942 if (!lmp_le_capable(hdev))
1943 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1944 MGMT_STATUS_NOT_SUPPORTED);
1946 if (cp->val != 0x00 && cp->val != 0x01)
1947 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1948 MGMT_STATUS_INVALID_PARAMS);
1950 /* Bluetooth single mode LE only controllers or dual-mode
1951 * controllers configured as LE only devices, do not allow
1952 * switching LE off. These have either LE enabled explicitly
1953 * or BR/EDR has been previously switched off.
1955 * When trying to enable an already enabled LE, then gracefully
1956 * send a positive response. Trying to disable it however will
1957 * result into rejection.
1959 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1960 if (cp->val == 0x01)
1961 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1964 MGMT_STATUS_REJECTED);
1970 enabled = lmp_host_le_capable(hdev);
/* Turning LE off: drop all advertising instances first */
1973 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1975 if (!hdev_is_powered(hdev) || val == enabled) {
1976 bool changed = false;
1978 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1979 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1983 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1984 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1988 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1993 err = new_settings(hdev, sk);
1998 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1999 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2000 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2005 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2011 hci_req_init(&req, hdev);
2013 memset(&hci_cp, 0, sizeof(hci_cp));
2017 hci_cp.simul = 0x00;
2019 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2020 __hci_req_disable_advertising(&req);
2022 if (ext_adv_capable(hdev))
2023 __hci_req_clear_ext_adv_sets(&req);
2026 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2029 err = hci_req_run(&req, le_enable_complete);
2031 mgmt_pending_remove(cmd);
2034 hci_dev_unlock(hdev);
2038 /* This is a helper function to test for pending mgmt commands that can
2039 * cause CoD or EIR HCI commands. We can only allow one such pending
2040 * mgmt command at a time since otherwise we cannot easily track what
2041 * the current values are, will be, and based on that calculate if a new
2042 * HCI command needs to be sent and if yes with what value.
2044 static bool pending_eir_or_class(struct hci_dev *hdev)
2046 struct mgmt_pending_cmd *cmd;
/* Scan the pending-command list for any opcode that touches CoD/EIR */
2048 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2049 switch (cmd->opcode) {
2050 case MGMT_OP_ADD_UUID:
2051 case MGMT_OP_REMOVE_UUID:
2052 case MGMT_OP_SET_DEV_CLASS:
2053 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used to detect 16/32-bit short-form UUIDs.
 */
2061 static const u8 bluetooth_base_uuid[] = {
2062 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2063 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Determine the effective size of a 128-bit little-endian UUID: if its
 * low 12 bytes match the Bluetooth Base UUID it is a short-form UUID,
 * whose value is held in the top 4 bytes (return values elided here).
 */
2066 static u8 get_uuid_size(const u8 *uuid)
2070 if (memcmp(uuid, bluetooth_base_uuid, 12))
2073 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for UUID/class commands: answer the pending
 * @mgmt_op with the (3-byte) current device class and remove it.
 */
2080 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2082 struct mgmt_pending_cmd *cmd;
2086 cmd = pending_find(mgmt_op, hdev);
2090 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2091 mgmt_status(status), hdev->dev_class, 3);
2093 mgmt_pending_remove(cmd);
2096 hci_dev_unlock(hdev);
/* HCI completion for add_uuid(): forward to the shared class handler. */
2099 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2101 bt_dev_dbg(hdev, "status 0x%02x", status);
2103 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: append a service UUID to hdev->uuids and
 * refresh the device class and EIR data. Replies immediately when no
 * HCI commands were needed (-ENODATA); otherwise defers via a pending
 * command resolved in add_uuid_complete().
 */
2106 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2108 struct mgmt_cp_add_uuid *cp = data;
2109 struct mgmt_pending_cmd *cmd;
2110 struct hci_request req;
2111 struct bt_uuid *uuid;
2114 bt_dev_dbg(hdev, "sock %p", sk);
/* Only one CoD/EIR-affecting command may be in flight at a time */
2118 if (pending_eir_or_class(hdev)) {
2119 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2124 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2130 memcpy(uuid->uuid, cp->uuid, 16);
2131 uuid->svc_hint = cp->svc_hint;
2132 uuid->size = get_uuid_size(cp->uuid);
2134 list_add_tail(&uuid->list, &hdev->uuids);
2136 hci_req_init(&req, hdev);
2138 __hci_req_update_class(&req);
2139 __hci_req_update_eir(&req);
2141 err = hci_req_run(&req, add_uuid_complete);
2143 if (err != -ENODATA)
/* Nothing queued: complete right away with the current class */
2146 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2147 hdev->dev_class, 3);
2151 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2160 hci_dev_unlock(hdev);
/* Arm the service cache when powered: sets HCI_SERVICE_CACHE and, if it
 * was not already set, schedules the delayed service_cache work.
 * Return value semantics depend on lines elided from this excerpt.
 */
2164 static bool enable_service_cache(struct hci_dev *hdev)
2166 if (!hdev_is_powered(hdev))
2169 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2170 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI completion for remove_uuid(): forward to the shared class handler. */
2178 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2180 bt_dev_dbg(hdev, "status 0x%02x", status);
2182 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one UUID, or — when the request
 * carries the all-zero wildcard UUID — clear the whole list (optionally
 * deferring the EIR update via the service cache). Class and EIR are
 * then refreshed as in add_uuid().
 */
2185 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2188 struct mgmt_cp_remove_uuid *cp = data;
2189 struct mgmt_pending_cmd *cmd;
2190 struct bt_uuid *match, *tmp;
2191 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2192 struct hci_request req;
2195 bt_dev_dbg(hdev, "sock %p", sk);
2199 if (pending_eir_or_class(hdev)) {
2200 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: remove every stored UUID */
2205 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2206 hci_uuids_clear(hdev);
2208 if (enable_service_cache(hdev)) {
2209 err = mgmt_cmd_complete(sk, hdev->id,
2210 MGMT_OP_REMOVE_UUID,
2211 0, hdev->dev_class, 3);
2220 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2221 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2224 list_del(&match->list);
/* No matching UUID found */
2230 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2231 MGMT_STATUS_INVALID_PARAMS);
2236 hci_req_init(&req, hdev);
2238 __hci_req_update_class(&req);
2239 __hci_req_update_eir(&req);
2241 err = hci_req_run(&req, remove_uuid_complete);
2243 if (err != -ENODATA)
2246 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2247 hdev->dev_class, 3);
2251 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2260 hci_dev_unlock(hdev);
/* HCI completion for set_dev_class(): forward to the shared class handler. */
2264 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2266 bt_dev_dbg(hdev, "status 0x%02x", status);
2268 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor device class.
 * Low 2 bits of minor and high 3 bits of major are reserved and must
 * be zero. If the service cache was armed, it is flushed synchronously
 * before the class/EIR update so cached UUIDs are written out too.
 */
2271 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2274 struct mgmt_cp_set_dev_class *cp = data;
2275 struct mgmt_pending_cmd *cmd;
2276 struct hci_request req;
2279 bt_dev_dbg(hdev, "sock %p", sk);
2281 if (!lmp_bredr_capable(hdev))
2282 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2283 MGMT_STATUS_NOT_SUPPORTED);
2287 if (pending_eir_or_class(hdev)) {
2288 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2293 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2294 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2295 MGMT_STATUS_INVALID_PARAMS);
2299 hdev->major_class = cp->major;
2300 hdev->minor_class = cp->minor;
2302 if (!hdev_is_powered(hdev)) {
2303 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2304 hdev->dev_class, 3);
2308 hci_req_init(&req, hdev);
2310 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
/* Drop the lock while cancelling: the work may itself take it */
2311 hci_dev_unlock(hdev);
2312 cancel_delayed_work_sync(&hdev->service_cache);
2314 __hci_req_update_eir(&req);
2317 __hci_req_update_class(&req);
2319 err = hci_req_run(&req, set_class_complete);
2321 if (err != -ENODATA)
2324 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2325 hdev->dev_class, 3);
2329 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2338 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list. Validates key_count against the theoretical
 * maximum and the actual payload length, validates each entry, updates
 * the keep-debug-keys policy, then clears and repopulates the key
 * store (skipping blocked keys and debug-combination keys).
 */
2342 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2345 struct mgmt_cp_load_link_keys *cp = data;
2346 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2347 sizeof(struct mgmt_link_key_info));
2348 u16 key_count, expected_len;
2352 bt_dev_dbg(hdev, "sock %p", sk);
2354 if (!lmp_bredr_capable(hdev))
2355 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2356 MGMT_STATUS_NOT_SUPPORTED);
2358 key_count = __le16_to_cpu(cp->key_count);
2359 if (key_count > max_key_count) {
2360 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2363 MGMT_STATUS_INVALID_PARAMS);
/* Payload length must exactly match the declared key count */
2366 expected_len = struct_size(cp, keys, key_count);
2367 if (expected_len != len) {
2368 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2370 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2371 MGMT_STATUS_INVALID_PARAMS);
2374 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2376 MGMT_STATUS_INVALID_PARAMS);
2378 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: reject the whole load if any entry is malformed */
2381 for (i = 0; i < key_count; i++) {
2382 struct mgmt_link_key_info *key = &cp->keys[i];
2384 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2385 return mgmt_cmd_status(sk, hdev->id,
2386 MGMT_OP_LOAD_LINK_KEYS,
2387 MGMT_STATUS_INVALID_PARAMS);
2392 hci_link_keys_clear(hdev);
2395 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2397 changed = hci_dev_test_and_clear_flag(hdev,
2398 HCI_KEEP_DEBUG_KEYS);
2401 new_settings(hdev, NULL);
/* Second pass: store the validated keys */
2403 for (i = 0; i < key_count; i++) {
2404 struct mgmt_link_key_info *key = &cp->keys[i];
2406 if (hci_is_blocked_key(hdev,
2407 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2409 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2414 /* Always ignore debug keys and require a new pairing if
2415 * the user wants to use them.
2417 if (key->type == HCI_LK_DEBUG_COMBINATION)
2420 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2421 key->type, key->pin_len, NULL);
2424 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2426 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for the given address, skipping
 * @skip_sk (typically the socket that requested the unpair).
 */
2431 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2432 u8 addr_type, struct sock *skip_sk)
2434 struct mgmt_ev_device_unpaired ev;
2436 bacpy(&ev.addr.bdaddr, bdaddr);
2437 ev.addr.type = addr_type;
2439 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: remove the pairing data (link key for
 * BR/EDR; SMP LTK/IRK and conn params for LE) for one device and,
 * when cp->disconnect is set and a connection exists, also abort the
 * link, deferring completion until the disconnect finishes.
 */
2443 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2446 struct mgmt_cp_unpair_device *cp = data;
2447 struct mgmt_rp_unpair_device rp;
2448 struct hci_conn_params *params;
2449 struct mgmt_pending_cmd *cmd;
2450 struct hci_conn *conn;
2454 memset(&rp, 0, sizeof(rp));
2455 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2456 rp.addr.type = cp->addr.type;
2458 if (!bdaddr_type_is_valid(cp->addr.type))
2459 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2460 MGMT_STATUS_INVALID_PARAMS,
2463 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2464 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2465 MGMT_STATUS_INVALID_PARAMS,
2470 if (!hdev_is_powered(hdev)) {
2471 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2472 MGMT_STATUS_NOT_POWERED, &rp,
2477 if (cp->addr.type == BDADDR_BREDR) {
2478 /* If disconnection is requested, then look up the
2479 * connection. If the remote device is connected, it
2480 * will be later used to terminate the link.
2482 * Setting it to NULL explicitly will cause no
2483 * termination of the link.
2486 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2491 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2493 err = mgmt_cmd_complete(sk, hdev->id,
2494 MGMT_OP_UNPAIR_DEVICE,
2495 MGMT_STATUS_NOT_PAIRED, &rp,
2503 /* LE address type */
2504 addr_type = le_addr_type(cp->addr.type);
2506 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2507 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2509 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2510 MGMT_STATUS_NOT_PAIRED, &rp,
2515 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2517 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2522 /* Defer clearing up the connection parameters until closing to
2523 * give a chance of keeping them if a repairing happens.
2525 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2527 /* Disable auto-connection parameters if present */
2528 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2530 if (params->explicit_connect)
2531 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2533 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2536 /* If disconnection is not requested, then clear the connection
2537 * variable so that the link is not terminated.
2539 if (!cp->disconnect)
2543 /* If the connection variable is set, then termination of the
2544 * link is requested.
2547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2549 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2553 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2560 cmd->cmd_complete = addr_cmd_complete;
2562 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2564 mgmt_pending_remove(cmd);
2567 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: look up the BR/EDR or LE connection for
 * the given address and request its termination with reason
 * "remote user terminated". Completion is deferred via a pending
 * command that echoes the address back (generic_cmd_complete).
 */
2571 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2574 struct mgmt_cp_disconnect *cp = data;
2575 struct mgmt_rp_disconnect rp;
2576 struct mgmt_pending_cmd *cmd;
2577 struct hci_conn *conn;
2580 bt_dev_dbg(hdev, "sock %p", sk);
2582 memset(&rp, 0, sizeof(rp));
2583 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2584 rp.addr.type = cp->addr.type;
2586 if (!bdaddr_type_is_valid(cp->addr.type))
2587 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2588 MGMT_STATUS_INVALID_PARAMS,
2593 if (!test_bit(HCI_UP, &hdev->flags)) {
2594 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2595 MGMT_STATUS_NOT_POWERED, &rp,
2600 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2601 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2602 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2606 if (cp->addr.type == BDADDR_BREDR)
2607 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2610 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2611 le_addr_type(cp->addr.type));
/* Connections still being set up (or already gone) cannot be
 * disconnected through this path.
 */
2613 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2614 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2615 MGMT_STATUS_NOT_CONNECTED, &rp,
2620 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2626 cmd->cmd_complete = generic_cmd_complete;
2628 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2630 mgmt_pending_remove(cmd);
2633 hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into the mgmt BDADDR_*
 * address type: LE links map to public/random LE types, anything else
 * falls back to BR/EDR. (Some case labels are elided in this excerpt.)
 */
2637 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2639 switch (link_type) {
2641 switch (addr_type) {
2642 case ADDR_LE_DEV_PUBLIC:
2643 return BDADDR_LE_PUBLIC;
2646 /* Fallback to LE Random address type */
2647 return BDADDR_LE_RANDOM;
2651 /* Fallback to BR/EDR type */
2652 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: build and return the list of
 * mgmt-visible connections. Counts eligible entries first to size the
 * response, then fills it, filtering out SCO/eSCO links; the final
 * length is recomputed from the number of entries actually written.
 */
2656 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2659 struct mgmt_rp_get_connections *rp;
2664 bt_dev_dbg(hdev, "sock %p", sk);
2668 if (!hdev_is_powered(hdev)) {
2669 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2670 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected */
2675 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2676 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2680 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2687 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2688 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2690 bacpy(&rp->addr[i].bdaddr, &c->dst);
2691 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2692 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2697 rp->conn_count = cpu_to_le16(i);
2699 /* Recalculate length in case of filtered SCO connections, etc */
2700 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2701 struct_size(rp, addr, i));
2706 hci_dev_unlock(hdev);
/* Helper for PIN code rejection: register a pending
 * PIN_CODE_NEG_REPLY command and send the corresponding HCI negative
 * reply for the given address.
 */
2710 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2711 struct mgmt_cp_pin_code_neg_reply *cp)
2713 struct mgmt_pending_cmd *cmd;
2716 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2721 cmd->cmd_complete = addr_cmd_complete;
2723 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2724 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2726 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller. When the connection requires high security, a PIN
 * shorter than 16 bytes is rejected by sending a negative reply
 * instead. Completion is deferred via a pending command.
 */
2731 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2734 struct hci_conn *conn;
2735 struct mgmt_cp_pin_code_reply *cp = data;
2736 struct hci_cp_pin_code_reply reply;
2737 struct mgmt_pending_cmd *cmd;
2740 bt_dev_dbg(hdev, "sock %p", sk);
2744 if (!hdev_is_powered(hdev)) {
2745 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2746 MGMT_STATUS_NOT_POWERED);
2750 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2752 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2753 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte (fixed) PIN */
2757 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2758 struct mgmt_cp_pin_code_neg_reply ncp;
2760 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2762 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2764 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2766 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2767 MGMT_STATUS_INVALID_PARAMS);
2772 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2778 cmd->cmd_complete = addr_cmd_complete;
2780 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2781 reply.pin_len = cp->pin_len;
2782 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2784 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2786 mgmt_pending_remove(cmd);
2789 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the adapter's IO capability
 * used during pairing. Values above SMP_IO_KEYBOARD_DISPLAY are invalid.
 */
2793 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2796 struct mgmt_cp_set_io_capability *cp = data;
2798 bt_dev_dbg(hdev, "sock %p", sk);
2800 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2801 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2802 MGMT_STATUS_INVALID_PARAMS);
/* hdev locked around the write (hci_dev_lock call elided in extract) */
2806 hdev->io_capability = cp->io_capability;
2808 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2810 hci_dev_unlock(hdev);
2812 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is this
 * connection, or NULL-equivalent if none (return paths elided in extract).
 */
2816 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2818 struct hci_dev *hdev = conn->hdev;
2819 struct mgmt_pending_cmd *cmd;
2821 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2822 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2825 if (cmd->user_data != conn)
/* Complete a PAIR_DEVICE mgmt command with @status, detach the pairing
 * callbacks from the connection and drop the reference taken when the
 * pairing started.
 */
2834 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2836 struct mgmt_rp_pair_device rp;
2837 struct hci_conn *conn = cmd->user_data;
2840 bacpy(&rp.addr.bdaddr, &conn->dst);
2841 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2843 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2844 status, &rp, sizeof(rp));
2846 /* So we don't get further callbacks for this connection */
2847 conn->connect_cfm_cb = NULL;
2848 conn->security_cfm_cb = NULL;
2849 conn->disconn_cfm_cb = NULL;
2851 hci_conn_drop(conn);
2853 /* The device is paired so there is no need to remove
2854 * its connection parameters anymore.
2856 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called when SMP pairing finishes: complete any pending PAIR_DEVICE
 * command for this connection with success or failure.
 */
2863 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2865 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2866 struct mgmt_pending_cmd *cmd;
2868 cmd = find_pairing(conn);
/* NULL check on cmd elided in this extract */
2870 cmd->cmd_complete(cmd, status);
2871 mgmt_pending_remove(cmd);
/* BR/EDR connection callback: finish the pending pairing command with the
 * HCI status translated to a mgmt status.
 */
2875 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2877 struct mgmt_pending_cmd *cmd;
2879 BT_DBG("status %u", status);
2881 cmd = find_pairing(conn);
2883 BT_DBG("Unable to find a pending command");
2887 cmd->cmd_complete(cmd, mgmt_status(status));
2888 mgmt_pending_remove(cmd);
/* LE connection callback: same as pairing_complete_cb but for LE links
 * (additional early-exit condition elided in this extract).
 */
2891 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2893 struct mgmt_pending_cmd *cmd;
2895 BT_DBG("status %u", status);
2900 cmd = find_pairing(conn);
2902 BT_DBG("Unable to find a pending command");
2906 cmd->cmd_complete(cmd, mgmt_status(status));
2907 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a remote
 * device, over BR/EDR (hci_connect_acl) or LE (hci_connect_le_scan).
 * Registers a pending command completed later by the pairing callbacks.
 * NOTE(review): many interior lines (error labels, returns) are elided.
 */
2910 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2913 struct mgmt_cp_pair_device *cp = data;
2914 struct mgmt_rp_pair_device rp;
2915 struct mgmt_pending_cmd *cmd;
2916 u8 sec_level, auth_type;
2917 struct hci_conn *conn;
2920 bt_dev_dbg(hdev, "sock %p", sk);
2922 memset(&rp, 0, sizeof(rp));
2923 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2924 rp.addr.type = cp->addr.type;
/* Validate the address type and requested IO capability up front */
2926 if (!bdaddr_type_is_valid(cp->addr.type))
2927 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2928 MGMT_STATUS_INVALID_PARAMS,
2931 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2932 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2933 MGMT_STATUS_INVALID_PARAMS,
2938 if (!hdev_is_powered(hdev)) {
2939 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2940 MGMT_STATUS_NOT_POWERED, &rp,
2945 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2946 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2947 MGMT_STATUS_ALREADY_PAIRED, &rp,
2952 sec_level = BT_SECURITY_MEDIUM;
2953 auth_type = HCI_AT_DEDICATED_BONDING;
2955 if (cp->addr.type == BDADDR_BREDR) {
2956 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2957 auth_type, CONN_REASON_PAIR_DEVICE);
2959 u8 addr_type = le_addr_type(cp->addr.type);
2960 struct hci_conn_params *p;
2962 /* When pairing a new device, it is expected to remember
2963 * this device for future connections. Adding the connection
2964 * parameter information ahead of time allows tracking
2965 * of the peripheral preferred values and will speed up any
2966 * further connection establishment.
2968 * If connection parameters already exist, then they
2969 * will be kept and this function does nothing.
2971 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2973 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2974 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2976 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2977 sec_level, HCI_LE_CONN_TIMEOUT,
2978 CONN_REASON_PAIR_DEVICE);
/* Map the connect error to an appropriate mgmt status */
2984 if (PTR_ERR(conn) == -EBUSY)
2985 status = MGMT_STATUS_BUSY;
2986 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2987 status = MGMT_STATUS_NOT_SUPPORTED;
2988 else if (PTR_ERR(conn) == -ECONNREFUSED)
2989 status = MGMT_STATUS_REJECTED;
2991 status = MGMT_STATUS_CONNECT_FAILED;
2993 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2994 status, &rp, sizeof(rp));
/* A connection with callbacks already attached means another pairing
 * is in progress on it.
 */
2998 if (conn->connect_cfm_cb) {
2999 hci_conn_drop(conn);
3000 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3001 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3005 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3008 hci_conn_drop(conn);
3012 cmd->cmd_complete = pairing_complete;
3014 /* For LE, just connecting isn't a proof that the pairing finished */
3015 if (cp->addr.type == BDADDR_BREDR) {
3016 conn->connect_cfm_cb = pairing_complete_cb;
3017 conn->security_cfm_cb = pairing_complete_cb;
3018 conn->disconn_cfm_cb = pairing_complete_cb;
3020 conn->connect_cfm_cb = le_pairing_complete_cb;
3021 conn->security_cfm_cb = le_pairing_complete_cb;
3022 conn->disconn_cfm_cb = le_pairing_complete_cb;
3025 conn->io_capability = cp->io_cap;
/* Hold a reference on the connection for the lifetime of the command */
3026 cmd->user_data = hci_conn_get(conn);
3028 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3029 hci_conn_security(conn, sec_level, auth_type, true)) {
3030 cmd->cmd_complete(cmd, 0);
3031 mgmt_pending_remove(cmd);
3037 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: cancel an in-progress PAIR_DEVICE
 * command for the given address, remove any half-created keys and abort
 * the link if it was created solely for the pairing attempt.
 */
3041 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3044 struct mgmt_addr_info *addr = data;
3045 struct mgmt_pending_cmd *cmd;
3046 struct hci_conn *conn;
3049 bt_dev_dbg(hdev, "sock %p", sk);
3053 if (!hdev_is_powered(hdev)) {
3054 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3055 MGMT_STATUS_NOT_POWERED);
3059 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3061 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3062 MGMT_STATUS_INVALID_PARAMS);
3066 conn = cmd->user_data;
/* The cancel must target the same address the pairing was started for */
3068 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3069 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3070 MGMT_STATUS_INVALID_PARAMS);
3074 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3075 mgmt_pending_remove(cmd);
3077 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3078 addr, sizeof(*addr));
3080 /* Since user doesn't want to proceed with the connection, abort any
3081 * ongoing pairing and then terminate the link if it was created
3082 * because of the pair device action.
3084 if (addr->type == BDADDR_BREDR)
3085 hci_remove_link_key(hdev, &addr->bdaddr);
3087 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3088 le_addr_type(addr->type));
3090 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3091 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3094 hci_dev_unlock(hdev);
/* Common helper for the user confirm/passkey (neg) reply mgmt commands:
 * for LE the reply is routed through SMP, for BR/EDR the corresponding
 * HCI command (@hci_op) is sent, with a pending command tracking it.
 * @passkey is only used for HCI_OP_USER_PASSKEY_REPLY.
 */
3098 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3099 struct mgmt_addr_info *addr, u16 mgmt_op,
3100 u16 hci_op, __le32 passkey)
3102 struct mgmt_pending_cmd *cmd;
3103 struct hci_conn *conn;
3108 if (!hdev_is_powered(hdev)) {
3109 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3110 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the connection on the transport matching the address type */
3115 if (addr->type == BDADDR_BREDR)
3116 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3118 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3119 le_addr_type(addr->type));
3122 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3123 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE replies are handled entirely by SMP — no HCI command needed */
3128 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3129 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3131 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3132 MGMT_STATUS_SUCCESS, addr,
3135 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3136 MGMT_STATUS_FAILED, addr,
3142 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3148 cmd->cmd_complete = addr_cmd_complete;
3150 /* Continue with pairing via HCI */
3151 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3152 struct hci_cp_user_passkey_reply cp;
3154 bacpy(&cp.bdaddr, &addr->bdaddr);
3155 cp.passkey = passkey;
3156 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3158 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* presumably only on send failure — guarding condition elided here */
3162 mgmt_pending_remove(cmd);
3165 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over user_pairing_resp. */
3169 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3170 void *data, u16 len)
3172 struct mgmt_cp_pin_code_neg_reply *cp = data;
3174 bt_dev_dbg(hdev, "sock %p", sk);
3176 return user_pairing_resp(sk, hdev, &cp->addr,
3177 MGMT_OP_PIN_CODE_NEG_REPLY,
3178 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validates the fixed command size,
 * then delegates to user_pairing_resp.
 */
3181 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3184 struct mgmt_cp_user_confirm_reply *cp = data;
3186 bt_dev_dbg(hdev, "sock %p", sk);
3188 if (len != sizeof(*cp))
3189 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3190 MGMT_STATUS_INVALID_PARAMS);
3192 return user_pairing_resp(sk, hdev, &cp->addr,
3193 MGMT_OP_USER_CONFIRM_REPLY,
3194 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: delegates to user_pairing_resp. */
3197 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3198 void *data, u16 len)
3200 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3202 bt_dev_dbg(hdev, "sock %p", sk);
3204 return user_pairing_resp(sk, hdev, &cp->addr,
3205 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3206 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user's passkey via
 * user_pairing_resp.
 */
3209 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3212 struct mgmt_cp_user_passkey_reply *cp = data;
3214 bt_dev_dbg(hdev, "sock %p", sk);
3216 return user_pairing_resp(sk, hdev, &cp->addr,
3217 MGMT_OP_USER_PASSKEY_REPLY,
3218 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: delegates to user_pairing_resp. */
3221 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3222 void *data, u16 len)
3224 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3226 bt_dev_dbg(hdev, "sock %p", sk);
3228 return user_pairing_resp(sk, hdev, &cp->addr,
3229 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3230 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* If the current advertising instance carries any of @flags (e.g. local
 * name or appearance), cancel its timeout and reschedule so the updated
 * data is re-advertised.
 */
3233 static void adv_expire(struct hci_dev *hdev, u32 flags)
3235 struct adv_info *adv_instance;
3236 struct hci_request req;
3239 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3243 /* stop if current instance doesn't need to be changed */
3244 if (!(adv_instance->flags & flags))
3247 cancel_adv_timeout(hdev);
3249 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3253 hci_req_init(&req, hdev);
3254 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3259 hci_req_run(&req, NULL);
/* HCI request completion for SET_LOCAL_NAME: report status to the
 * requester and, if advertising is active, refresh instances that embed
 * the local name.
 */
3262 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3264 struct mgmt_cp_set_local_name *cp;
3265 struct mgmt_pending_cmd *cmd;
3267 bt_dev_dbg(hdev, "status 0x%02x", status);
3271 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3278 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3279 mgmt_status(status));
3281 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Advertising data may carry the name — force a refresh */
3284 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3285 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3288 mgmt_pending_remove(cmd);
3291 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the adapter's long and short
 * names. Unpowered: store locally and emit LOCAL_NAME_CHANGED. Powered:
 * push the name (and EIR / scan response data) to the controller via an
 * HCI request completed in set_name_complete.
 */
3294 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3297 struct mgmt_cp_set_local_name *cp = data;
3298 struct mgmt_pending_cmd *cmd;
3299 struct hci_request req;
3302 bt_dev_dbg(hdev, "sock %p", sk);
3306 /* If the old values are the same as the new ones just return a
3307 * direct command complete event.
3309 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3310 !memcmp(hdev->short_name, cp->short_name,
3311 sizeof(hdev->short_name))) {
3312 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name is host-only state, no controller interaction required */
3317 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3319 if (!hdev_is_powered(hdev)) {
3320 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3322 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3327 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3328 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3329 ext_info_changed(hdev, sk);
3334 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3340 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3342 hci_req_init(&req, hdev);
3344 if (lmp_bredr_capable(hdev)) {
3345 __hci_req_update_name(&req);
3346 __hci_req_update_eir(&req);
3349 /* The name is stored in the scan response data and so
3350 * no need to update the advertising data here.
3352 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3353 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3355 err = hci_req_run(&req, set_name_complete);
/* presumably only on request failure — guarding condition elided here */
3357 mgmt_pending_remove(cmd);
3360 hci_dev_unlock(hdev);
/* MGMT_OP_SET_APPEARANCE handler: store the LE appearance value and, when
 * advertising with the appearance flag, refresh the advertising instance.
 * LE-only; rejected on adapters without LE support.
 */
3364 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3367 struct mgmt_cp_set_appearance *cp = data;
3371 bt_dev_dbg(hdev, "sock %p", sk);
3373 if (!lmp_le_capable(hdev))
3374 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3375 MGMT_STATUS_NOT_SUPPORTED);
3377 appearance = le16_to_cpu(cp->appearance);
3381 if (hdev->appearance != appearance) {
3382 hdev->appearance = appearance;
3384 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3385 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3387 ext_info_changed(hdev, sk);
3390 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3393 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report supported, selected and
 * configurable PHYs for the adapter.
 */
3398 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3399 void *data, u16 len)
3401 struct mgmt_rp_get_phy_configuration rp;
3403 bt_dev_dbg(hdev, "sock %p", sk);
3407 memset(&rp, 0, sizeof(rp));
3409 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3410 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3411 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3413 hci_dev_unlock(hdev);
3415 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY_CONFIGURATION_CHANGED event with the currently selected
 * PHYs, skipping the socket that triggered the change.
 */
3419 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3421 struct mgmt_ev_phy_configuration_changed ev;
3423 memset(&ev, 0, sizeof(ev));
3425 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3427 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* HCI request completion for SET_PHY_CONFIGURATION: finish the pending
 * mgmt command and, on success, broadcast the configuration change.
 */
3431 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3432 u16 opcode, struct sk_buff *skb)
3434 struct mgmt_pending_cmd *cmd;
3436 bt_dev_dbg(hdev, "status 0x%02x", status);
3440 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3445 mgmt_cmd_status(cmd->sk, hdev->id,
3446 MGMT_OP_SET_PHY_CONFIGURATION,
3447 mgmt_status(status));
3449 mgmt_cmd_complete(cmd->sk, hdev->id,
3450 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3453 mgmt_phy_configuration_changed(hdev, cmd->sk);
3456 mgmt_pending_remove(cmd);
3459 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PHY_CONFIGURATION handler: apply the requested PHY
 * selection. BR/EDR PHY bits are translated into ACL packet types stored
 * in hdev->pkt_type; LE PHY bits are programmed into the controller via
 * HCI LE Set Default PHY, completed in set_default_phy_complete.
 * Validation: selection must be a subset of supported PHYs and must keep
 * every non-configurable (always-on) PHY enabled.
 */
3462 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3463 void *data, u16 len)
3465 struct mgmt_cp_set_phy_configuration *cp = data;
3466 struct hci_cp_le_set_default_phy cp_phy;
3467 struct mgmt_pending_cmd *cmd;
3468 struct hci_request req;
3469 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3470 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3471 bool changed = false;
3474 bt_dev_dbg(hdev, "sock %p", sk);
3476 configurable_phys = get_configurable_phys(hdev);
3477 supported_phys = get_supported_phys(hdev);
3478 selected_phys = __le32_to_cpu(cp->selected_phys);
3480 if (selected_phys & ~supported_phys)
3481 return mgmt_cmd_status(sk, hdev->id,
3482 MGMT_OP_SET_PHY_CONFIGURATION,
3483 MGMT_STATUS_INVALID_PARAMS);
3485 unconfigure_phys = supported_phys & ~configurable_phys;
/* Mandatory (non-configurable) PHYs must all remain selected */
3487 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3488 return mgmt_cmd_status(sk, hdev->id,
3489 MGMT_OP_SET_PHY_CONFIGURATION,
3490 MGMT_STATUS_INVALID_PARAMS);
3492 if (selected_phys == get_selected_phys(hdev))
3493 return mgmt_cmd_complete(sk, hdev->id,
3494 MGMT_OP_SET_PHY_CONFIGURATION,
3499 if (!hdev_is_powered(hdev)) {
3500 err = mgmt_cmd_status(sk, hdev->id,
3501 MGMT_OP_SET_PHY_CONFIGURATION,
3502 MGMT_STATUS_REJECTED);
3506 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3507 err = mgmt_cmd_status(sk, hdev->id,
3508 MGMT_OP_SET_PHY_CONFIGURATION,
/* BR 1M multi-slot bits are enable flags for DH/DM packet types */
3513 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3514 pkt_type |= (HCI_DH3 | HCI_DM3);
3516 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3518 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3519 pkt_type |= (HCI_DH5 | HCI_DM5);
3521 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* EDR packet-type bits use inverted logic: a set HCI bit means
 * "shall not be used", so selecting the PHY clears the bit.
 */
3523 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3524 pkt_type &= ~HCI_2DH1;
3526 pkt_type |= HCI_2DH1;
3528 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3529 pkt_type &= ~HCI_2DH3;
3531 pkt_type |= HCI_2DH3;
3533 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3534 pkt_type &= ~HCI_2DH5;
3536 pkt_type |= HCI_2DH5;
3538 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3539 pkt_type &= ~HCI_3DH1;
3541 pkt_type |= HCI_3DH1;
3543 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3544 pkt_type &= ~HCI_3DH3;
3546 pkt_type |= HCI_3DH3;
3548 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3549 pkt_type &= ~HCI_3DH5;
3551 pkt_type |= HCI_3DH5;
3553 if (pkt_type != hdev->pkt_type) {
3554 hdev->pkt_type = pkt_type;
/* If the LE part is unchanged only the BR/EDR packet types were
 * touched — no HCI request needed, just report the change.
 */
3558 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3559 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3561 mgmt_phy_configuration_changed(hdev, sk);
3563 err = mgmt_cmd_complete(sk, hdev->id,
3564 MGMT_OP_SET_PHY_CONFIGURATION,
3570 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3577 hci_req_init(&req, hdev);
3579 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction */
3581 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3582 cp_phy.all_phys |= 0x01;
3584 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3585 cp_phy.all_phys |= 0x02;
3587 if (selected_phys & MGMT_PHY_LE_1M_TX)
3588 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3590 if (selected_phys & MGMT_PHY_LE_2M_TX)
3591 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3593 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3594 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3596 if (selected_phys & MGMT_PHY_LE_1M_RX)
3597 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3599 if (selected_phys & MGMT_PHY_LE_2M_RX)
3600 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3602 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3603 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3605 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3607 err = hci_req_run_skb(&req, set_default_phy_complete);
/* presumably only on request failure — guarding condition elided here */
3609 mgmt_pending_remove(cmd);
3612 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the adapter's list of blocked
 * (blacklisted) keys with the key list from the command. The key count is
 * bounded and the total payload length must match exactly.
 */
3617 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3620 int err = MGMT_STATUS_SUCCESS;
3621 struct mgmt_cp_set_blocked_keys *keys = data;
/* Upper bound that keeps the computed expected_len within u16 range */
3622 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3623 sizeof(struct mgmt_blocked_key_info));
3624 u16 key_count, expected_len;
3627 bt_dev_dbg(hdev, "sock %p", sk);
3629 key_count = __le16_to_cpu(keys->key_count);
3630 if (key_count > max_key_count) {
3631 bt_dev_err(hdev, "too big key_count value %u", key_count);
3632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3633 MGMT_STATUS_INVALID_PARAMS);
3636 expected_len = struct_size(keys, keys, key_count);
3637 if (expected_len != len) {
3638 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3641 MGMT_STATUS_INVALID_PARAMS);
/* Old list is fully replaced by the new one */
3646 hci_blocked_keys_clear(hdev);
3648 for (i = 0; i < keys->key_count; ++i) {
3649 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3652 err = MGMT_STATUS_NO_RESOURCES;
3656 b->type = keys->keys[i].type;
3657 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3658 list_add_rcu(&b->list, &hdev->blocked_keys);
3660 hci_dev_unlock(hdev);
3662 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband-speech setting.
 * Only allowed on controllers with the WIDEBAND_SPEECH_SUPPORTED quirk and,
 * when powered, only if the requested state matches the current one
 * (i.e. the flag cannot be changed while powered).
 */
3666 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3667 void *data, u16 len)
3669 struct mgmt_mode *cp = data;
3671 bool changed = false;
3673 bt_dev_dbg(hdev, "sock %p", sk);
3675 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3676 return mgmt_cmd_status(sk, hdev->id,
3677 MGMT_OP_SET_WIDEBAND_SPEECH,
3678 MGMT_STATUS_NOT_SUPPORTED);
3680 if (cp->val != 0x00 && cp->val != 0x01)
3681 return mgmt_cmd_status(sk, hdev->id,
3682 MGMT_OP_SET_WIDEBAND_SPEECH,
3683 MGMT_STATUS_INVALID_PARAMS);
3687 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3688 err = mgmt_cmd_status(sk, hdev->id,
3689 MGMT_OP_SET_WIDEBAND_SPEECH,
/* Changing the value while powered is rejected */
3694 if (hdev_is_powered(hdev) &&
3695 !!cp->val != hci_dev_test_flag(hdev,
3696 HCI_WIDEBAND_SPEECH_ENABLED)) {
3697 err = mgmt_cmd_status(sk, hdev->id,
3698 MGMT_OP_SET_WIDEBAND_SPEECH,
3699 MGMT_STATUS_REJECTED);
3704 changed = !hci_dev_test_and_set_flag(hdev,
3705 HCI_WIDEBAND_SPEECH_ENABLED);
3707 changed = hci_dev_test_and_clear_flag(hdev,
3708 HCI_WIDEBAND_SPEECH_ENABLED);
3710 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
/* Only broadcast new settings if something actually changed */
3715 err = new_settings(hdev, sk);
3718 hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP handler: build an EIR-encoded capability
 * list (security flags, max encryption key sizes, and — if available —
 * the LE TX power range) and return it to the caller.
 */
3722 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3723 void *data, u16 data_len)
3726 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3729 u8 tx_power_range[2];
3731 bt_dev_dbg(hdev, "sock %p", sk);
3733 memset(&buf, 0, sizeof(buf));
3737 /* When the Read Simple Pairing Options command is supported, then
3738 * the remote public key validation is supported.
3740 * Alternatively, when Microsoft extensions are available, they can
3741 * indicate support for public key validation as well.
3743 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3744 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3746 flags |= 0x02; /* Remote public key validation (LE) */
3748 /* When the Read Encryption Key Size command is supported, then the
3749 * encryption key size is enforced.
3751 if (hdev->commands[20] & 0x10)
3752 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3754 flags |= 0x08; /* Encryption key size enforcement (LE) */
3756 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3759 /* When the Read Simple Pairing Options command is supported, then
3760 * also max encryption key size information is provided.
3762 if (hdev->commands[41] & 0x08)
3763 cap_len = eir_append_le16(rp->cap, cap_len,
3764 MGMT_CAP_MAX_ENC_KEY_SIZE,
3765 hdev->max_enc_key_size);
3767 cap_len = eir_append_le16(rp->cap, cap_len,
3768 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3769 SMP_MAX_ENC_KEY_SIZE);
3771 /* Append the min/max LE tx power parameters if we were able to fetch
3772 * it from the controller
3774 if (hdev->commands[38] & 0x80) {
3775 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3776 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3777 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3781 rp->cap_len = cpu_to_le16(cap_len);
3783 hci_dev_unlock(hdev);
3785 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3786 rp, sizeof(*rp) + cap_len);
/* UUIDs identifying experimental features; stored in little-endian
 * (reversed) byte order relative to the textual UUID above each table.
 */
3789 #ifdef CONFIG_BT_FEATURE_DEBUG
3790 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3791 static const u8 debug_uuid[16] = {
3792 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3793 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3797 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3798 static const u8 simult_central_periph_uuid[16] = {
3799 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3800 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3803 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3804 static const u8 rpa_resolution_uuid[16] = {
3805 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3806 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: list the experimental features
 * (debug, simultaneous central/peripheral, LL privacy / RPA resolution)
 * with their current enable flags, and subscribe the socket to future
 * EXP_FEATURE_CHANGED events. Works with or without a controller index
 * (hdev may be NULL for the debug feature).
 */
3809 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3810 void *data, u16 data_len)
3812 char buf[62]; /* Enough space for 3 features */
3813 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3817 bt_dev_dbg(hdev, "sock %p", sk);
3819 memset(&buf, 0, sizeof(buf));
3821 #ifdef CONFIG_BT_FEATURE_DEBUG
/* condition on hdev elided here — debug entry is index-independent */
3823 flags = bt_dbg_get() ? BIT(0) : 0;
3825 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3826 rp->features[idx].flags = cpu_to_le32(flags);
/* Advertised only when the controller's LE state mask confirms it */
3832 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3833 (hdev->le_states[4] & 0x08) && /* Central */
3834 (hdev->le_states[4] & 0x40) && /* Peripheral */
3835 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3840 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3841 rp->features[idx].flags = cpu_to_le32(flags);
3845 if (hdev && use_ll_privacy(hdev)) {
3846 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3847 flags = BIT(0) | BIT(1);
3851 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3852 rp->features[idx].flags = cpu_to_le32(flags);
3856 rp->feature_count = cpu_to_le16(idx);
3858 /* After reading the experimental features information, enable
3859 * the events to update client on any future change.
3861 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3863 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3864 MGMT_OP_READ_EXP_FEATURES_INFO,
3865 0, rp, sizeof(*rp) + (20 * idx));
/* Emit an EXP_FEATURE_CHANGED event for the LL privacy feature. BIT(1)
 * is always set ("supported settings changed"); BIT(0) reflects @enabled.
 */
3868 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3871 struct mgmt_ev_exp_feature_changed ev;
3873 memset(&ev, 0, sizeof(ev));
3874 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3875 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3877 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3879 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3883 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Emit an EXP_FEATURE_CHANGED event for the global debug feature
 * (no controller index — hdev is NULL).
 */
3884 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3886 struct mgmt_ev_exp_feature_changed ev;
3888 memset(&ev, 0, sizeof(ev));
3889 memcpy(ev.uuid, debug_uuid, 16);
3890 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3892 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3894 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* MGMT_OP_SET_EXP_FEATURE handler. Dispatches on the feature UUID:
 *  - all-zero UUID: disable every experimental feature;
 *  - debug_uuid: toggle kernel Bluetooth debug (non-controller index);
 *  - rpa_resolution_uuid: toggle LL privacy (controller index, powered
 *    off only).
 * Unknown UUIDs are rejected with NOT_SUPPORTED.
 * NOTE(review): many interior lines (braces, returns) are elided here.
 */
3898 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3899 void *data, u16 data_len)
3901 struct mgmt_cp_set_exp_feature *cp = data;
3902 struct mgmt_rp_set_exp_feature rp;
3904 bt_dev_dbg(hdev, "sock %p", sk);
/* All-zero UUID means "disable all experimental features" */
3906 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3907 memset(rp.uuid, 0, 16);
3908 rp.flags = cpu_to_le32(0);
3910 #ifdef CONFIG_BT_FEATURE_DEBUG
3912 bool changed = bt_dbg_get();
3917 exp_debug_feature_changed(false, sk);
3921 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3922 bool changed = hci_dev_test_flag(hdev,
3923 HCI_ENABLE_LL_PRIVACY);
3925 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3928 exp_ll_privacy_feature_changed(false, hdev, sk);
3931 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3933 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3934 MGMT_OP_SET_EXP_FEATURE, 0,
3938 #ifdef CONFIG_BT_FEATURE_DEBUG
3939 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3943 /* Command requires to use the non-controller index */
3945 return mgmt_cmd_status(sk, hdev->id,
3946 MGMT_OP_SET_EXP_FEATURE,
3947 MGMT_STATUS_INVALID_INDEX);
3949 /* Parameters are limited to a single octet */
3950 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3951 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3952 MGMT_OP_SET_EXP_FEATURE,
3953 MGMT_STATUS_INVALID_PARAMS);
3955 /* Only boolean on/off is supported */
3956 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3957 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3958 MGMT_OP_SET_EXP_FEATURE,
3959 MGMT_STATUS_INVALID_PARAMS);
3961 val = !!cp->param[0];
3962 changed = val ? !bt_dbg_get() : bt_dbg_get();
3965 memcpy(rp.uuid, debug_uuid, 16);
3966 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3968 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3970 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3971 MGMT_OP_SET_EXP_FEATURE, 0,
/* Notify other sockets only if the value actually changed */
3975 exp_debug_feature_changed(val, sk);
3981 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3986 /* Command requires to use the controller index */
3988 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3989 MGMT_OP_SET_EXP_FEATURE,
3990 MGMT_STATUS_INVALID_INDEX);
3992 /* Changes can only be made when controller is powered down */
3993 if (hdev_is_powered(hdev))
3994 return mgmt_cmd_status(sk, hdev->id,
3995 MGMT_OP_SET_EXP_FEATURE,
3996 MGMT_STATUS_REJECTED);
3998 /* Parameters are limited to a single octet */
3999 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4000 return mgmt_cmd_status(sk, hdev->id,
4001 MGMT_OP_SET_EXP_FEATURE,
4002 MGMT_STATUS_INVALID_PARAMS);
4004 /* Only boolean on/off is supported */
4005 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4006 return mgmt_cmd_status(sk, hdev->id,
4007 MGMT_OP_SET_EXP_FEATURE,
4008 MGMT_STATUS_INVALID_PARAMS);
4010 val = !!cp->param[0];
4013 changed = !hci_dev_test_flag(hdev,
4014 HCI_ENABLE_LL_PRIVACY);
4015 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
/* LL privacy is incompatible with legacy advertising — clear it */
4016 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4018 /* Enable LL privacy + supported settings changed */
4019 flags = BIT(0) | BIT(1);
4021 changed = hci_dev_test_flag(hdev,
4022 HCI_ENABLE_LL_PRIVACY);
4023 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4025 /* Disable LL privacy + supported settings changed */
4029 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4030 rp.flags = cpu_to_le32(flags);
4032 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4034 err = mgmt_cmd_complete(sk, hdev->id,
4035 MGMT_OP_SET_EXP_FEATURE, 0,
4039 exp_ll_privacy_feature_changed(val, hdev, sk);
4044 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4045 MGMT_OP_SET_EXP_FEATURE,
4046 MGMT_STATUS_NOT_SUPPORTED);
/* Bitmask with every per-device connection flag up to HCI_CONN_FLAG_MAX set */
4049 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* MGMT_OP_GET_DEVICE_FLAGS handler: return current and supported flags
 * for a device on the accept list (BR/EDR) or in the LE connection
 * parameters. Unknown devices yield INVALID_PARAMS.
 */
4051 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4054 struct mgmt_cp_get_device_flags *cp = data;
4055 struct mgmt_rp_get_device_flags rp;
4056 struct bdaddr_list_with_flags *br_params;
4057 struct hci_conn_params *params;
4058 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4059 u32 current_flags = 0;
/* Stays INVALID_PARAMS unless a matching device entry is found */
4060 u8 status = MGMT_STATUS_INVALID_PARAMS;
4062 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4063 &cp->addr.bdaddr, cp->addr.type);
4067 memset(&rp, 0, sizeof(rp));
4069 if (cp->addr.type == BDADDR_BREDR) {
4070 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4076 current_flags = br_params->current_flags;
4078 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4079 le_addr_type(cp->addr.type));
4084 current_flags = params->current_flags;
4087 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4088 rp.addr.type = cp->addr.type;
4089 rp.supported_flags = cpu_to_le32(supported_flags);
4090 rp.current_flags = cpu_to_le32(current_flags);
4092 status = MGMT_STATUS_SUCCESS;
4095 hci_dev_unlock(hdev);
4097 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast a DEVICE_FLAGS_CHANGED event for the given device, skipping
 * the socket that made the change.
 */
4101 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4102 bdaddr_t *bdaddr, u8 bdaddr_type,
4103 u32 supported_flags, u32 current_flags)
4105 struct mgmt_ev_device_flags_changed ev;
4107 bacpy(&ev.addr.bdaddr, bdaddr);
4108 ev.addr.type = bdaddr_type;
4109 ev.supported_flags = cpu_to_le32(supported_flags);
4110 ev.current_flags = cpu_to_le32(current_flags);
4112 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: store per-device connection flags on
 * the accept-list entry (BR/EDR) or connection parameters (LE). Rejects
 * flags outside the supported mask and unknown devices; on success the
 * change is broadcast via device_flags_changed.
 */
4115 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4118 struct mgmt_cp_set_device_flags *cp = data;
4119 struct bdaddr_list_with_flags *br_params;
4120 struct hci_conn_params *params;
4121 u8 status = MGMT_STATUS_INVALID_PARAMS;
4122 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4123 u32 current_flags = __le32_to_cpu(cp->current_flags);
4125 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4126 &cp->addr.bdaddr, cp->addr.type,
4127 __le32_to_cpu(current_flags));
/* Every requested flag must be within the supported mask */
4129 if ((supported_flags | current_flags) != supported_flags) {
4130 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4131 current_flags, supported_flags);
4137 if (cp->addr.type == BDADDR_BREDR) {
4138 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4143 br_params->current_flags = current_flags;
4144 status = MGMT_STATUS_SUCCESS;
4146 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4147 &cp->addr.bdaddr, cp->addr.type);
4150 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4151 le_addr_type(cp->addr.type));
4153 params->current_flags = current_flags;
4154 status = MGMT_STATUS_SUCCESS;
4156 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4158 le_addr_type(cp->addr.type));
4163 hci_dev_unlock(hdev);
4165 if (status == MGMT_STATUS_SUCCESS)
4166 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4167 supported_flags, current_flags);
4169 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4170 &cp->addr, sizeof(cp->addr));
/* Emit MGMT_EV_ADV_MONITOR_ADDED carrying the new monitor handle;
 * 'sk' (the requesting socket) is skipped by mgmt_event(). */
4173 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4176 struct mgmt_ev_adv_monitor_added ev;
4178 ev.monitor_handle = cpu_to_le16(handle);
4180 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADV_MONITOR_REMOVED for 'handle'. If a REMOVE_ADV_MONITOR
 * command is pending for this device, its socket is used as the skip
 * socket so the remover does not get the event it triggered.
 * NOTE(review): the assignment of 'cp'/'sk_skip' from the pending command
 * is partly elided in this view.
 */
4183 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4185 struct mgmt_ev_adv_monitor_removed ev;
4186 struct mgmt_pending_cmd *cmd;
4187 struct sock *sk_skip = NULL;
4188 struct mgmt_cp_remove_adv_monitor *cp;
4190 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* A non-zero handle in the pending request means a targeted remove. */
4194 if (cp->monitor_handle)
4198 ev.monitor_handle = cpu_to_le16(handle);
4200 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * advertisement-monitor features plus the handles of all currently
 * registered monitors. The reply is variable-length (one u16 per handle)
 * and heap-allocated.
 */
4203 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4204 void *data, u16 len)
4206 struct adv_monitor *monitor = NULL;
4207 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4210 __u32 supported = 0;
4212 __u16 num_handles = 0;
4213 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4215 BT_DBG("request for %s", hdev->name);
/* OR-pattern monitoring is only advertised when MSFT extension works. */
4219 if (msft_monitor_supported(hdev))
4220 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Snapshot all registered monitor handles under the dev lock. */
4222 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4223 handles[num_handles++] = monitor->handle;
4225 hci_dev_unlock(hdev);
4227 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4228 rp = kmalloc(rp_size, GFP_KERNEL);
4232 /* All supported features are currently enabled */
4233 enabled = supported;
4235 rp->supported_features = cpu_to_le32(supported);
4236 rp->enabled_features = cpu_to_le32(enabled);
4237 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4238 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4239 rp->num_handles = cpu_to_le16(num_handles);
4241 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4243 err = mgmt_cmd_complete(sk, hdev->id,
4244 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4245 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for Add Adv Patterns Monitor (with or without RSSI).
 * Locates the matching pending command, replies with the monitor handle
 * and, on HCI success, marks the monitor registered, bumps the monitor
 * count and kicks the background scan.
 */
4252 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4254 struct mgmt_rp_add_adv_patterns_monitor rp;
4255 struct mgmt_pending_cmd *cmd;
4256 struct adv_monitor *monitor;
/* The RSSI variant is checked first; fall back to the plain opcode. */
4261 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4263 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4268 monitor = cmd->user_data;
4269 rp.monitor_handle = cpu_to_le16(monitor->handle);
4272 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4273 hdev->adv_monitors_cnt++;
4274 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4275 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4276 hci_update_background_scan(hdev);
4279 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4280 mgmt_status(status), &rp, sizeof(rp));
4281 mgmt_pending_remove(cmd);
4284 hci_dev_unlock(hdev);
4285 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4286 rp.monitor_handle, status);
/* Common worker for both Add Adv Patterns Monitor opcodes. Rejects the
 * request while conflicting LE/monitor commands are pending, queues a
 * pending command, and hands the monitor to hci_add_adv_monitor(). When
 * the add does not need controller interaction ('pending' false), the
 * command completes synchronously and the monitor is registered here;
 * otherwise completion happens in mgmt_add_adv_patterns_monitor_complete().
 * On any failure path the monitor is freed via hci_free_adv_monitor().
 * NOTE(review): several branch/label lines are elided in this view.
 */
4291 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4292 struct adv_monitor *m, u8 status,
4293 void *data, u16 len, u16 op)
4295 struct mgmt_rp_add_adv_patterns_monitor rp;
4296 struct mgmt_pending_cmd *cmd;
/* Only one monitor/LE state-changing operation may be in flight. */
4305 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4306 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4307 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4308 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4309 status = MGMT_STATUS_BUSY;
4313 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4315 status = MGMT_STATUS_NO_RESOURCES;
4320 pending = hci_add_adv_monitor(hdev, m, &err);
/* Map the core-layer errno onto an mgmt status code. */
4322 if (err == -ENOSPC || err == -ENOMEM)
4323 status = MGMT_STATUS_NO_RESOURCES;
4324 else if (err == -EINVAL)
4325 status = MGMT_STATUS_INVALID_PARAMS;
4327 status = MGMT_STATUS_FAILED;
4329 mgmt_pending_remove(cmd);
/* Synchronous success: no controller round-trip was needed. */
4334 mgmt_pending_remove(cmd);
4335 rp.monitor_handle = cpu_to_le16(m->handle);
4336 mgmt_adv_monitor_added(sk, hdev, m->handle);
4337 m->state = ADV_MONITOR_STATE_REGISTERED;
4338 hdev->adv_monitors_cnt++;
4340 hci_dev_unlock(hdev);
4341 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4345 hci_dev_unlock(hdev);
4350 hci_free_adv_monitor(hdev, m);
4351 hci_dev_unlock(hdev);
4352 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy the caller-supplied RSSI thresholds/timeouts into the monitor,
 * converting the 16-bit timeouts from little-endian. When 'rssi' is NULL
 * (the non-RSSI opcode), fill in the least constraining defaults so the
 * MSFT backend behaves as if no RSSI filtering was requested.
 */
4355 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4356 struct mgmt_adv_rssi_thresholds *rssi)
4359 m->rssi.low_threshold = rssi->low_threshold;
4360 m->rssi.low_threshold_timeout =
4361 __le16_to_cpu(rssi->low_threshold_timeout);
4362 m->rssi.high_threshold = rssi->high_threshold;
4363 m->rssi.high_threshold_timeout =
4364 __le16_to_cpu(rssi->high_threshold_timeout);
4365 m->rssi.sampling_period = rssi->sampling_period;
4367 /* Default values. These numbers are the least constricting
4368 * parameters for MSFT API to work, so it behaves as if there
4369 * are no rssi parameter to consider. May need to be changed
4370 * if other API are to be supported.
4372 m->rssi.low_threshold = -127;
4373 m->rssi.low_threshold_timeout = 60;
4374 m->rssi.high_threshold = -127;
4375 m->rssi.high_threshold_timeout = 0;
4376 m->rssi.sampling_period = 0;
/* Validate and copy 'pattern_count' advertisement patterns into the
 * monitor's pattern list. Each pattern's offset/length must fit inside a
 * single AD structure (HCI_MAX_AD_LENGTH). Returns an MGMT status code;
 * on INVALID_PARAMS/NO_RESOURCES, patterns already added remain on
 * m->patterns for the caller's cleanup path to free.
 */
4380 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4381 struct mgmt_adv_pattern *patterns)
4383 u8 offset = 0, length = 0;
4384 struct adv_pattern *p = NULL;
4387 for (i = 0; i < pattern_count; i++) {
4388 offset = patterns[i].offset;
4389 length = patterns[i].length;
/* Bounds-check before copying: pattern must lie within the AD payload. */
4390 if (offset >= HCI_MAX_AD_LENGTH ||
4391 length > HCI_MAX_AD_LENGTH ||
4392 (offset + length) > HCI_MAX_AD_LENGTH)
4393 return MGMT_STATUS_INVALID_PARAMS;
4395 p = kmalloc(sizeof(*p), GFP_KERNEL);
4397 return MGMT_STATUS_NO_RESOURCES;
4399 p->ad_type = patterns[i].ad_type;
4400 p->offset = patterns[i].offset;
4401 p->length = patterns[i].length;
4402 memcpy(p->value, patterns[i].value, p->length);
4404 INIT_LIST_HEAD(&p->list);
4405 list_add(&p->list, &m->patterns);
4408 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds).
 * Validates that 'len' exactly matches the header plus the declared
 * number of patterns, allocates the monitor, applies default RSSI
 * parameters (rssi == NULL) and defers to __add_adv_patterns_monitor().
 */
4411 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4412 void *data, u16 len)
4414 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4415 struct adv_monitor *m = NULL;
4416 u8 status = MGMT_STATUS_SUCCESS;
4417 size_t expected_size = sizeof(*cp);
4419 BT_DBG("request for %s", hdev->name);
/* Must carry at least one pattern beyond the fixed header. */
4421 if (len <= sizeof(*cp)) {
4422 status = MGMT_STATUS_INVALID_PARAMS;
4426 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4427 if (len != expected_size) {
4428 status = MGMT_STATUS_INVALID_PARAMS;
4432 m = kzalloc(sizeof(*m), GFP_KERNEL);
4434 status = MGMT_STATUS_NO_RESOURCES;
4438 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi selects the least-constraining defaults. */
4440 parse_adv_monitor_rssi(m, NULL);
4441 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4444 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4445 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: identical to
 * add_adv_patterns_monitor() except the command carries explicit RSSI
 * thresholds, which are parsed into the monitor before registration.
 */
4448 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4449 void *data, u16 len)
4451 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4452 struct adv_monitor *m = NULL;
4453 u8 status = MGMT_STATUS_SUCCESS;
4454 size_t expected_size = sizeof(*cp);
4456 BT_DBG("request for %s", hdev->name);
4458 if (len <= sizeof(*cp)) {
4459 status = MGMT_STATUS_INVALID_PARAMS;
4463 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4464 if (len != expected_size) {
4465 status = MGMT_STATUS_INVALID_PARAMS;
4469 m = kzalloc(sizeof(*m), GFP_KERNEL);
4471 status = MGMT_STATUS_NO_RESOURCES;
4475 INIT_LIST_HEAD(&m->patterns);
/* Caller-provided thresholds, unlike the non-RSSI variant. */
4477 parse_adv_monitor_rssi(m, &cp->rssi);
4478 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4481 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4482 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for Remove Adv Monitor: answer the pending command
 * with the handle it asked to remove and refresh the background scan now
 * that the monitor set changed.
 */
4485 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4487 struct mgmt_rp_remove_adv_monitor rp;
4488 struct mgmt_cp_remove_adv_monitor *cp;
4489 struct mgmt_pending_cmd *cmd;
4494 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* Echo back the (already little-endian) handle from the request. */
4499 rp.monitor_handle = cp->monitor_handle;
4502 hci_update_background_scan(hdev);
4504 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4505 mgmt_status(status), &rp, sizeof(rp));
4506 mgmt_pending_remove(cmd);
4509 hci_dev_unlock(hdev);
4510 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4511 rp.monitor_handle, status);
/* MGMT_OP_REMOVE_ADV_MONITOR handler. Handle 0 means "remove all
 * monitors"; otherwise a single monitor is removed. As with add, the
 * request is rejected while conflicting commands are pending. If the core
 * can remove the monitor(s) without talking to the controller ('pending'
 * false), the command completes synchronously here.
 * NOTE(review): some branch/label lines are elided in this view.
 */
4516 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4517 void *data, u16 len)
4519 struct mgmt_cp_remove_adv_monitor *cp = data;
4520 struct mgmt_rp_remove_adv_monitor rp;
4521 struct mgmt_pending_cmd *cmd;
4522 u16 handle = __le16_to_cpu(cp->monitor_handle);
4526 BT_DBG("request for %s", hdev->name);
4527 rp.monitor_handle = cp->monitor_handle;
/* Serialize against other monitor/LE state-changing operations. */
4531 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4532 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4533 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4534 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4535 status = MGMT_STATUS_BUSY;
4539 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4541 status = MGMT_STATUS_NO_RESOURCES;
/* handle != 0: targeted removal; handle == 0: flush all monitors. */
4546 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4548 pending = hci_remove_all_adv_monitor(hdev, &err);
4551 mgmt_pending_remove(cmd);
4554 status = MGMT_STATUS_INVALID_INDEX;
4556 status = MGMT_STATUS_FAILED;
4561 /* monitor can be removed without forwarding request to controller */
4563 mgmt_pending_remove(cmd);
4564 hci_dev_unlock(hdev);
4566 return mgmt_cmd_complete(sk, hdev->id,
4567 MGMT_OP_REMOVE_ADV_MONITOR,
4568 MGMT_STATUS_SUCCESS,
4572 hci_dev_unlock(hdev);
4576 hci_dev_unlock(hdev);
4577 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* HCI completion callback for Read Local OOB Data. Translates the HCI
 * reply into a mgmt reply: the legacy command provides only the P-192
 * hash/randomizer (the P-256 fields are trimmed from the reply size),
 * while the extended command provides both P-192 and P-256 values.
 * Any HCI error or short skb maps to MGMT_STATUS_FAILED.
 */
4581 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4582 u16 opcode, struct sk_buff *skb)
4584 struct mgmt_rp_read_local_oob_data mgmt_rp;
4585 size_t rp_size = sizeof(mgmt_rp);
4586 struct mgmt_pending_cmd *cmd;
4588 bt_dev_dbg(hdev, "status %u", status);
4590 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4594 if (status || !skb) {
4595 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4596 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4600 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4602 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4603 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a truncated controller reply before dereferencing. */
4605 if (skb->len < sizeof(*rp)) {
4606 mgmt_cmd_status(cmd->sk, hdev->id,
4607 MGMT_OP_READ_LOCAL_OOB_DATA,
4608 MGMT_STATUS_FAILED);
4612 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4613 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy reply carries no P-256 data, so shrink the mgmt reply. */
4615 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4617 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4619 if (skb->len < sizeof(*rp)) {
4620 mgmt_cmd_status(cmd->sk, hdev->id,
4621 MGMT_OP_READ_LOCAL_OOB_DATA,
4622 MGMT_STATUS_FAILED);
4626 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4627 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4629 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4630 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4633 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4634 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4637 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller and no request already in flight. Issues the extended HCI
 * read when BR/EDR Secure Connections is enabled, the legacy read
 * otherwise; the reply is assembled in read_local_oob_data_complete().
 */
4640 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4641 void *data, u16 data_len)
4643 struct mgmt_pending_cmd *cmd;
4644 struct hci_request req;
4647 bt_dev_dbg(hdev, "sock %p", sk);
4651 if (!hdev_is_powered(hdev)) {
4652 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4653 MGMT_STATUS_NOT_POWERED);
4657 if (!lmp_ssp_capable(hdev)) {
4658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4659 MGMT_STATUS_NOT_SUPPORTED);
/* Only one OOB read may be outstanding per device. */
4663 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4669 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4675 hci_req_init(&req, hdev);
/* SC-capable controllers report P-256 data via the extended command. */
4677 if (bredr_sc_enabled(hdev))
4678 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4680 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4682 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4684 mgmt_pending_remove(cmd);
4687 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler. Two request sizes are accepted:
 * the short form carries only P-192 hash/randomizer (BR/EDR only), the
 * extended form carries both P-192 and P-256 values. Zero-valued key
 * pairs disable the corresponding OOB data; for LE addresses the P-192
 * values must be all-zero since legacy SMP OOB is not implemented.
 * Any other length is rejected with MGMT_STATUS_INVALID_PARAMS.
 */
4691 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4692 void *data, u16 len)
4694 struct mgmt_addr_info *addr = data;
4697 bt_dev_dbg(hdev, "sock %p", sk);
4699 if (!bdaddr_type_is_valid(addr->type))
4700 return mgmt_cmd_complete(sk, hdev->id,
4701 MGMT_OP_ADD_REMOTE_OOB_DATA,
4702 MGMT_STATUS_INVALID_PARAMS,
4703 addr, sizeof(*addr));
/* Short form: legacy P-192-only OOB data, BR/EDR addresses only. */
4707 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4708 struct mgmt_cp_add_remote_oob_data *cp = data;
4711 if (cp->addr.type != BDADDR_BREDR) {
4712 err = mgmt_cmd_complete(sk, hdev->id,
4713 MGMT_OP_ADD_REMOTE_OOB_DATA,
4714 MGMT_STATUS_INVALID_PARAMS,
4715 &cp->addr, sizeof(cp->addr));
4719 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4720 cp->addr.type, cp->hash,
4721 cp->rand, NULL, NULL);
4723 status = MGMT_STATUS_FAILED;
4725 status = MGMT_STATUS_SUCCESS;
4727 err = mgmt_cmd_complete(sk, hdev->id,
4728 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4729 &cp->addr, sizeof(cp->addr));
4730 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4731 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4732 u8 *rand192, *hash192, *rand256, *hash256;
4735 if (bdaddr_type_is_le(cp->addr.type)) {
4736 /* Enforce zero-valued 192-bit parameters as
4737 * long as legacy SMP OOB isn't implemented.
4739 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4740 memcmp(cp->hash192, ZERO_KEY, 16)) {
4741 err = mgmt_cmd_complete(sk, hdev->id,
4742 MGMT_OP_ADD_REMOTE_OOB_DATA,
4743 MGMT_STATUS_INVALID_PARAMS,
4744 addr, sizeof(*addr));
4751 /* In case one of the P-192 values is set to zero,
4752 * then just disable OOB data for P-192.
4754 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4755 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4759 rand192 = cp->rand192;
4760 hash192 = cp->hash192;
4764 /* In case one of the P-256 values is set to zero, then just
4765 * disable OOB data for P-256.
4767 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4768 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4772 rand256 = cp->rand256;
4773 hash256 = cp->hash256;
4776 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4777 cp->addr.type, hash192, rand192,
4780 status = MGMT_STATUS_FAILED;
4782 status = MGMT_STATUS_SUCCESS;
4784 err = mgmt_cmd_complete(sk, hdev->id,
4785 MGMT_OP_ADD_REMOTE_OOB_DATA,
4786 status, &cp->addr, sizeof(cp->addr));
4788 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4790 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4791 MGMT_STATUS_INVALID_PARAMS);
4795 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BR/EDR only. BDADDR_ANY clears
 * all stored remote OOB data; otherwise only the entry for the given
 * address is removed (unknown address -> INVALID_PARAMS).
 */
4799 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4800 void *data, u16 len)
4802 struct mgmt_cp_remove_remote_oob_data *cp = data;
4806 bt_dev_dbg(hdev, "sock %p", sk);
4808 if (cp->addr.type != BDADDR_BREDR)
4809 return mgmt_cmd_complete(sk, hdev->id,
4810 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4811 MGMT_STATUS_INVALID_PARAMS,
4812 &cp->addr, sizeof(cp->addr));
/* Wildcard address: flush the whole remote OOB store. */
4816 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4817 hci_remote_oob_data_clear(hdev);
4818 status = MGMT_STATUS_SUCCESS;
4822 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4824 status = MGMT_STATUS_INVALID_PARAMS;
4826 status = MGMT_STATUS_SUCCESS;
4829 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4830 status, &cp->addr, sizeof(cp->addr));
4832 hci_dev_unlock(hdev);
/* Completion hook for the three discovery start opcodes: find whichever
 * start command is pending, complete it with the HCI status, then wake
 * the suspend machinery if discovery was being unpaused.
 */
4836 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4838 struct mgmt_pending_cmd *cmd;
4840 bt_dev_dbg(hdev, "status %u", status);
/* Try each start variant in turn; only one can be pending. */
4844 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4846 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4849 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4852 cmd->cmd_complete(cmd, mgmt_status(status));
4853 mgmt_pending_remove(cmd);
4856 hci_dev_unlock(hdev);
4858 /* Handle suspend notifier */
4859 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4860 hdev->suspend_tasks)) {
4861 bt_dev_dbg(hdev, "Unpaused discovery");
4862 wake_up(&hdev->suspend_wait_q);
/* Check whether the requested discovery type is usable on this
 * controller. On failure *mgmt_status receives the reason (LE or BR/EDR
 * support check result, or INVALID_PARAMS for an unknown type).
 */
4866 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4867 uint8_t *mgmt_status)
4870 case DISCOV_TYPE_LE:
4871 *mgmt_status = mgmt_le_support(hdev);
4875 case DISCOV_TYPE_INTERLEAVED:
4876 *mgmt_status = mgmt_le_support(hdev);
4880 case DISCOV_TYPE_BREDR:
4881 *mgmt_status = mgmt_bredr_support(hdev);
4886 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY. Validates power state, current
 * discovery state, type support and the suspend pause flag, then records
 * the discovery parameters, queues a pending command and schedules the
 * discov_update work; completion arrives via
 * mgmt_start_discovery_complete().
 */
4893 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4894 u16 op, void *data, u16 len)
4896 struct mgmt_cp_start_discovery *cp = data;
4897 struct mgmt_pending_cmd *cmd;
4901 bt_dev_dbg(hdev, "sock %p", sk);
4905 if (!hdev_is_powered(hdev)) {
4906 err = mgmt_cmd_complete(sk, hdev->id, op,
4907 MGMT_STATUS_NOT_POWERED,
4908 &cp->type, sizeof(cp->type));
/* Busy if discovery already runs or periodic inquiry is active. */
4912 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4913 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4914 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4915 &cp->type, sizeof(cp->type));
4919 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4920 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4921 &cp->type, sizeof(cp->type));
4925 /* Can't start discovery when it is paused */
4926 if (hdev->discovery_paused) {
4927 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4928 &cp->type, sizeof(cp->type));
4932 /* Clear the discovery filter first to free any previously
4933 * allocated memory for the UUID list.
4935 hci_discovery_filter_clear(hdev);
4937 hdev->discovery.type = cp->type;
4938 hdev->discovery.report_invalid_rssi = false;
/* Limited discovery only differs in this flag. */
4939 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4940 hdev->discovery.limited = true;
4942 hdev->discovery.limited = false;
4944 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4950 cmd->cmd_complete = generic_cmd_complete;
4952 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4953 queue_work(hdev->req_workqueue, &hdev->discov_update);
4957 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY: thin wrapper around the shared starter. */
4961 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4962 void *data, u16 len)
4964 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY: thin wrapper around the shared
 * starter; the limited flag is applied inside start_discovery_internal().
 */
4968 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4969 void *data, u16 len)
4971 return start_discovery_internal(sk, hdev,
4972 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete callback for Start Service Discovery: reply with the
 * command parameters stored on the pending command. */
4976 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4979 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * an RSSI threshold and a variable-length list of 128-bit UUID filters.
 * Validates uuid_count against both the u16 overflow bound and the exact
 * request length, duplicates the UUID list for the discovery filter and
 * schedules the discov_update work.
 */
4983 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4984 void *data, u16 len)
4986 struct mgmt_cp_start_service_discovery *cp = data;
4987 struct mgmt_pending_cmd *cmd;
/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16. */
4988 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4989 u16 uuid_count, expected_len;
4993 bt_dev_dbg(hdev, "sock %p", sk);
4997 if (!hdev_is_powered(hdev)) {
4998 err = mgmt_cmd_complete(sk, hdev->id,
4999 MGMT_OP_START_SERVICE_DISCOVERY,
5000 MGMT_STATUS_NOT_POWERED,
5001 &cp->type, sizeof(cp->type));
5005 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5006 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5007 err = mgmt_cmd_complete(sk, hdev->id,
5008 MGMT_OP_START_SERVICE_DISCOVERY,
5009 MGMT_STATUS_BUSY, &cp->type,
5014 if (hdev->discovery_paused) {
5015 err = mgmt_cmd_complete(sk, hdev->id,
5016 MGMT_OP_START_SERVICE_DISCOVERY,
5017 MGMT_STATUS_BUSY, &cp->type,
5022 uuid_count = __le16_to_cpu(cp->uuid_count);
5023 if (uuid_count > max_uuid_count) {
5024 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5026 err = mgmt_cmd_complete(sk, hdev->id,
5027 MGMT_OP_START_SERVICE_DISCOVERY,
5028 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The request length must match the declared UUID count exactly. */
5033 expected_len = sizeof(*cp) + uuid_count * 16;
5034 if (expected_len != len) {
5035 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5037 err = mgmt_cmd_complete(sk, hdev->id,
5038 MGMT_OP_START_SERVICE_DISCOVERY,
5039 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5044 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5045 err = mgmt_cmd_complete(sk, hdev->id,
5046 MGMT_OP_START_SERVICE_DISCOVERY,
5047 status, &cp->type, sizeof(cp->type));
5051 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5058 cmd->cmd_complete = service_discovery_cmd_complete;
5060 /* Clear the discovery filter first to free any previously
5061 * allocated memory for the UUID list.
5063 hci_discovery_filter_clear(hdev);
5065 hdev->discovery.result_filtering = true;
5066 hdev->discovery.type = cp->type;
5067 hdev->discovery.rssi = cp->rssi;
5068 hdev->discovery.uuid_count = uuid_count;
5070 if (uuid_count > 0) {
/* Own copy of the UUID filters; freed by hci_discovery_filter_clear(). */
5071 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5073 if (!hdev->discovery.uuids) {
5074 err = mgmt_cmd_complete(sk, hdev->id,
5075 MGMT_OP_START_SERVICE_DISCOVERY,
5077 &cp->type, sizeof(cp->type));
5078 mgmt_pending_remove(cmd);
5083 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5084 queue_work(hdev->req_workqueue, &hdev->discov_update);
5088 hci_dev_unlock(hdev);
/* Completion hook for Stop Discovery: answer the pending command with the
 * HCI status and wake the suspend machinery if discovery was being
 * paused for suspend.
 */
5092 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5094 struct mgmt_pending_cmd *cmd;
5096 bt_dev_dbg(hdev, "status %u", status);
5100 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5102 cmd->cmd_complete(cmd, mgmt_status(status));
5103 mgmt_pending_remove(cmd);
5106 hci_dev_unlock(hdev);
5108 /* Handle suspend notifier */
5109 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5110 bt_dev_dbg(hdev, "Paused discovery");
5111 wake_up(&hdev->suspend_wait_q);
/* MGMT_OP_STOP_DISCOVERY handler: rejected when no discovery is active,
 * and the requested type must match the type discovery was started with.
 * Otherwise transitions to DISCOVERY_STOPPING and lets the discov_update
 * work do the actual stop; completion via mgmt_stop_discovery_complete().
 */
5115 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5118 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5119 struct mgmt_pending_cmd *cmd;
5122 bt_dev_dbg(hdev, "sock %p", sk);
5126 if (!hci_discovery_active(hdev)) {
5127 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5128 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5129 sizeof(mgmt_cp->type));
/* The stop request must name the same discovery type as the start. */
5133 if (hdev->discovery.type != mgmt_cp->type) {
5134 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5135 MGMT_STATUS_INVALID_PARAMS,
5136 &mgmt_cp->type, sizeof(mgmt_cp->type));
5140 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5146 cmd->cmd_complete = generic_cmd_complete;
5148 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5149 queue_work(hdev->req_workqueue, &hdev->discov_update);
5153 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, user space confirms
 * whether a device's name is already known. Unknown-name cache entries
 * are marked NAME_KNOWN or NAME_NEEDED accordingly; NAME_NEEDED entries
 * are re-queued for name resolution.
 */
5157 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5160 struct mgmt_cp_confirm_name *cp = data;
5161 struct inquiry_entry *e;
5164 bt_dev_dbg(hdev, "sock %p", sk);
/* Only meaningful while discovery is running. */
5168 if (!hci_discovery_active(hdev)) {
5169 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5170 MGMT_STATUS_FAILED, &cp->addr,
5175 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5177 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5178 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5183 if (cp->name_known) {
5184 e->name_state = NAME_KNOWN;
5187 e->name_state = NAME_NEEDED;
5188 hci_inquiry_cache_update_resolve(hdev, e);
5191 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5192 &cp->addr, sizeof(cp->addr));
5195 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to hdev->reject_list and
 * broadcast MGMT_EV_DEVICE_BLOCKED to other mgmt sockets on success.
 */
5199 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5202 struct mgmt_cp_block_device *cp = data;
5206 bt_dev_dbg(hdev, "sock %p", sk);
5208 if (!bdaddr_type_is_valid(cp->addr.type))
5209 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5210 MGMT_STATUS_INVALID_PARAMS,
5211 &cp->addr, sizeof(cp->addr));
5215 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5218 status = MGMT_STATUS_FAILED;
5222 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5224 status = MGMT_STATUS_SUCCESS;
5227 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5228 &cp->addr, sizeof(cp->addr));
5230 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the reject
 * list (not-found -> INVALID_PARAMS) and broadcast
 * MGMT_EV_DEVICE_UNBLOCKED on success.
 */
5235 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5238 struct mgmt_cp_unblock_device *cp = data;
5242 bt_dev_dbg(hdev, "sock %p", sk);
5244 if (!bdaddr_type_is_valid(cp->addr.type))
5245 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5246 MGMT_STATUS_INVALID_PARAMS,
5247 &cp->addr, sizeof(cp->addr));
5251 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5254 status = MGMT_STATUS_INVALID_PARAMS;
5258 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5260 status = MGMT_STATUS_SUCCESS;
5263 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5264 &cp->addr, sizeof(cp->addr));
5266 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the DI source/vendor/product/
 * version identifiers (source must be 0x0000-0x0002) and refresh the EIR
 * data so the new Device ID record is advertised.
 */
5271 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5274 struct mgmt_cp_set_device_id *cp = data;
5275 struct hci_request req;
5279 bt_dev_dbg(hdev, "sock %p", sk);
5281 source = __le16_to_cpu(cp->source);
/* Valid DI sources: 0 = disabled, 1 = Bluetooth SIG, 2 = USB IF. */
5283 if (source > 0x0002)
5284 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5285 MGMT_STATUS_INVALID_PARAMS);
5289 hdev->devid_source = source;
5290 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5291 hdev->devid_product = __le16_to_cpu(cp->product);
5292 hdev->devid_version = __le16_to_cpu(cp->version);
5294 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5297 hci_req_init(&req, hdev);
5298 __hci_req_update_eir(&req);
5299 hci_req_run(&req, NULL);
5301 hci_dev_unlock(hdev);
/* Trivial hci_req callback used when re-enabling instance advertising:
 * just logs the status. */
5306 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5309 bt_dev_dbg(hdev, "status %u", status);
/* HCI completion callback for Set Advertising. On error, fail all
 * pending SET_ADVERTISING commands. On success, sync HCI_ADVERTISING
 * with the controller's HCI_LE_ADV state, answer the pending commands,
 * emit new settings, service suspend pause/unpause waiters, and — when
 * Set Advertising was just turned off while advertising instances exist —
 * reschedule instance-based advertising.
 * NOTE(review): several branch/label lines are elided in this view.
 */
5312 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5315 struct cmd_lookup match = { NULL, hdev };
5316 struct hci_request req;
5318 struct adv_info *adv_instance;
5324 u8 mgmt_err = mgmt_status(status);
5326 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5327 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's actual advertising state into the flag. */
5331 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5332 hci_dev_set_flag(hdev, HCI_ADVERTISING)
5334 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5336 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5339 new_settings(hdev, match.sk);
5344 /* Handle suspend notifier */
5345 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5346 hdev->suspend_tasks)) {
5347 bt_dev_dbg(hdev, "Paused advertising");
5348 wake_up(&hdev->suspend_wait_q);
5349 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5350 hdev->suspend_tasks)) {
5351 bt_dev_dbg(hdev, "Unpaused advertising");
5352 wake_up(&hdev->suspend_wait_q);
5355 /* If "Set Advertising" was just disabled and instance advertising was
5356 * set up earlier, then re-enable multi-instance advertising.
5358 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5359 list_empty(&hdev->adv_instances))
5362 instance = hdev->cur_adv_instance;
/* No current instance selected: fall back to the first configured one. */
5364 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5365 struct adv_info, list);
5369 instance = adv_instance->instance;
5372 hci_req_init(&req, hdev);
5374 err = __hci_req_schedule_adv_instance(&req, instance, true);
5377 err = hci_req_run(&req, enable_advertising_instance);
5380 bt_dev_err(hdev, "failed to re-configure advertising");
5383 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler. Accepted values: 0x00 off, 0x01 on,
 * 0x02 on + connectable. Rejected when LE is unsupported, LL privacy is
 * enabled, or advertising is paused for suspend. When no HCI traffic is
 * needed (powered off, no state change, LE connections present, or
 * active scan running) only the flags are toggled and settings are
 * replied directly; otherwise an HCI request (re)configures advertising
 * and completion arrives in set_advertising_complete().
 * NOTE(review): several branch/label lines are elided in this view.
 */
5386 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5389 struct mgmt_mode *cp = data;
5390 struct mgmt_pending_cmd *cmd;
5391 struct hci_request req;
5395 bt_dev_dbg(hdev, "sock %p", sk);
5397 status = mgmt_le_support(hdev);
5399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5402 /* Enabling the experimental LL Privay support disables support for
5405 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5406 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5407 MGMT_STATUS_NOT_SUPPORTED);
5409 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5410 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5411 MGMT_STATUS_INVALID_PARAMS);
5413 if (hdev->advertising_paused)
5414 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5421 /* The following conditions are ones which mean that we should
5422 * not do any HCI communication but directly send a mgmt
5423 * response to user space (after toggling the flag if
5426 if (!hdev_is_powered(hdev) ||
5427 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5428 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5429 hci_conn_num(hdev, LE_LINK) > 0 ||
5430 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5431 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5435 hdev->cur_adv_instance = 0x00;
5436 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
/* 0x02 additionally requests connectable advertising. */
5437 if (cp->val == 0x02)
5438 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5440 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5442 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5443 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5446 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5451 err = new_settings(hdev, sk);
5456 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5457 pending_find(MGMT_OP_SET_LE, hdev)) {
5458 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5463 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5469 hci_req_init(&req, hdev);
5471 if (cp->val == 0x02)
5472 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5474 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5476 cancel_adv_timeout(hdev);
5479 /* Switch to instance "0" for the Set Advertising setting.
5480 * We cannot use update_[adv|scan_rsp]_data() here as the
5481 * HCI_ADVERTISING flag is not yet set.
5483 hdev->cur_adv_instance = 0x00;
/* Extended advertising uses the ext API; legacy uses three steps. */
5485 if (ext_adv_capable(hdev)) {
5486 __hci_req_start_ext_adv(&req, 0x00);
5488 __hci_req_update_adv_data(&req, 0x00);
5489 __hci_req_update_scan_rsp_data(&req, 0x00);
5490 __hci_req_enable_advertising(&req);
5493 __hci_req_disable_advertising(&req);
5496 err = hci_req_run(&req, set_advertising_complete);
5498 mgmt_pending_remove(cmd);
5501 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed on LE-capable,
 * powered-off controllers. A non-ANY address must not be BDADDR_NONE and
 * must have its two most significant bits set, per the Core spec's
 * static-address format; BDADDR_ANY clears the static address.
 */
5505 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5506 void *data, u16 len)
5508 struct mgmt_cp_set_static_address *cp = data;
5511 bt_dev_dbg(hdev, "sock %p", sk);
5513 if (!lmp_le_capable(hdev))
5514 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5515 MGMT_STATUS_NOT_SUPPORTED);
/* Address may only be changed while the controller is powered off. */
5517 if (hdev_is_powered(hdev))
5518 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5519 MGMT_STATUS_REJECTED);
5521 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5522 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5523 return mgmt_cmd_status(sk, hdev->id,
5524 MGMT_OP_SET_STATIC_ADDRESS,
5525 MGMT_STATUS_INVALID_PARAMS);
5527 /* Two most significant bits shall be set */
5528 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5529 return mgmt_cmd_status(sk, hdev->id,
5530 MGMT_OP_SET_STATIC_ADDRESS,
5531 MGMT_STATUS_INVALID_PARAMS);
5536 bacpy(&hdev->static_addr, &cp->bdaddr);
5538 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5542 err = new_settings(hdev, sk);
5545 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: store new LE scan interval/window.
 * Both must lie in 0x0004-0x4000 and window must not exceed interval
 * (HCI LE Set Scan Parameters constraints). If passive background
 * scanning is currently running (LE_SCAN flag set while discovery is
 * stopped), restart it so the new parameters take effect.
 */
5549 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5550 void *data, u16 len)
5552 struct mgmt_cp_set_scan_params *cp = data;
5553 __u16 interval, window;
5556 bt_dev_dbg(hdev, "sock %p", sk);
5558 if (!lmp_le_capable(hdev))
5559 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5560 MGMT_STATUS_NOT_SUPPORTED);
5562 interval = __le16_to_cpu(cp->interval);
5564 if (interval < 0x0004 || interval > 0x4000)
5565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5566 MGMT_STATUS_INVALID_PARAMS);
5568 window = __le16_to_cpu(cp->window);
5570 if (window < 0x0004 || window > 0x4000)
5571 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5572 MGMT_STATUS_INVALID_PARAMS);
5574 if (window > interval)
5575 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5576 MGMT_STATUS_INVALID_PARAMS);
5580 hdev->le_scan_interval = interval;
5581 hdev->le_scan_window = window;
5583 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5586 /* If background scan is running, restart it so new parameters are
5589 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5590 hdev->discovery.state == DISCOVERY_STOPPED) {
5591 struct hci_request req;
5593 hci_req_init(&req, hdev);
5595 hci_req_add_le_scan_disable(&req, false);
5596 hci_req_add_le_passive_scan(&req);
5598 hci_req_run(&req, NULL);
5601 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable: on error,
 * report the failure to the pending mgmt command; on success, update
 * the HCI_FAST_CONNECTABLE flag from the command's requested mode and
 * notify settings listeners.
 * NOTE(review): elided dump — locals, braces and early-exit paths are
 * missing from this view.
 */
5606 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5609 struct mgmt_pending_cmd *cmd;
5611 bt_dev_dbg(hdev, "status 0x%02x", status);
5615 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5620 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5621 mgmt_status(status));
/* Success path: mirror the requested value into the dev flag. */
5623 struct mgmt_mode *cp = cmd->param;
5626 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5628 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5630 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5631 new_settings(hdev, cmd->sk);
5634 mgmt_pending_remove(cmd);
5637 hci_dev_unlock(hdev);
/* MGMT Set Fast Connectable handler: requires BR/EDR enabled and
 * HCI version >= 1.2.  When powered, queues a write of the fast
 * connectable page-scan parameters and completes asynchronously via
 * fast_connectable_complete(); when powered off, just toggles the flag.
 * NOTE(review): elided dump — goto labels, unlock/return path and some
 * status arguments are missing from this view.
 */
5640 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5641 void *data, u16 len)
5643 struct mgmt_mode *cp = data;
5644 struct mgmt_pending_cmd *cmd;
5645 struct hci_request req;
5648 bt_dev_dbg(hdev, "sock %p", sk);
5650 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5651 hdev->hci_ver < BLUETOOTH_VER_1_2)
5652 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5653 MGMT_STATUS_NOT_SUPPORTED);
5655 if (cp->val != 0x00 && cp->val != 0x01)
5656 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5657 MGMT_STATUS_INVALID_PARAMS);
/* Only one Set Fast Connectable operation may be in flight. */
5661 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5662 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested state already matches the flag. */
5667 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5668 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
/* Powered off: flip the flag now, apply to hardware on next power-on. */
5673 if (!hdev_is_powered(hdev)) {
5674 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5675 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5677 new_settings(hdev, sk);
5681 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5688 hci_req_init(&req, hdev);
5690 __hci_req_write_fast_connectable(&req, cp->val);
5692 err = hci_req_run(&req, fast_connectable_complete);
5694 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5695 MGMT_STATUS_FAILED);
5696 mgmt_pending_remove(cmd);
5700 hci_dev_unlock(hdev);
/* HCI request completion callback for Set BR/EDR: on error, roll back
 * the HCI_BREDR_ENABLED flag that was set optimistically by set_bredr()
 * and report the failure; on success, confirm the new settings.
 * NOTE(review): elided dump — locals, braces and early exits are
 * missing from this view.
 */
5705 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5707 struct mgmt_pending_cmd *cmd;
5709 bt_dev_dbg(hdev, "status 0x%02x", status);
5713 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5718 u8 mgmt_err = mgmt_status(status);
5720 /* We need to restore the flag if related HCI commands
5723 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5725 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5727 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5728 new_settings(hdev, cmd->sk);
5731 mgmt_pending_remove(cmd);
5734 hci_dev_unlock(hdev);
/* MGMT Set BR/EDR handler: enables/disables the BR/EDR transport on a
 * dual-mode controller.  Disabling while powered is rejected, as is
 * re-enabling when the controller is configured LE-only with a static
 * address or with Secure Connections enabled.  When powered, the flag
 * is set optimistically and confirmed/rolled back by
 * set_bredr_complete().
 * NOTE(review): elided dump — goto labels, unlock/return path and some
 * condition bodies are missing from this view.
 */
5737 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5739 struct mgmt_mode *cp = data;
5740 struct mgmt_pending_cmd *cmd;
5741 struct hci_request req;
5744 bt_dev_dbg(hdev, "sock %p", sk);
5746 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5747 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5748 MGMT_STATUS_NOT_SUPPORTED);
/* LE must stay enabled; BR/EDR-only operation is configured elsewhere. */
5750 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5752 MGMT_STATUS_REJECTED);
5754 if (cp->val != 0x00 && cp->val != 0x01)
5755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5756 MGMT_STATUS_INVALID_PARAMS);
5760 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5761 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: apply the toggle directly; disabling BR/EDR also clears
 * every BR/EDR-only setting that would otherwise be left dangling.
 */
5765 if (!hdev_is_powered(hdev)) {
5767 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5768 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5769 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5770 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5771 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5774 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5776 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5780 err = new_settings(hdev, sk);
5784 /* Reject disabling when powered on */
5786 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5787 MGMT_STATUS_REJECTED);
5790 /* When configuring a dual-mode controller to operate
5791 * with LE only and using a static address, then switching
5792 * BR/EDR back on is not allowed.
5794 * Dual-mode controllers shall operate with the public
5795 * address as its identity address for BR/EDR and LE. So
5796 * reject the attempt to create an invalid configuration.
5798 * The same restrictions applies when secure connections
5799 * has been enabled. For BR/EDR this is a controller feature
5800 * while for LE it is a host stack feature. This means that
5801 * switching BR/EDR back on when secure connections has been
5802 * enabled is not a supported transaction.
5804 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5805 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5806 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5807 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5808 MGMT_STATUS_REJECTED);
5813 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5814 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5819 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5825 /* We need to flip the bit already here so that
5826 * hci_req_update_adv_data generates the correct flags.
5828 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5830 hci_req_init(&req, hdev);
5832 __hci_req_write_fast_connectable(&req, false);
5833 __hci_req_update_scan(&req);
5835 /* Since only the advertising data flags will change, there
5836 * is no need to update the scan response data.
5838 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5840 err = hci_req_run(&req, set_bredr_complete);
5842 mgmt_pending_remove(cmd);
5845 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Secure Connections: on error,
 * report the failure; on success, set HCI_SC_ENABLED/HCI_SC_ONLY
 * according to the requested mode (0x00 = off, 0x01 = SC, 0x02 = SC
 * only — the per-mode branches' conditions are elided in this view)
 * and notify settings listeners.
 * NOTE(review): elided dump — the switch/if structure selecting between
 * the three flag combinations is missing from this view.
 */
5849 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5851 struct mgmt_pending_cmd *cmd;
5852 struct mgmt_mode *cp;
5854 bt_dev_dbg(hdev, "status %u", status);
5858 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5863 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5864 mgmt_status(status));
/* Disable case: both SC flags cleared. */
5872 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5873 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* SC enabled (mixed mode). */
5876 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5877 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* SC-only mode. */
5880 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5881 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5885 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5886 new_settings(hdev, cmd->sk);
5889 mgmt_pending_remove(cmd);
5891 hci_dev_unlock(hdev);
/* MGMT Set Secure Connections handler: val 0x00 disables SC, 0x01
 * enables SC, 0x02 enables SC-only mode.  When the controller is off
 * or not SC-capable over BR/EDR, only the host flags are updated;
 * otherwise HCI_OP_WRITE_SC_SUPPORT is issued and completion is
 * handled by sc_enable_complete().
 * NOTE(review): elided dump — locals (e.g. `val`, `err`), goto labels
 * and the unlock/return path are missing from this view.
 */
5894 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5895 void *data, u16 len)
5897 struct mgmt_mode *cp = data;
5898 struct mgmt_pending_cmd *cmd;
5899 struct hci_request req;
5903 bt_dev_dbg(hdev, "sock %p", sk);
5905 if (!lmp_sc_capable(hdev) &&
5906 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5908 MGMT_STATUS_NOT_SUPPORTED);
/* For BR/EDR, SC is only meaningful on top of SSP. */
5910 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5911 lmp_sc_capable(hdev) &&
5912 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5913 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5914 MGMT_STATUS_REJECTED);
5916 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5917 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5918 MGMT_STATUS_INVALID_PARAMS);
/* Host-flag-only path: powered off, or no SC/BR/EDR on the controller. */
5922 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5923 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5927 changed = !hci_dev_test_and_set_flag(hdev,
5929 if (cp->val == 0x02)
5930 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5932 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5934 changed = hci_dev_test_and_clear_flag(hdev,
5936 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5939 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5944 err = new_settings(hdev, sk);
5949 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5950 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No-op when both SC and SC-only already match the request. */
5957 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5958 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5959 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5963 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5969 hci_req_init(&req, hdev);
5970 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5971 err = hci_req_run(&req, sc_enable_complete);
5973 mgmt_pending_remove(cmd);
5978 hci_dev_unlock(hdev);
/* MGMT Set Debug Keys handler: 0x00 = discard debug keys, 0x01 = keep
 * them (HCI_KEEP_DEBUG_KEYS), 0x02 = additionally use SSP debug mode
 * (HCI_USE_DEBUG_KEYS).  If the use-mode changed on a powered, SSP
 * enabled controller, HCI_OP_WRITE_SSP_DEBUG_MODE is sent directly.
 * NOTE(review): elided dump — the branch selecting set vs. clear of
 * HCI_KEEP_DEBUG_KEYS and the unlock/return path are missing from this
 * view.
 */
5982 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5983 void *data, u16 len)
5985 struct mgmt_mode *cp = data;
5986 bool changed, use_changed;
5989 bt_dev_dbg(hdev, "sock %p", sk);
5991 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5993 MGMT_STATUS_INVALID_PARAMS);
5998 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6000 changed = hci_dev_test_and_clear_flag(hdev,
6001 HCI_KEEP_DEBUG_KEYS);
6003 if (cp->val == 0x02)
6004 use_changed = !hci_dev_test_and_set_flag(hdev,
6005 HCI_USE_DEBUG_KEYS);
6007 use_changed = hci_dev_test_and_clear_flag(hdev,
6008 HCI_USE_DEBUG_KEYS);
/* Push the SSP debug mode change to the controller immediately. */
6010 if (hdev_is_powered(hdev) && use_changed &&
6011 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6012 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6013 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6014 sizeof(mode), &mode);
6017 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6022 err = new_settings(hdev, sk);
6025 hci_dev_unlock(hdev);
/* MGMT Set Privacy handler: enables/disables LE privacy (RPA usage).
 * privacy 0x01 enables privacy with the supplied IRK, 0x02 additionally
 * sets limited privacy, 0x00 disables and wipes the IRK.  Rejected
 * while powered.
 * NOTE(review): elided dump — the enable/disable branch structure,
 * locals and the unlock/return path are missing from this view.
 */
6029 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6032 struct mgmt_cp_set_privacy *cp = cp_data;
6036 bt_dev_dbg(hdev, "sock %p", sk);
6038 if (!lmp_le_capable(hdev))
6039 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6040 MGMT_STATUS_NOT_SUPPORTED);
6042 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6043 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6044 MGMT_STATUS_INVALID_PARAMS);
6046 if (hdev_is_powered(hdev))
6047 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6048 MGMT_STATUS_REJECTED);
6052 /* If user space supports this command it is also expected to
6053 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6055 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: store the IRK, force RPA regeneration, and track
 * limited privacy (0x02) via HCI_LIMITED_PRIVACY.
 */
6058 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6059 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6060 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6061 hci_adv_instances_set_rpa_expired(hdev, true);
6062 if (cp->privacy == 0x02)
6063 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6065 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: clear flags and zeroize the stored IRK. */
6067 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6068 memset(hdev->irk, 0, sizeof(hdev->irk));
6069 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6070 hci_adv_instances_set_rpa_expired(hdev, false);
6071 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6074 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6079 err = new_settings(hdev, sk);
6082 hci_dev_unlock(hdev);
/* Validate the address part of an IRK entry: public LE addresses are
 * accepted as-is; random LE addresses must be static (two MSBs set).
 * NOTE(review): elided dump — the return statements and closing braces
 * are missing from this view.
 */
6086 static bool irk_is_valid(struct mgmt_irk_info *irk)
6088 switch (irk->addr.type) {
6089 case BDADDR_LE_PUBLIC:
6092 case BDADDR_LE_RANDOM:
6093 /* Two most significant bits shall be set */
6094 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load IRKs handler: validates the variable-length IRK list
 * (count bound, exact struct_size() length, per-entry address
 * validity), then atomically replaces the stored IRKs, skipping any
 * key on the blocked-key list, and enables RPA resolving.
 * NOTE(review): elided dump — braces, `continue` after the blocked-key
 * warning, the hci_add_irk() trailing arguments and the return path
 * are missing from this view.
 */
6102 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6105 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound chosen so the full payload fits in a u16 length. */
6106 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6107 sizeof(struct mgmt_irk_info));
6108 u16 irk_count, expected_len;
6111 bt_dev_dbg(hdev, "sock %p", sk);
6113 if (!lmp_le_capable(hdev))
6114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6115 MGMT_STATUS_NOT_SUPPORTED);
6117 irk_count = __le16_to_cpu(cp->irk_count);
6118 if (irk_count > max_irk_count) {
6119 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6121 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6122 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must exactly match the received length. */
6125 expected_len = struct_size(cp, irks, irk_count);
6126 if (expected_len != len) {
6127 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6129 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6130 MGMT_STATUS_INVALID_PARAMS);
6133 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate every entry before mutating any state. */
6135 for (i = 0; i < irk_count; i++) {
6136 struct mgmt_irk_info *key = &cp->irks[i];
6138 if (!irk_is_valid(key))
6139 return mgmt_cmd_status(sk, hdev->id,
6141 MGMT_STATUS_INVALID_PARAMS);
6146 hci_smp_irks_clear(hdev);
6148 for (i = 0; i < irk_count; i++) {
6149 struct mgmt_irk_info *irk = &cp->irks[i];
6151 if (hci_is_blocked_key(hdev,
6152 HCI_BLOCKED_KEY_TYPE_IRK,
6154 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6159 hci_add_irk(hdev, &irk->addr.bdaddr,
6160 le_addr_type(irk->addr.type), irk->val,
6164 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6166 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6168 hci_dev_unlock(hdev);
/* (Tizen extension) MGMT Set Advertising Params handler: stores LE
 * advertising min/max interval, filter policy and advertising type on
 * hdev.  Rejected while the HCI_ADVERTISING flag is set.
 * NOTE(review): elided dump — locals (interval variables, err), the
 * rejection status for the advertising-active case and the return path
 * are missing from this view.
 */
6174 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
6175 void *data, u16 len)
6177 struct mgmt_cp_set_advertising_params *cp = data;
6182 BT_DBG("%s", hdev->name);
6184 if (!lmp_le_capable(hdev))
6185 return mgmt_cmd_status(sk, hdev->id,
6186 MGMT_OP_SET_ADVERTISING_PARAMS,
6187 MGMT_STATUS_NOT_SUPPORTED);
/* Parameters cannot be changed while advertising is enabled. */
6189 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6190 return mgmt_cmd_status(sk, hdev->id,
6191 MGMT_OP_SET_ADVERTISING_PARAMS,
6194 min_interval = __le16_to_cpu(cp->interval_min);
6195 max_interval = __le16_to_cpu(cp->interval_max);
/* 0x0020..0x4000 matches the HCI LE advertising interval range. */
6197 if (min_interval > max_interval ||
6198 min_interval < 0x0020 || max_interval > 0x4000)
6199 return mgmt_cmd_status(sk, hdev->id,
6200 MGMT_OP_SET_ADVERTISING_PARAMS,
6201 MGMT_STATUS_INVALID_PARAMS);
6205 hdev->le_adv_min_interval = min_interval;
6206 hdev->le_adv_max_interval = max_interval;
6207 hdev->adv_filter_policy = cp->filter_policy;
6208 hdev->adv_type = cp->type;
6210 err = mgmt_cmd_complete(sk, hdev->id,
6211 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
6213 hci_dev_unlock(hdev);
/* (Tizen extension) completion callback for Set Advertising Data:
 * forwards the HCI status to the pending mgmt command and removes it.
 * NOTE(review): elided dump — locals/braces and the complete-response
 * payload arguments are missing from this view.
 */
6218 static void set_advertising_data_complete(struct hci_dev *hdev,
6219 u8 status, u16 opcode)
6221 struct mgmt_cp_set_advertising_data *cp;
6222 struct mgmt_pending_cmd *cmd;
6224 BT_DBG("status 0x%02x", status);
6228 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
6235 mgmt_cmd_status(cmd->sk, hdev->id,
6236 MGMT_OP_SET_ADVERTISING_DATA,
6237 mgmt_status(status));
6239 mgmt_cmd_complete(cmd->sk, hdev->id,
6240 MGMT_OP_SET_ADVERTISING_DATA, 0,
6243 mgmt_pending_remove(cmd);
6246 hci_dev_unlock(hdev);
/* (Tizen extension) MGMT Set Advertising Data handler: copies up to
 * HCI_MAX_AD_LENGTH raw bytes into an HCI_OP_LE_SET_ADV_DATA request
 * and completes asynchronously via set_advertising_data_complete().
 * NOTE(review): elided dump — goto labels, adv.length assignment and
 * the unlock/return path are missing from this view.
 */
6249 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6250 void *data, u16 len)
6252 struct mgmt_pending_cmd *cmd;
6253 struct hci_request req;
6254 struct mgmt_cp_set_advertising_data *cp = data;
6255 struct hci_cp_le_set_adv_data adv;
6258 BT_DBG("%s", hdev->name);
6260 if (!lmp_le_capable(hdev)) {
6261 return mgmt_cmd_status(sk, hdev->id,
6262 MGMT_OP_SET_ADVERTISING_DATA,
6263 MGMT_STATUS_NOT_SUPPORTED);
/* Only one Set Advertising Data operation may be in flight. */
6268 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6269 err = mgmt_cmd_status(sk, hdev->id,
6270 MGMT_OP_SET_ADVERTISING_DATA,
6275 if (len > HCI_MAX_AD_LENGTH) {
6276 err = mgmt_cmd_status(sk, hdev->id,
6277 MGMT_OP_SET_ADVERTISING_DATA,
6278 MGMT_STATUS_INVALID_PARAMS);
6282 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6289 hci_req_init(&req, hdev);
/* Zero-fill first so unused trailing AD bytes are deterministic. */
6291 memset(&adv, 0, sizeof(adv));
6292 memcpy(adv.data, cp->data, len);
6295 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6297 err = hci_req_run(&req, set_advertising_data_complete);
6299 mgmt_pending_remove(cmd);
6302 hci_dev_unlock(hdev);
/* (Tizen extension) completion callback for Set Scan Response Data:
 * forwards the HCI status to the pending mgmt command and removes it.
 * NOTE(review): elided dump — locals/braces and the complete-response
 * payload arguments are missing from this view.
 */
6307 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6310 struct mgmt_cp_set_scan_rsp_data *cp;
6311 struct mgmt_pending_cmd *cmd;
6313 BT_DBG("status 0x%02x", status);
6317 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6324 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6325 mgmt_status(status));
6327 mgmt_cmd_complete(cmd->sk, hdev->id,
6328 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6331 mgmt_pending_remove(cmd);
6334 hci_dev_unlock(hdev);
/* (Tizen extension) MGMT Set Scan Response Data handler: mirrors
 * set_advertising_data() but issues HCI_OP_LE_SET_SCAN_RSP_DATA.
 * NOTE(review): elided dump — goto labels, rsp.length assignment and
 * the unlock/return path are missing from this view.
 */
6337 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6340 struct mgmt_pending_cmd *cmd;
6341 struct hci_request req;
6342 struct mgmt_cp_set_scan_rsp_data *cp = data;
6343 struct hci_cp_le_set_scan_rsp_data rsp;
6346 BT_DBG("%s", hdev->name);
6348 if (!lmp_le_capable(hdev))
6349 return mgmt_cmd_status(sk, hdev->id,
6350 MGMT_OP_SET_SCAN_RSP_DATA,
6351 MGMT_STATUS_NOT_SUPPORTED);
6355 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6356 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6361 if (len > HCI_MAX_AD_LENGTH) {
6362 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6363 MGMT_STATUS_INVALID_PARAMS);
6367 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6373 hci_req_init(&req, hdev);
/* Zero-fill first so unused trailing bytes are deterministic. */
6375 memset(&rsp, 0, sizeof(rsp));
6376 memcpy(rsp.data, cp->data, len);
6379 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6381 err = hci_req_run(&req, set_scan_rsp_data_complete);
6383 mgmt_pending_remove(cmd);
6386 hci_dev_unlock(hdev);
6391 /* Adv White List feature */
/* (Tizen extension) completion callback for Add Device To White List:
 * on success echoes the original command parameters back in the
 * complete event; on failure reports the mapped HCI status.
 * NOTE(review): elided dump — braces, the `cp = cmd->param` assignment
 * and early exits are missing from this view.
 */
6392 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6394 struct mgmt_cp_add_dev_white_list *cp;
6395 struct mgmt_pending_cmd *cmd;
6397 BT_DBG("status 0x%02x", status);
6401 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
6408 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6409 mgmt_status(status));
6411 mgmt_cmd_complete(cmd->sk, hdev->id,
6412 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
6414 mgmt_pending_remove(cmd);
6417 hci_dev_unlock(hdev);
/* (Tizen extension) MGMT Add Device To White List handler: forwards
 * the command parameters directly as an HCI_OP_LE_ADD_TO_WHITE_LIST
 * request; requires LE support and a powered controller.
 * NOTE(review): elided dump — goto labels and the unlock/return path
 * are missing from this view.
 */
6420 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
6421 void *data, u16 len)
6423 struct mgmt_pending_cmd *cmd;
6424 struct mgmt_cp_add_dev_white_list *cp = data;
6425 struct hci_request req;
6428 BT_DBG("%s", hdev->name);
6430 if (!lmp_le_capable(hdev))
6431 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6432 MGMT_STATUS_NOT_SUPPORTED);
6434 if (!hdev_is_powered(hdev))
6435 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6436 MGMT_STATUS_REJECTED);
6440 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
6441 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6446 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
6452 hci_req_init(&req, hdev);
/* The mgmt parameter layout is passed through to the HCI command. */
6454 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
6456 err = hci_req_run(&req, add_white_list_complete);
6458 mgmt_pending_remove(cmd);
6463 hci_dev_unlock(hdev);
/* (Tizen extension) completion callback for Remove Device From White
 * List: reports the mapped HCI status or echoes the parameters on
 * success.
 * NOTE(review): elided dump — braces, the `cp = cmd->param` assignment
 * and early exits are missing from this view.
 */
6468 static void remove_from_white_list_complete(struct hci_dev *hdev,
6469 u8 status, u16 opcode)
6471 struct mgmt_cp_remove_dev_from_white_list *cp;
6472 struct mgmt_pending_cmd *cmd;
6474 BT_DBG("status 0x%02x", status);
6478 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
6485 mgmt_cmd_status(cmd->sk, hdev->id,
6486 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6487 mgmt_status(status));
6489 mgmt_cmd_complete(cmd->sk, hdev->id,
6490 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
6493 mgmt_pending_remove(cmd);
6496 hci_dev_unlock(hdev);
/* (Tizen extension) MGMT Remove Device From White List handler:
 * forwards the parameters as HCI_OP_LE_DEL_FROM_WHITE_LIST; requires
 * LE support and a powered controller.
 * NOTE(review): elided dump — goto labels and the unlock/return path
 * are missing from this view.
 */
6499 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
6500 void *data, u16 len)
6502 struct mgmt_pending_cmd *cmd;
6503 struct mgmt_cp_remove_dev_from_white_list *cp = data;
6504 struct hci_request req;
6507 BT_DBG("%s", hdev->name);
6509 if (!lmp_le_capable(hdev))
6510 return mgmt_cmd_status(sk, hdev->id,
6511 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6512 MGMT_STATUS_NOT_SUPPORTED);
6514 if (!hdev_is_powered(hdev))
6515 return mgmt_cmd_status(sk, hdev->id,
6516 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6517 MGMT_STATUS_REJECTED);
6521 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
6522 err = mgmt_cmd_status(sk, hdev->id,
6523 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6528 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6535 hci_req_init(&req, hdev);
6537 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
6539 err = hci_req_run(&req, remove_from_white_list_complete);
6541 mgmt_pending_remove(cmd);
6546 hci_dev_unlock(hdev);
/* (Tizen extension) completion callback for Clear Device White List:
 * reports the mapped HCI status or a parameterless complete event.
 * NOTE(review): elided dump — braces and early exits are missing from
 * this view.
 */
6551 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
6554 struct mgmt_pending_cmd *cmd;
6556 BT_DBG("status 0x%02x", status);
6560 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
6565 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6566 mgmt_status(status));
6568 mgmt_cmd_complete(cmd->sk, hdev->id,
6569 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6572 mgmt_pending_remove(cmd);
6575 hci_dev_unlock(hdev);
/* (Tizen extension) MGMT Clear Device White List handler: issues the
 * parameterless HCI_OP_LE_CLEAR_WHITE_LIST; requires LE support and a
 * powered controller.
 * NOTE(review): elided dump — goto labels and the unlock/return path
 * are missing from this view.
 */
6578 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
6579 void *data, u16 len)
6581 struct mgmt_pending_cmd *cmd;
6582 struct hci_request req;
6585 BT_DBG("%s", hdev->name);
6587 if (!lmp_le_capable(hdev))
6588 return mgmt_cmd_status(sk, hdev->id,
6589 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6590 MGMT_STATUS_NOT_SUPPORTED);
6592 if (!hdev_is_powered(hdev))
6593 return mgmt_cmd_status(sk, hdev->id,
6594 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6595 MGMT_STATUS_REJECTED);
6599 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
6600 err = mgmt_cmd_status(sk, hdev->id,
6601 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6606 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6613 hci_req_init(&req, hdev);
6615 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
6617 err = hci_req_run(&req, clear_white_list_complete);
6619 mgmt_pending_remove(cmd);
6624 hci_dev_unlock(hdev);
/* (Tizen extension) completion callback for the RSSI-threshold HCI
 * request issued by mgmt_set_rssi_threshold(): forwards the status to
 * the pending SET_RSSI_ENABLE command.
 * NOTE(review): elided dump — braces and the complete-event payload
 * arguments are missing from this view.
 */
6629 static void set_rssi_threshold_complete(struct hci_dev *hdev,
6630 u8 status, u16 opcode)
6632 struct mgmt_pending_cmd *cmd;
6634 BT_DBG("status 0x%02x", status);
6638 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6643 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6644 mgmt_status(status));
6646 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6649 mgmt_pending_remove(cmd);
6652 hci_dev_unlock(hdev);
/* (Tizen extension) completion callback for the RSSI-disable HCI
 * request: forwards the status to the pending SET_RSSI_DISABLE command.
 * NOTE(review): elided dump — braces and the complete-event payload
 * arguments are missing from this view.
 */
6655 static void set_rssi_disable_complete(struct hci_dev *hdev,
6656 u8 status, u16 opcode)
6658 struct mgmt_pending_cmd *cmd;
6660 BT_DBG("status 0x%02x", status);
6664 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6669 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6670 mgmt_status(status));
6672 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6675 mgmt_pending_remove(cmd);
6678 hci_dev_unlock(hdev);
/* (Tizen extension) Configure the vendor RSSI alert thresholds for an
 * existing LE/ACL connection.  Looks up the connection by address,
 * fills a vendor HCI_OP_ENABLE_RSSI sub-command (opcode 0x0B) with the
 * low/in-range/high thresholds from the mgmt request, and completes
 * via set_rssi_threshold_complete().  Called both as a command handler
 * continuation and from mgmt_enable_rssi_cc().
 * NOTE(review): elided dump — locals, goto labels, the lock call and
 * the return path are missing from this view.
 */
6681 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6682 void *data, u16 len)
6685 struct hci_cp_set_rssi_threshold th = { 0, };
6686 struct mgmt_cp_set_enable_rssi *cp = data;
6687 struct hci_conn *conn;
6688 struct mgmt_pending_cmd *cmd;
6689 struct hci_request req;
/* A SET_RSSI_ENABLE command is expected to already be pending here. */
6694 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6696 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6697 MGMT_STATUS_FAILED);
6701 if (!lmp_le_capable(hdev)) {
6702 mgmt_pending_remove(cmd);
6703 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6704 MGMT_STATUS_NOT_SUPPORTED);
6708 if (!hdev_is_powered(hdev)) {
6709 BT_DBG("%s", hdev->name);
6710 mgmt_pending_remove(cmd);
6711 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6712 MGMT_STATUS_NOT_POWERED);
/* link_type 0x01 selects LE, anything else BR/EDR ACL. */
6716 if (cp->link_type == 0x01)
6717 dest_type = LE_LINK;
6719 dest_type = ACL_LINK;
6721 /* Get LE/ACL link handle info */
6722 conn = hci_conn_hash_lookup_ba(hdev,
6723 dest_type, &cp->bdaddr);
6726 err = mgmt_cmd_complete(sk, hdev->id,
6727 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
6728 mgmt_pending_remove(cmd);
6732 hci_req_init(&req, hdev);
/* Vendor sub-opcode 0x0B = set RSSI threshold; alert_mask 0x07 enables
 * all three (low/in-range/high) alerts.
 */
6734 th.hci_le_ext_opcode = 0x0B;
6736 th.conn_handle = conn->handle;
6737 th.alert_mask = 0x07;
6738 th.low_th = cp->low_th;
6739 th.in_range_th = cp->in_range_th;
6740 th.high_th = cp->high_th;
6742 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6743 err = hci_req_run(&req, set_rssi_threshold_complete);
6746 mgmt_pending_remove(cmd);
6747 BT_ERR("Error in requesting hci_req_run");
6752 hci_dev_unlock(hdev);
/* (Tizen extension) Report a successful RSSI enable back to user
 * space: sends the SET_RSSI_ENABLE complete event plus an
 * MGMT_EV_RSSI_ENABLED broadcast, updates per-connection RSSI
 * monitoring state, and drops the pending command.
 * NOTE(review): elided dump — the `success` guard, lock call and
 * braces are missing from this view.
 */
6756 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6757 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6759 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6760 struct mgmt_cp_set_enable_rssi *cp = data;
6761 struct mgmt_pending_cmd *cmd;
6766 mgmt_rp.status = rp->status;
6767 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6768 mgmt_rp.bt_address = cp->bdaddr;
6769 mgmt_rp.link_type = cp->link_type;
6771 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6772 MGMT_STATUS_SUCCESS, &mgmt_rp,
6773 sizeof(struct mgmt_cc_rsp_enable_rssi));
6775 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6776 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
/* Only one connection per link type is monitored: unset all, then
 * mark the requested one.
 */
6778 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6779 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6780 &mgmt_rp.bt_address, true);
6784 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6786 mgmt_pending_remove(cmd);
6788 hci_dev_unlock(hdev);
/* (Tizen extension) Report a successful RSSI disable back to user
 * space: sends the SET_RSSI_DISABLE complete event plus an
 * MGMT_EV_RSSI_DISABLED broadcast, clears the connection's RSSI
 * monitoring state, and drops the pending command.
 * NOTE(review): elided dump — the `success` guard, lock call and
 * braces are missing from this view.  The sizeof() uses the enable
 * response struct; presumably the layouts match — verify against
 * mgmt_tizen.h.
 */
6791 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6792 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6794 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6795 struct mgmt_cp_disable_rssi *cp = data;
6796 struct mgmt_pending_cmd *cmd;
6801 mgmt_rp.status = rp->status;
6802 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6803 mgmt_rp.bt_address = cp->bdaddr;
6804 mgmt_rp.link_type = cp->link_type;
6806 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6807 MGMT_STATUS_SUCCESS, &mgmt_rp,
6808 sizeof(struct mgmt_cc_rsp_enable_rssi));
6810 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6811 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6813 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6814 &mgmt_rp.bt_address, false);
6818 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6820 mgmt_pending_remove(cmd);
6822 hci_dev_unlock(hdev);
/* (Tizen extension) Turn vendor RSSI monitoring off: sends
 * HCI_OP_ENABLE_RSSI with the enable sub-opcode (0x01) and CS-features
 * byte 0x00 (disable), completing via set_rssi_disable_complete().
 * Expects a SET_RSSI_DISABLE command to already be pending.
 * NOTE(review): elided dump — locals, goto labels, the lock call and
 * the return path are missing from this view.
 */
6825 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6826 void *data, u16 len)
6828 struct mgmt_pending_cmd *cmd;
6829 struct hci_request req;
6830 struct hci_cp_set_enable_rssi cp_en = { 0, };
6833 BT_DBG("Set Disable RSSI.");
/* Sub-opcode 0x01 with le_enable_cs_Features = 0x00 disables RSSI. */
6835 cp_en.hci_le_ext_opcode = 0x01;
6836 cp_en.le_enable_cs_Features = 0x00;
6837 cp_en.data[0] = 0x00;
6838 cp_en.data[1] = 0x00;
6839 cp_en.data[2] = 0x00;
6843 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6845 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6846 MGMT_STATUS_FAILED);
6850 if (!lmp_le_capable(hdev)) {
6851 mgmt_pending_remove(cmd);
6852 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6853 MGMT_STATUS_NOT_SUPPORTED);
6857 if (!hdev_is_powered(hdev)) {
6858 BT_DBG("%s", hdev->name);
6859 mgmt_pending_remove(cmd);
6860 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6861 MGMT_STATUS_NOT_POWERED);
6865 hci_req_init(&req, hdev);
6867 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6868 sizeof(struct hci_cp_set_enable_rssi),
6869 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6870 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6872 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6873 err = hci_req_run(&req, set_rssi_disable_complete);
6876 mgmt_pending_remove(cmd);
6877 BT_ERR("Error in requesting hci_req_run");
6882 hci_dev_unlock(hdev);
/* (Tizen extension) Command-complete dispatcher for the vendor
 * HCI_OP_ENABLE_RSSI command.  Routes the response to the pending
 * enable or disable flow: after an enable, chains into
 * mgmt_set_rssi_threshold(); after a threshold write, reports enable
 * success; on disable, reports success, but only fully disables RSSI
 * when no other link is still being monitored.
 * NOTE(review): elided dump — the case labels for rp->le_ext_opcode,
 * braces and `break`s are missing from this view, so the exact
 * opcode-to-action mapping cannot be confirmed here.
 */
6886 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6888 struct hci_cc_rsp_enable_rssi *rp = response;
6889 struct mgmt_pending_cmd *cmd_enable = NULL;
6890 struct mgmt_pending_cmd *cmd_disable = NULL;
6891 struct mgmt_cp_set_enable_rssi *cp_en;
6892 struct mgmt_cp_disable_rssi *cp_dis;
6895 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6896 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6897 hci_dev_unlock(hdev);
6900 BT_DBG("Enable Request");
6903 BT_DBG("Disable Request");
6906 cp_en = cmd_enable->param;
6911 switch (rp->le_ext_opcode) {
6913 BT_DBG("RSSI enabled.. Setting Threshold...");
6914 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6915 cp_en, sizeof(*cp_en));
6919 BT_DBG("Sending RSSI enable success");
6920 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6921 cp_en, rp, rp->status);
6925 } else if (cmd_disable) {
6926 cp_dis = cmd_disable->param;
6931 switch (rp->le_ext_opcode) {
6933 BT_DBG("Sending RSSI disable success");
6934 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6935 cp_dis, rp, rp->status);
6940 * Only unset RSSI Threshold values for the Link if
6941 * RSSI is monitored for other BREDR or LE Links
6943 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6944 BT_DBG("Unset Threshold. Other links being monitored");
6945 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6946 cp_dis, rp, rp->status);
6948 BT_DBG("Unset Threshold. Disabling...");
6949 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6950 cp_dis, sizeof(*cp_dis));
/* (Tizen extension) completion callback for the RSSI-enable HCI
 * request: forwards the status to the pending SET_RSSI_ENABLE command.
 * NOTE(review): elided dump — braces and the complete-event payload
 * arguments are missing from this view.
 */
6957 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
6960 struct mgmt_pending_cmd *cmd;
6962 BT_DBG("status 0x%02x", status);
6966 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6971 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6972 mgmt_status(status));
6974 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6977 mgmt_pending_remove(cmd);
6980 hci_dev_unlock(hdev);
/* (Tizen extension) MGMT Set RSSI Enable handler: registers a pending
 * command, then either jumps straight to threshold configuration (if
 * RSSI monitoring is already active on some connection) or sends the
 * vendor enable sub-command (opcode 0x01, CS-features 0x04) and
 * completes via set_rssi_enable_complete().
 * NOTE(review): elided dump — locals, goto labels, the lock call and
 * the return path are missing from this view.
 */
6983 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
6984 void *data, u16 len)
6986 struct mgmt_pending_cmd *cmd;
6987 struct hci_request req;
6988 struct mgmt_cp_set_enable_rssi *cp = data;
6989 struct hci_cp_set_enable_rssi cp_en = { 0, };
6992 BT_DBG("Set Enable RSSI.");
/* Sub-opcode 0x01 with le_enable_cs_Features = 0x04 enables RSSI. */
6994 cp_en.hci_le_ext_opcode = 0x01;
6995 cp_en.le_enable_cs_Features = 0x04;
6996 cp_en.data[0] = 0x00;
6997 cp_en.data[1] = 0x00;
6998 cp_en.data[2] = 0x00;
7002 if (!lmp_le_capable(hdev)) {
7003 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7004 MGMT_STATUS_NOT_SUPPORTED);
7008 if (!hdev_is_powered(hdev)) {
7009 BT_DBG("%s", hdev->name);
7010 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7011 MGMT_STATUS_NOT_POWERED);
7015 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
7016 BT_DBG("%s", hdev->name);
7017 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7022 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
7025 BT_DBG("%s", hdev->name);
7030 /* If RSSI is already enabled directly set Threshold values */
7031 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
7032 hci_dev_unlock(hdev);
7033 BT_DBG("RSSI Enabled. Directly set Threshold");
7034 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
7038 hci_req_init(&req, hdev);
7040 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7041 sizeof(struct hci_cp_set_enable_rssi),
7042 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7043 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7045 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7046 err = hci_req_run(&req, set_rssi_enable_complete);
7049 mgmt_pending_remove(cmd);
7050 BT_ERR("Error in requesting hci_req_run");
7055 hci_dev_unlock(hdev);
/* (Tizen extension) completion callback for Get Raw RSSI: completes
 * the pending command with the one-byte HCI status as payload (the
 * actual RSSI value is delivered separately via
 * mgmt_raw_rssi_response()).
 * NOTE(review): elided dump — braces and early exits are missing from
 * this view.
 */
7060 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7062 struct mgmt_pending_cmd *cmd;
7064 BT_DBG("status 0x%02x", status);
7068 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
7072 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7073 MGMT_STATUS_SUCCESS, &status, 1);
7075 mgmt_pending_remove(cmd);
7078 hci_dev_unlock(hdev);
/* MGMT_OP_GET_RAW_RSSI handler (Tizen vendor extension): resolves the
 * target connection handle from the supplied address/link type and issues
 * the vendor HCI_OP_GET_RAW_RSSI command; the reply is delivered via
 * get_raw_rssi_complete().  Fails with NOT_SUPPORTED / NOT_CONNECTED /
 * NOT_POWERED / busy as appropriate.
 */
7081 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
7084 struct mgmt_pending_cmd *cmd;
7085 struct hci_request req;
7086 struct mgmt_cp_get_raw_rssi *cp = data;
7087 struct hci_cp_get_raw_rssi hci_cp;
7089 struct hci_conn *conn;
7093 BT_DBG("Get Raw RSSI.");
7097 if (!lmp_le_capable(hdev)) {
7098 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7099 MGMT_STATUS_NOT_SUPPORTED);
/* link_type 0x01 denotes LE in this vendor interface; anything else
 * is treated as BR/EDR (ACL).
 */
7103 if (cp->link_type == 0x01)
7104 dest_type = LE_LINK;
7106 dest_type = ACL_LINK;
7108 /* Get LE/BREDR link handle info */
7109 conn = hci_conn_hash_lookup_ba(hdev,
7110 dest_type, &cp->bt_address);
7112 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7113 MGMT_STATUS_NOT_CONNECTED);
7116 hci_cp.conn_handle = conn->handle;
7118 if (!hdev_is_powered(hdev)) {
7119 BT_DBG("%s", hdev->name);
7120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7121 MGMT_STATUS_NOT_POWERED);
/* Only one Get Raw RSSI request may be outstanding at a time. */
7125 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
7126 BT_DBG("%s", hdev->name);
7127 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7132 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
7134 BT_DBG("%s", hdev->name);
7139 hci_req_init(&req, hdev);
7141 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
7142 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
7143 err = hci_req_run(&req, get_raw_rssi_complete);
/* On request failure drop the pending command so it cannot leak. */
7146 mgmt_pending_remove(cmd);
7147 BT_ERR("Error in requesting hci_req_run");
7151 hci_dev_unlock(hdev);
/* Translates a vendor raw-RSSI HCI command-complete (status, handle, dBm)
 * into an MGMT_EV_RAW_RSSI event carrying the peer address and link type.
 * link_type is 0x01 for LE and 0x00 otherwise, mirroring get_raw_rssi().
 */
7156 void mgmt_raw_rssi_response(struct hci_dev *hdev,
7157 struct hci_cc_rp_get_raw_rssi *rp, int success)
7159 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
7160 struct hci_conn *conn;
7162 mgmt_rp.status = rp->status;
7163 mgmt_rp.rssi_dbm = rp->rssi_dbm;
7165 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
7169 bacpy(&mgmt_rp.bt_address, &conn->dst);
7170 if (conn->type == LE_LINK)
7171 mgmt_rp.link_type = 0x01;
7173 mgmt_rp.link_type = 0x00;
7175 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
7176 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* Completion callback for the RSSI-disable vendor request: completes the
 * pending MGMT_OP_SET_RSSI_DISABLE command with the HCI status byte.
 */
7179 static void set_disable_threshold_complete(struct hci_dev *hdev,
7180 u8 status, u16 opcode)
7182 struct mgmt_pending_cmd *cmd;
7184 BT_DBG("status 0x%02x", status);
7188 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7192 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7193 MGMT_STATUS_SUCCESS, &status, 1);
7195 mgmt_pending_remove(cmd);
7198 hci_dev_unlock(hdev);
7201 /** Removes monitoring for a link*/
/* MGMT_OP_SET_RSSI_DISABLE handler: clears the RSSI alert threshold for
 * one connection by sending vendor sub-opcode 0x0B with a zeroed alert
 * mask and thresholds.  If the link is already gone the command is
 * completed successfully with no HCI traffic.
 */
7202 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
7203 void *data, u16 len)
7206 struct hci_cp_set_rssi_threshold th = { 0, };
7207 struct mgmt_cp_disable_rssi *cp = data;
7208 struct hci_conn *conn;
7209 struct mgmt_pending_cmd *cmd;
7210 struct hci_request req;
7213 BT_DBG("Set Disable RSSI.");
7217 if (!lmp_le_capable(hdev)) {
7218 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7219 MGMT_STATUS_NOT_SUPPORTED);
7223 /* Get LE/ACL link handle info*/
7224 if (cp->link_type == 0x01)
7225 dest_type = LE_LINK;
7227 dest_type = ACL_LINK;
7229 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
/* No connection: nothing to disable, report success immediately. */
7231 err = mgmt_cmd_complete(sk, hdev->id,
7232 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
/* 0x0B is the vendor LE extended sub-opcode for setting the RSSI
 * threshold; zeroed mask/threshold turns monitoring off.
 */
7236 th.hci_le_ext_opcode = 0x0B;
7238 th.conn_handle = conn->handle;
7239 th.alert_mask = 0x00;
7241 th.in_range_th = 0x00;
7244 if (!hdev_is_powered(hdev)) {
7245 BT_DBG("%s", hdev->name);
7246 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7251 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
7252 BT_DBG("%s", hdev->name);
7253 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7258 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
7261 BT_DBG("%s", hdev->name);
7266 hci_req_init(&req, hdev);
7268 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7269 err = hci_req_run(&req, set_disable_threshold_complete);
7271 mgmt_pending_remove(cmd);
7272 BT_ERR("Error in requesting hci_req_run");
7277 hci_dev_unlock(hdev);
/* Forwards a vendor RSSI-alert HCI event to userspace as
 * MGMT_EV_RSSI_ALERT, resolving the connection handle to a peer address.
 * Logs and drops the event if the handle no longer maps to a connection.
 */
7282 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
7283 s8 alert_type, s8 rssi_dbm)
7285 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
7286 struct hci_conn *conn;
7288 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
7289 conn_handle, alert_type, rssi_dbm);
7291 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
7294 BT_ERR("RSSI alert Error: Device not found for handle");
7297 bacpy(&mgmt_ev.bdaddr, &conn->dst);
7299 if (conn->type == LE_LINK)
7300 mgmt_ev.link_type = 0x01;
7302 mgmt_ev.link_type = 0x00;
7304 mgmt_ev.alert_type = alert_type;
7305 mgmt_ev.rssi_dbm = rssi_dbm;
7307 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
7308 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
/* Fails an in-flight MGMT_OP_START_LE_DISCOVERY: resets the LE discovery
 * state machine to STOPPED and completes the pending command with the
 * mapped HCI status plus the discovery type as payload.
 */
7312 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
7314 struct mgmt_pending_cmd *cmd;
7318 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7320 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7324 type = hdev->le_discovery.type;
7326 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7327 mgmt_status(status), &type, sizeof(type));
7328 mgmt_pending_remove(cmd);
/* HCI request callback for start_le_discovery(): on error unwinds via
 * mgmt_start_le_discovery_failed(); on success marks discovery FINDING
 * and schedules the LE scan-disable work after the discovery timeout.
 * NOTE(review): the code path that assigns a non-zero `timeout` is not
 * visible in this extract — confirm against the full source.
 */
7333 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
7336 unsigned long timeout = 0;
7338 BT_DBG("status %d", status);
7342 mgmt_start_le_discovery_failed(hdev, status);
7343 hci_dev_unlock(hdev);
7348 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
7349 hci_dev_unlock(hdev);
7351 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
7352 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
7357 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_LE_DISCOVERY handler (Tizen separate-LE-discovery path):
 * validates power/state/type, stops any background LE scan, programs scan
 * parameters (with a resolvable or unresolvable private own address) and
 * enables active scanning without duplicate filtering; completion is
 * handled by start_le_discovery_complete().
 */
7360 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
7361 void *data, u16 len)
7363 struct mgmt_cp_start_le_discovery *cp = data;
7364 struct mgmt_pending_cmd *cmd;
7365 struct hci_cp_le_set_scan_param param_cp;
7366 struct hci_cp_le_set_scan_enable enable_cp;
7367 struct hci_request req;
7368 u8 status, own_addr_type;
7371 BT_DBG("%s", hdev->name);
7375 if (!hdev_is_powered(hdev)) {
7376 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7377 MGMT_STATUS_NOT_POWERED);
7381 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
7382 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7387 if (cp->type != DISCOV_TYPE_LE) {
7388 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7389 MGMT_STATUS_INVALID_PARAMS);
7393 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
7399 hdev->le_discovery.type = cp->type;
7401 hci_req_init(&req, hdev);
7403 status = mgmt_le_support(hdev);
7405 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7407 mgmt_pending_remove(cmd);
7411 /* If controller is scanning, it means the background scanning
7412 * is running. Thus, we should temporarily stop it in order to
7413 * set the discovery scanning parameters.
7415 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
7416 hci_req_add_le_scan_disable(&req, false);
/* NOTE(review): "¶m_cp" below looks like mojibake for "&param_cp"
 * introduced by text extraction — confirm against upstream source.
 */
7418 memset(¶m_cp, 0, sizeof(param_cp));
7420 /* All active scans will be done with either a resolvable
7421 * private address (when privacy feature has been enabled)
7422 * or unresolvable private address.
7424 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
7426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7427 MGMT_STATUS_FAILED);
7428 mgmt_pending_remove(cmd);
7432 param_cp.type = hdev->le_scan_type;
7433 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
7434 param_cp.window = cpu_to_le16(hdev->le_scan_window);
7435 param_cp.own_address_type = own_addr_type;
7436 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
7439 memset(&enable_cp, 0, sizeof(enable_cp));
7440 enable_cp.enable = LE_SCAN_ENABLE;
7441 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
7443 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
7446 err = hci_req_run(&req, start_le_discovery_complete);
7448 mgmt_pending_remove(cmd);
7450 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
7453 hci_dev_unlock(hdev);
/* Fails an in-flight MGMT_OP_STOP_LE_DISCOVERY: completes the pending
 * command with the mapped HCI status and the current LE discovery type.
 */
7457 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
7459 struct mgmt_pending_cmd *cmd;
7462 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7466 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7467 mgmt_status(status), &hdev->le_discovery.type,
7468 sizeof(hdev->le_discovery.type));
7469 mgmt_pending_remove(cmd);
/* HCI request callback for stop_le_discovery(): reports failure via
 * mgmt_stop_le_discovery_failed() on error, otherwise marks the LE
 * discovery state machine STOPPED.
 */
7474 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
7477 BT_DBG("status %d", status);
7482 mgmt_stop_le_discovery_failed(hdev, status);
7486 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7489 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_LE_DISCOVERY handler: rejects if LE discovery is not
 * active or the requested type mismatches, then cancels the pending scan
 * timeout and queues an LE scan-disable request; on success the state
 * moves to STOPPING until stop_le_discovery_complete() runs.
 */
7492 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
7493 void *data, u16 len)
7495 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
7496 struct mgmt_pending_cmd *cmd;
7497 struct hci_request req;
7500 BT_DBG("%s", hdev->name);
7504 if (!hci_le_discovery_active(hdev)) {
7505 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7506 MGMT_STATUS_REJECTED, &mgmt_cp->type,
7507 sizeof(mgmt_cp->type));
7511 if (hdev->le_discovery.type != mgmt_cp->type) {
7512 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7513 MGMT_STATUS_INVALID_PARAMS,
7514 &mgmt_cp->type, sizeof(mgmt_cp->type));
7518 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
7524 hci_req_init(&req, hdev);
/* Only the FINDING state can be stopped here; any other state is
 * unexpected and reported as FAILED.
 */
7526 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
7527 BT_DBG("unknown le discovery state %u",
7528 hdev->le_discovery.state);
7530 mgmt_pending_remove(cmd);
7531 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7532 MGMT_STATUS_FAILED, &mgmt_cp->type,
7533 sizeof(mgmt_cp->type));
7537 cancel_delayed_work(&hdev->le_scan_disable);
7538 hci_req_add_le_scan_disable(&req, false);
7540 err = hci_req_run(&req, stop_le_discovery_complete);
7542 mgmt_pending_remove(cmd);
7544 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
7547 hci_dev_unlock(hdev);
7551 /* Separate LE discovery */
/* Notifies userspace that LE discovery started/stopped: completes any
 * pending start/stop LE discovery command with the discovery type, then
 * broadcasts MGMT_EV_DISCOVERING with the new `discovering` flag.
 */
7552 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
7554 struct mgmt_ev_discovering ev;
7555 struct mgmt_pending_cmd *cmd;
7557 BT_DBG("%s le discovering %u", hdev->name, discovering);
7560 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7562 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7565 u8 type = hdev->le_discovery.type;
7567 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
7569 mgmt_pending_remove(cmd);
7572 memset(&ev, 0, sizeof(ev));
7573 ev.type = hdev->le_discovery.type;
7574 ev.discovering = discovering;
7576 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Cancels an outstanding LE connection attempt by sending
 * HCI_OP_LE_CREATE_CONN_CANCEL; the send failure is only logged.
 */
7579 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
7580 void *data, u16 len)
7584 BT_DBG("%s", hdev->name);
7588 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
7590 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
7592 hci_dev_unlock(hdev);
/* Validates LE connection-update parameters against the spec ranges:
 * interval 6..3200 with min <= max, supervision timeout 10..3200, and
 * latency <= 499 and below the limit derived from timeout/interval
 * (timeout must exceed (1 + latency) * max interval).
 */
7597 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
7602 if (min > max || min < 6 || max > 3200)
7605 if (to_multiplier < 10 || to_multiplier > 3200)
7608 if (max >= to_multiplier * 8)
7611 max_latency = (to_multiplier * 8 / max) - 1;
7613 if (latency > 499 || latency > max_latency)
/* MGMT_OP_LE_CONN_UPDATE handler: validates the requested connection
 * parameters, looks up the LE connection by address, and triggers an HCI
 * LE Connection Update via hci_le_conn_update().
 */
7619 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
7622 struct mgmt_cp_le_conn_update *cp = data;
7624 struct hci_conn *conn;
7625 u16 min, max, latency, supervision_timeout;
7628 if (!hdev_is_powered(hdev))
7629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7630 MGMT_STATUS_NOT_POWERED);
7632 min = __le16_to_cpu(cp->conn_interval_min);
7633 max = __le16_to_cpu(cp->conn_interval_max);
7634 latency = __le16_to_cpu(cp->conn_latency);
7635 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
7637 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
7638 min, max, latency, supervision_timeout);
7640 err = check_le_conn_update_param(min, max, latency,
7641 supervision_timeout);
7644 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7645 MGMT_STATUS_INVALID_PARAMS);
7649 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
7651 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7652 MGMT_STATUS_NOT_CONNECTED);
7653 hci_dev_unlock(hdev);
7657 hci_dev_unlock(hdev);
/* Parameters already validated above; fire the HCI update. */
7659 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
7661 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
/* Completion callback for set_manufacturer_data(): maps a non-zero HCI
 * status to a command-status reply, otherwise completes the pending
 * MGMT_OP_SET_MANUFACTURER_DATA command successfully.
 */
7665 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
7668 struct mgmt_cp_set_manufacturer_data *cp;
7669 struct mgmt_pending_cmd *cmd;
7671 BT_DBG("status 0x%02x", status);
7675 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
7682 mgmt_cmd_status(cmd->sk, hdev->id,
7683 MGMT_OP_SET_MANUFACTURER_DATA,
7684 mgmt_status(status));
7686 mgmt_cmd_complete(cmd->sk, hdev->id,
7687 MGMT_OP_SET_MANUFACTURER_DATA, 0,
7690 mgmt_pending_remove(cmd);
7693 hci_dev_unlock(hdev);
/* MGMT_OP_SET_MANUFACTURER_DATA handler: validates the length-prefixed
 * EIR blob (cp->data[0] = length byte, cp->data[1] must be the 0xFF
 * manufacturer-specific EIR tag), stores it in hdev->manufacturer_data,
 * and refreshes the EIR.  The previous data is saved so it can be rolled
 * back if the HCI request fails.
 */
7696 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
7697 void *data, u16 len)
7699 struct mgmt_pending_cmd *cmd;
7700 struct hci_request req;
7701 struct mgmt_cp_set_manufacturer_data *cp = data;
7702 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
7706 BT_DBG("%s", hdev->name);
7708 if (!lmp_bredr_capable(hdev))
7709 return mgmt_cmd_status(sk, hdev->id,
7710 MGMT_OP_SET_MANUFACTURER_DATA,
7711 MGMT_STATUS_NOT_SUPPORTED);
/* data[0] counts the EIR field length including the type byte, so
 * the payload is data[0] - 1 bytes starting at data[2].
 */
7713 if (cp->data[0] == 0 ||
7714 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
7715 return mgmt_cmd_status(sk, hdev->id,
7716 MGMT_OP_SET_MANUFACTURER_DATA,
7717 MGMT_STATUS_INVALID_PARAMS);
7719 if (cp->data[1] != 0xFF)
7720 return mgmt_cmd_status(sk, hdev->id,
7721 MGMT_OP_SET_MANUFACTURER_DATA,
7722 MGMT_STATUS_NOT_SUPPORTED);
7726 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
7727 err = mgmt_cmd_status(sk, hdev->id,
7728 MGMT_OP_SET_MANUFACTURER_DATA,
7733 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
7740 hci_req_init(&req, hdev);
7742 /* if new data is same as previous data then return command
7745 if (hdev->manufacturer_len == cp->data[0] - 1 &&
7746 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
7747 mgmt_pending_remove(cmd);
7748 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
7749 0, cp, sizeof(*cp));
/* Snapshot current data for rollback on request failure. */
7754 old_len = hdev->manufacturer_len;
7756 memcpy(old_data, hdev->manufacturer_data, old_len);
7758 hdev->manufacturer_len = cp->data[0] - 1;
7759 if (hdev->manufacturer_len > 0)
7760 memcpy(hdev->manufacturer_data, cp->data + 2,
7761 hdev->manufacturer_len);
7763 __hci_req_update_eir(&req);
7765 err = hci_req_run(&req, set_manufacturer_data_complete);
7767 mgmt_pending_remove(cmd);
7772 hci_dev_unlock(hdev);
/* Failure path: restore the previously stored manufacturer data. */
7777 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
7778 hdev->manufacturer_len = old_len;
7779 if (hdev->manufacturer_len > 0)
7780 memcpy(hdev->manufacturer_data, old_data,
7781 hdev->manufacturer_len);
7782 hci_dev_unlock(hdev);
/* MGMT_OP_LE_SET_SCAN_PARAMS handler: validates interval/window against
 * the spec range 0x0004..0x4000 (window <= interval), stores the new scan
 * type/interval/window on hdev, and restarts passive background scanning
 * if it is currently running so the new parameters take effect.
 */
7786 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
7787 void *data, u16 len)
7789 struct mgmt_cp_le_set_scan_params *cp = data;
7790 __u16 interval, window;
7793 BT_DBG("%s", hdev->name);
7795 if (!lmp_le_capable(hdev))
7796 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7797 MGMT_STATUS_NOT_SUPPORTED);
7799 interval = __le16_to_cpu(cp->interval);
7801 if (interval < 0x0004 || interval > 0x4000)
7802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7803 MGMT_STATUS_INVALID_PARAMS);
7805 window = __le16_to_cpu(cp->window);
7807 if (window < 0x0004 || window > 0x4000)
7808 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7809 MGMT_STATUS_INVALID_PARAMS);
7811 if (window > interval)
7812 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7813 MGMT_STATUS_INVALID_PARAMS);
7817 hdev->le_scan_type = cp->type;
7818 hdev->le_scan_interval = interval;
7819 hdev->le_scan_window = window;
7821 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
7824 /* If background scan is running, restart it so new parameters are
7827 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
7828 hdev->discovery.state == DISCOVERY_STOPPED) {
7829 struct hci_request req;
7831 hci_req_init(&req, hdev);
7833 hci_req_add_le_scan_disable(&req, false);
7834 hci_req_add_le_passive_scan(&req);
7836 hci_req_run(&req, NULL);
7839 hci_dev_unlock(hdev);
/* Broadcasts a controller hardware error code to userspace as
 * MGMT_EV_HARDWARE_ERROR.
 */
7844 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
7846 struct mgmt_ev_hardware_error ev;
7848 ev.error_code = err_code;
7849 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
/* Broadcasts MGMT_EV_TX_TIMEOUT_ERROR (no payload) when an HCI transmit
 * timeout is detected.
 */
7852 void mgmt_tx_timeout_error(struct hci_dev *hdev)
7854 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
/* Forwards a vendor multi-advertising state-change notification to
 * userspace as MGMT_EV_MULTI_ADV_STATE_CHANGED, carrying the advertising
 * instance, reason, and associated connection handle.
 */
7857 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
7858 u8 state_change_reason, u16 connection_handle)
7860 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
7862 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
7863 adv_instance, state_change_reason, connection_handle);
7865 mgmt_ev.adv_instance = adv_instance;
7866 mgmt_ev.state_change_reason = state_change_reason;
7867 mgmt_ev.connection_handle = connection_handle;
7869 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
7870 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
7873 #endif /* TIZEN_BT */
/* Sanity-checks one LTK entry from Load Long Term Keys: the initiator
 * flag must be 0 or 1, and an LE random address must be static (two most
 * significant bits set).
 */
7875 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7877 if (key->initiator != 0x00 && key->initiator != 0x01)
7880 switch (key->addr.type) {
7881 case BDADDR_LE_PUBLIC:
7884 case BDADDR_LE_RANDOM:
7885 /* Two most significant bits shall be set */
7886 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: validates the key count against
 * the maximum that fits in a u16 payload and the exact message length,
 * rejects malformed entries, clears the existing SMP LTK store, and adds
 * each non-blocked key with type/authentication derived from the mgmt
 * key type.
 */
7894 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7895 void *cp_data, u16 len)
7897 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7898 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7899 sizeof(struct mgmt_ltk_info));
7900 u16 key_count, expected_len;
7903 bt_dev_dbg(hdev, "sock %p", sk);
7905 if (!lmp_le_capable(hdev))
7906 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7907 MGMT_STATUS_NOT_SUPPORTED);
7909 key_count = __le16_to_cpu(cp->key_count);
7910 if (key_count > max_key_count) {
7911 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7913 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7914 MGMT_STATUS_INVALID_PARAMS);
7917 expected_len = struct_size(cp, keys, key_count);
7918 if (expected_len != len) {
7919 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7921 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7922 MGMT_STATUS_INVALID_PARAMS);
7925 bt_dev_dbg(hdev, "key_count %u", key_count);
/* First pass: reject the whole load if any single key is invalid. */
7927 for (i = 0; i < key_count; i++) {
7928 struct mgmt_ltk_info *key = &cp->keys[i];
7930 if (!ltk_is_valid(key))
7931 return mgmt_cmd_status(sk, hdev->id,
7932 MGMT_OP_LOAD_LONG_TERM_KEYS,
7933 MGMT_STATUS_INVALID_PARAMS);
7938 hci_smp_ltks_clear(hdev);
/* Second pass: install each key, skipping administratively blocked
 * key values.
 */
7940 for (i = 0; i < key_count; i++) {
7941 struct mgmt_ltk_info *key = &cp->keys[i];
7942 u8 type, authenticated;
7944 if (hci_is_blocked_key(hdev,
7945 HCI_BLOCKED_KEY_TYPE_LTK,
7947 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7952 switch (key->type) {
7953 case MGMT_LTK_UNAUTHENTICATED:
7954 authenticated = 0x00;
7955 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7957 case MGMT_LTK_AUTHENTICATED:
7958 authenticated = 0x01;
7959 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7961 case MGMT_LTK_P256_UNAUTH:
7962 authenticated = 0x00;
7963 type = SMP_LTK_P256;
7965 case MGMT_LTK_P256_AUTH:
7966 authenticated = 0x01;
7967 type = SMP_LTK_P256;
7969 case MGMT_LTK_P256_DEBUG:
7970 authenticated = 0x00;
7971 type = SMP_LTK_P256_DEBUG;
7977 hci_add_ltk(hdev, &key->addr.bdaddr,
7978 le_addr_type(key->addr.type), type, authenticated,
7979 key->val, key->enc_size, key->ediv, key->rand);
7982 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7985 hci_dev_unlock(hdev);
/* cmd_complete handler for Get Connection Information: builds the reply
 * from the connection's cached rssi/tx_power values on success (invalid
 * sentinels otherwise), sends it, and drops the connection reference
 * taken when the command was queued.
 */
7990 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
7992 struct hci_conn *conn = cmd->user_data;
7993 struct mgmt_rp_get_conn_info rp;
7996 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
7998 if (status == MGMT_STATUS_SUCCESS) {
7999 rp.rssi = conn->rssi;
8000 rp.tx_power = conn->tx_power;
8001 rp.max_tx_power = conn->max_tx_power;
8003 rp.rssi = HCI_RSSI_INVALID;
8004 rp.tx_power = HCI_TX_POWER_INVALID;
8005 rp.max_tx_power = HCI_TX_POWER_INVALID;
8008 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
8009 status, &rp, sizeof(rp));
8011 hci_conn_drop(conn);
/* HCI request callback for the RSSI/TX-power refresh issued by
 * get_conn_info(): recovers the connection handle from whichever command
 * (Read RSSI or Read TX Power) was sent last, and completes the matching
 * pending MGMT_OP_GET_CONN_INFO command.
 */
8017 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
8020 struct hci_cp_read_rssi *cp;
8021 struct mgmt_pending_cmd *cmd;
8022 struct hci_conn *conn;
8026 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
8030 /* Commands sent in request are either Read RSSI or Read Transmit Power
8031 * Level so we check which one was last sent to retrieve connection
8032 * handle. Both commands have handle as first parameter so it's safe to
8033 * cast data on the same command struct.
8035 * First command sent is always Read RSSI and we fail only if it fails.
8036 * In other case we simply override error to indicate success as we
8037 * already remembered if TX power value is actually valid.
8039 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
8041 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
8042 status = MGMT_STATUS_SUCCESS;
8044 status = mgmt_status(hci_status);
8048 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
8052 handle = __le16_to_cpu(cp->handle);
8053 conn = hci_conn_hash_lookup_handle(hdev, handle);
8055 bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
8060 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
8064 cmd->cmd_complete(cmd, status);
8065 mgmt_pending_remove(cmd);
8068 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: returns cached RSSI / TX power for a
 * connection when the cache is fresh; otherwise queues Read RSSI (and,
 * when needed, Read TX Power level/max) HCI commands and defers the reply
 * to conn_info_refresh_complete().  Cache freshness is randomized between
 * hdev's min/max conn-info age to stop clients from polling in lockstep.
 */
8071 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
8074 struct mgmt_cp_get_conn_info *cp = data;
8075 struct mgmt_rp_get_conn_info rp;
8076 struct hci_conn *conn;
8077 unsigned long conn_info_age;
8080 bt_dev_dbg(hdev, "sock %p", sk);
8082 memset(&rp, 0, sizeof(rp));
8083 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8084 rp.addr.type = cp->addr.type;
8086 if (!bdaddr_type_is_valid(cp->addr.type))
8087 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8088 MGMT_STATUS_INVALID_PARAMS,
8093 if (!hdev_is_powered(hdev)) {
8094 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8095 MGMT_STATUS_NOT_POWERED, &rp,
8100 if (cp->addr.type == BDADDR_BREDR)
8101 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8104 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8106 if (!conn || conn->state != BT_CONNECTED) {
8107 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8108 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one Get Conn Info may be outstanding per connection. */
8113 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
8114 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8115 MGMT_STATUS_BUSY, &rp, sizeof(rp));
8119 /* To avoid client trying to guess when to poll again for information we
8120 * calculate conn info age as random value between min/max set in hdev.
8122 conn_info_age = hdev->conn_info_min_age +
8123 prandom_u32_max(hdev->conn_info_max_age -
8124 hdev->conn_info_min_age);
8126 /* Query controller to refresh cached values if they are too old or were
8129 if (time_after(jiffies, conn->conn_info_timestamp +
8130 msecs_to_jiffies(conn_info_age)) ||
8131 !conn->conn_info_timestamp) {
8132 struct hci_request req;
8133 struct hci_cp_read_tx_power req_txp_cp;
8134 struct hci_cp_read_rssi req_rssi_cp;
8135 struct mgmt_pending_cmd *cmd;
8137 hci_req_init(&req, hdev);
8138 req_rssi_cp.handle = cpu_to_le16(conn->handle);
8139 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
8142 /* For LE links TX power does not change thus we don't need to
8143 * query for it once value is known.
8145 if (!bdaddr_type_is_le(cp->addr.type) ||
8146 conn->tx_power == HCI_TX_POWER_INVALID) {
8147 req_txp_cp.handle = cpu_to_le16(conn->handle);
8148 req_txp_cp.type = 0x00;
8149 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8150 sizeof(req_txp_cp), &req_txp_cp);
8153 /* Max TX power needs to be read only once per connection */
8154 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
8155 req_txp_cp.handle = cpu_to_le16(conn->handle);
8156 req_txp_cp.type = 0x01;
8157 hci_req_add(&req, HCI_OP_READ_TX_POWER,
8158 sizeof(req_txp_cp), &req_txp_cp);
8161 err = hci_req_run(&req, conn_info_refresh_complete);
8165 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold a reference to the connection until the deferred
 * completion (conn_info_cmd_complete) drops it.
 */
8172 hci_conn_hold(conn);
8173 cmd->user_data = hci_conn_get(conn);
8174 cmd->cmd_complete = conn_info_cmd_complete;
8176 conn->conn_info_timestamp = jiffies;
8178 /* Cache is valid, just reply with values cached in hci_conn */
8179 rp.rssi = conn->rssi;
8180 rp.tx_power = conn->tx_power;
8181 rp.max_tx_power = conn->max_tx_power;
8183 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8184 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8188 hci_dev_unlock(hdev);
/* cmd_complete handler for Get Clock Information: fills in the local
 * clock (and, when a connection was involved, the piconet clock and
 * accuracy), replies, and releases the connection reference if one was
 * attached to the pending command.
 */
8192 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
8194 struct hci_conn *conn = cmd->user_data;
8195 struct mgmt_rp_get_clock_info rp;
8196 struct hci_dev *hdev;
8199 memset(&rp, 0, sizeof(rp));
8200 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
8205 hdev = hci_dev_get(cmd->index);
8207 rp.local_clock = cpu_to_le32(hdev->clock);
8212 rp.piconet_clock = cpu_to_le32(conn->clock);
8213 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
8217 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
8221 hci_conn_drop(conn);
/* HCI request callback for get_clock_info(): if the sent Read Clock
 * command targeted a piconet clock (which != 0), resolves the connection
 * from the handle, then completes the matching pending command.
 */
8228 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8230 struct hci_cp_read_clock *hci_cp;
8231 struct mgmt_pending_cmd *cmd;
8232 struct hci_conn *conn;
8234 bt_dev_dbg(hdev, "status %u", status);
8238 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
8242 if (hci_cp->which) {
8243 u16 handle = __le16_to_cpu(hci_cp->handle);
8244 conn = hci_conn_hash_lookup_handle(hdev, handle);
8249 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
8253 cmd->cmd_complete(cmd, mgmt_status(status));
8254 mgmt_pending_remove(cmd);
8257 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only): always reads the local
 * clock; when a non-ANY peer address is given and connected, also reads
 * that link's piconet clock (which = 0x01) and holds a connection ref
 * until clock_info_cmd_complete() runs.
 */
8260 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
8263 struct mgmt_cp_get_clock_info *cp = data;
8264 struct mgmt_rp_get_clock_info rp;
8265 struct hci_cp_read_clock hci_cp;
8266 struct mgmt_pending_cmd *cmd;
8267 struct hci_request req;
8268 struct hci_conn *conn;
8271 bt_dev_dbg(hdev, "sock %p", sk);
8273 memset(&rp, 0, sizeof(rp));
8274 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8275 rp.addr.type = cp->addr.type;
8277 if (cp->addr.type != BDADDR_BREDR)
8278 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8279 MGMT_STATUS_INVALID_PARAMS,
8284 if (!hdev_is_powered(hdev)) {
8285 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8286 MGMT_STATUS_NOT_POWERED, &rp,
8291 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8292 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8294 if (!conn || conn->state != BT_CONNECTED) {
8295 err = mgmt_cmd_complete(sk, hdev->id,
8296 MGMT_OP_GET_CLOCK_INFO,
8297 MGMT_STATUS_NOT_CONNECTED,
8305 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
8311 cmd->cmd_complete = clock_info_cmd_complete;
8313 hci_req_init(&req, hdev);
/* First Read Clock (zeroed cp) fetches the local clock. */
8315 memset(&hci_cp, 0, sizeof(hci_cp));
8316 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8319 hci_conn_hold(conn);
8320 cmd->user_data = hci_conn_get(conn);
8322 hci_cp.handle = cpu_to_le16(conn->handle);
8323 hci_cp.which = 0x01; /* Piconet clock */
8324 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8327 err = hci_req_run(&req, get_clock_info_complete);
8329 mgmt_pending_remove(cmd);
8332 hci_dev_unlock(hdev);
/* Returns whether an LE connection to addr with the given destination
 * address type exists and is in BT_CONNECTED state.
 */
8336 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8338 struct hci_conn *conn;
8340 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8344 if (conn->dst_type != type)
8347 if (conn->state != BT_CONNECTED)
8353 /* This function requires the caller holds hdev->lock */
/* Sets the auto-connect policy for a (addr, addr_type) pair, creating the
 * conn_params entry if needed, and re-files the params on the matching
 * pend_le_conns / pend_le_reports action list.
 * NOTE(review): several "¶ms" tokens below look like mojibake for
 * "&params" introduced by text extraction — confirm against upstream.
 */
8354 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8355 u8 addr_type, u8 auto_connect)
8357 struct hci_conn_params *params;
8359 params = hci_conn_params_add(hdev, addr, addr_type);
8363 if (params->auto_connect == auto_connect)
8366 list_del_init(¶ms->action);
8368 switch (auto_connect) {
8369 case HCI_AUTO_CONN_DISABLED:
8370 case HCI_AUTO_CONN_LINK_LOSS:
8371 /* If auto connect is being disabled when we're trying to
8372 * connect to device, keep connecting.
8374 if (params->explicit_connect)
8375 list_add(¶ms->action, &hdev->pend_le_conns);
8377 case HCI_AUTO_CONN_REPORT:
8378 if (params->explicit_connect)
8379 list_add(¶ms->action, &hdev->pend_le_conns);
8381 list_add(¶ms->action, &hdev->pend_le_reports);
8383 case HCI_AUTO_CONN_DIRECT:
8384 case HCI_AUTO_CONN_ALWAYS:
/* Only queue for auto-connection if not already connected. */
8385 if (!is_connected(hdev, addr, addr_type))
8386 list_add(¶ms->action, &hdev->pend_le_conns);
8390 params->auto_connect = auto_connect;
8392 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
8393 addr, addr_type, auto_connect);
/* Emits MGMT_EV_DEVICE_ADDED to all sockets except the originator. */
8398 static void device_added(struct sock *sk, struct hci_dev *hdev,
8399 bdaddr_t *bdaddr, u8 type, u8 action)
8401 struct mgmt_ev_device_added ev;
8403 bacpy(&ev.addr.bdaddr, bdaddr);
8404 ev.addr.type = type;
8407 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler: for BR/EDR, adds the address to the accept
 * list (incoming-connection action 0x01 only) and refreshes page scan;
 * for LE, maps the action (0x00 report / 0x01 direct / 0x02 always) to an
 * auto-connect policy on the identity address and refreshes background
 * scanning.  Emits Device Added and Device Flags Changed on success.
 */
8410 static int add_device(struct sock *sk, struct hci_dev *hdev,
8411 void *data, u16 len)
8413 struct mgmt_cp_add_device *cp = data;
8414 u8 auto_conn, addr_type;
8415 struct hci_conn_params *params;
8417 u32 current_flags = 0;
8419 bt_dev_dbg(hdev, "sock %p", sk);
8421 if (!bdaddr_type_is_valid(cp->addr.type) ||
8422 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
8423 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8424 MGMT_STATUS_INVALID_PARAMS,
8425 &cp->addr, sizeof(cp->addr));
8427 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
8428 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8429 MGMT_STATUS_INVALID_PARAMS,
8430 &cp->addr, sizeof(cp->addr));
8434 if (cp->addr.type == BDADDR_BREDR) {
8435 /* Only incoming connections action is supported for now */
8436 if (cp->action != 0x01) {
8437 err = mgmt_cmd_complete(sk, hdev->id,
8439 MGMT_STATUS_INVALID_PARAMS,
8440 &cp->addr, sizeof(cp->addr));
8444 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
8450 hci_req_update_scan(hdev);
8455 addr_type = le_addr_type(cp->addr.type);
8457 if (cp->action == 0x02)
8458 auto_conn = HCI_AUTO_CONN_ALWAYS;
8459 else if (cp->action == 0x01)
8460 auto_conn = HCI_AUTO_CONN_DIRECT;
8462 auto_conn = HCI_AUTO_CONN_REPORT;
8464 /* Kernel internally uses conn_params with resolvable private
8465 * address, but Add Device allows only identity addresses.
8466 * Make sure it is enforced before calling
8467 * hci_conn_params_lookup.
8469 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8470 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8471 MGMT_STATUS_INVALID_PARAMS,
8472 &cp->addr, sizeof(cp->addr));
8476 /* If the connection parameters don't exist for this device,
8477 * they will be created and configured with defaults.
8479 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
8481 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8482 MGMT_STATUS_FAILED, &cp->addr,
8486 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8489 current_flags = params->current_flags;
8492 hci_update_background_scan(hdev);
8495 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
8496 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
8497 SUPPORTED_DEVICE_FLAGS(), current_flags);
8499 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8500 MGMT_STATUS_SUCCESS, &cp->addr,
8504 hci_dev_unlock(hdev);
/* Emits MGMT_EV_DEVICE_REMOVED to all sockets except the originator. */
8508 static void device_removed(struct sock *sk, struct hci_dev *hdev,
8509 bdaddr_t *bdaddr, u8 type)
8511 struct mgmt_ev_device_removed ev;
8513 bacpy(&ev.addr.bdaddr, bdaddr);
8514 ev.addr.type = type;
8516 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/*
 * Remove Device (MGMT_OP_REMOVE_DEVICE) command handler.
 *
 * With a specific address: removes the device from the BR/EDR accept
 * list, or deletes its LE connection parameters (identity addresses
 * only). With BDADDR_ANY: flushes the whole accept list and every
 * removable LE connection-parameter entry. Replies to @sk with a
 * command status/complete echoing the address.
 *
 * NOTE(review): this listing is elided (embedded source line numbers
 * jump), so braces and some statements between the visible lines are
 * not shown here — verify against the full source.
 */
8519 static int remove_device(struct sock *sk, struct hci_dev *hdev,
8520 void *data, u16 len)
8522 struct mgmt_cp_remove_device *cp = data;
8525 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-wildcard address: remove exactly one device. */
8529 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8530 struct hci_conn_params *params;
8533 if (!bdaddr_type_is_valid(cp->addr.type)) {
8534 err = mgmt_cmd_complete(sk, hdev->id,
8535 MGMT_OP_REMOVE_DEVICE,
8536 MGMT_STATUS_INVALID_PARAMS,
8537 &cp->addr, sizeof(cp->addr));
/* BR/EDR: drop from the accept list and refresh page scan. */
8541 if (cp->addr.type == BDADDR_BREDR) {
8542 err = hci_bdaddr_list_del(&hdev->accept_list,
8546 err = mgmt_cmd_complete(sk, hdev->id,
8547 MGMT_OP_REMOVE_DEVICE,
8548 MGMT_STATUS_INVALID_PARAMS,
8554 hci_req_update_scan(hdev);
8556 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path: map MGMT address type to the HCI address type. */
8561 addr_type = le_addr_type(cp->addr.type);
8563 /* Kernel internally uses conn_params with resolvable private
8564 * address, but Remove Device allows only identity addresses.
8565 * Make sure it is enforced before calling
8566 * hci_conn_params_lookup.
8568 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8569 err = mgmt_cmd_complete(sk, hdev->id,
8570 MGMT_OP_REMOVE_DEVICE,
8571 MGMT_STATUS_INVALID_PARAMS,
8572 &cp->addr, sizeof(cp->addr));
8576 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8579 err = mgmt_cmd_complete(sk, hdev->id,
8580 MGMT_OP_REMOVE_DEVICE,
8581 MGMT_STATUS_INVALID_PARAMS,
8582 &cp->addr, sizeof(cp->addr));
/* Entries not created via Add Device cannot be removed here. */
8586 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
8587 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
8588 err = mgmt_cmd_complete(sk, hdev->id,
8589 MGMT_OP_REMOVE_DEVICE,
8590 MGMT_STATUS_INVALID_PARAMS,
8591 &cp->addr, sizeof(cp->addr));
/* NOTE(review): "¶ms" below is mojibake for "&params" — fix the
 * encoding when restoring this file.
 */
8595 list_del(¶ms->action);
8596 list_del(¶ms->list);
8598 hci_update_background_scan(hdev);
8600 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wildcard removal of all devices. */
8602 struct hci_conn_params *p, *tmp;
8603 struct bdaddr_list *b, *btmp;
/* Wildcard removal only accepts address type 0 (BR/EDR). */
8605 if (cp->addr.type) {
8606 err = mgmt_cmd_complete(sk, hdev->id,
8607 MGMT_OP_REMOVE_DEVICE,
8608 MGMT_STATUS_INVALID_PARAMS,
8609 &cp->addr, sizeof(cp->addr));
8613 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
8614 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
8619 hci_req_update_scan(hdev);
8621 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
8622 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
8624 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Explicit-connect entries stay alive; only downgrade them. */
8625 if (p->explicit_connect) {
8626 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
8629 list_del(&p->action);
8634 bt_dev_dbg(hdev, "All LE connection parameters were removed");
8636 hci_update_background_scan(hdev);
8640 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
8641 MGMT_STATUS_SUCCESS, &cp->addr,
8644 hci_dev_unlock(hdev);
/*
 * Load Connection Parameters (MGMT_OP_LOAD_CONN_PARAM) handler.
 *
 * Validates the variable-length parameter list against the command
 * length, clears previously disabled conn params, then adds/updates
 * one hci_conn_params entry per valid element. Invalid elements are
 * logged and skipped rather than failing the whole command.
 *
 * NOTE(review): listing is elided; braces and some statements between
 * the visible lines are missing here.
 */
8648 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
8651 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound chosen so expected_len below cannot overflow u16. */
8652 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
8653 sizeof(struct mgmt_conn_param));
8654 u16 param_count, expected_len;
8657 if (!lmp_le_capable(hdev))
8658 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8659 MGMT_STATUS_NOT_SUPPORTED);
8661 param_count = __le16_to_cpu(cp->param_count);
8662 if (param_count > max_param_count) {
8663 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
8665 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8666 MGMT_STATUS_INVALID_PARAMS);
/* The command payload must exactly match header + count elements. */
8669 expected_len = struct_size(cp, params, param_count);
8670 if (expected_len != len) {
8671 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
8673 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8674 MGMT_STATUS_INVALID_PARAMS);
8677 bt_dev_dbg(hdev, "param_count %u", param_count);
8681 hci_conn_params_clear_disabled(hdev);
8683 for (i = 0; i < param_count; i++) {
8684 struct mgmt_conn_param *param = &cp->params[i];
8685 struct hci_conn_params *hci_param;
8686 u16 min, max, latency, timeout;
/* NOTE(review): "¶m" below is mojibake for "&param". */
8689 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
8692 if (param->addr.type == BDADDR_LE_PUBLIC) {
8693 addr_type = ADDR_LE_DEV_PUBLIC;
8694 } else if (param->addr.type == BDADDR_LE_RANDOM) {
8695 addr_type = ADDR_LE_DEV_RANDOM;
8697 bt_dev_err(hdev, "ignoring invalid connection parameters");
8701 min = le16_to_cpu(param->min_interval);
8702 max = le16_to_cpu(param->max_interval);
8703 latency = le16_to_cpu(param->latency);
8704 timeout = le16_to_cpu(param->timeout);
8706 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
8707 min, max, latency, timeout);
/* Reject out-of-spec interval/latency/timeout combinations. */
8709 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
8710 bt_dev_err(hdev, "ignoring invalid connection parameters");
8714 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
8717 bt_dev_err(hdev, "failed to add connection parameters");
8721 hci_param->conn_min_interval = min;
8722 hci_param->conn_max_interval = max;
8723 hci_param->conn_latency = latency;
8724 hci_param->supervision_timeout = timeout;
8727 hci_dev_unlock(hdev);
8729 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/*
 * Set External Configuration (MGMT_OP_SET_EXTERNAL_CONFIG) handler.
 *
 * Only valid while the controller is powered off, with cp->config of
 * 0x00/0x01, and only if the controller has the EXTERNAL_CONFIG quirk.
 * Toggles HCI_EXT_CONFIGURED and, when the configured state actually
 * changes, moves the index between configured/unconfigured lists.
 *
 * NOTE(review): listing is elided; braces and some statements between
 * the visible lines are missing here.
 */
8733 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8734 void *data, u16 len)
8736 struct mgmt_cp_set_external_config *cp = data;
8740 bt_dev_dbg(hdev, "sock %p", sk);
8742 if (hdev_is_powered(hdev))
8743 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8744 MGMT_STATUS_REJECTED);
8746 if (cp->config != 0x00 && cp->config != 0x01)
8747 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8748 MGMT_STATUS_INVALID_PARAMS);
8750 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8752 MGMT_STATUS_NOT_SUPPORTED);
/* test_and_set/clear return the previous state, so "changed" is
 * true only when the flag actually flipped.
 */
8757 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8759 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8761 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8768 err = new_options(hdev, sk);
/* Re-announce the index if the configured state changed. */
8770 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8771 mgmt_index_removed(hdev);
8773 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8774 hci_dev_set_flag(hdev, HCI_CONFIG);
8775 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8777 queue_work(hdev->req_workqueue, &hdev->power_on);
8779 set_bit(HCI_RAW, &hdev->flags);
8780 mgmt_index_added(hdev);
8785 hci_dev_unlock(hdev);
/*
 * Set Public Address (MGMT_OP_SET_PUBLIC_ADDRESS) handler.
 *
 * Stores a public BD_ADDR for controllers that boot without one.
 * Rejected while powered, for BDADDR_ANY, or when the driver provides
 * no set_bdaddr hook. If the device thereby becomes fully configured,
 * the index is moved from the unconfigured to the configured list and
 * powered on in AUTO_OFF/CONFIG mode.
 *
 * NOTE(review): listing is elided; braces and some statements between
 * the visible lines are missing here.
 */
8789 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8790 void *data, u16 len)
8792 struct mgmt_cp_set_public_address *cp = data;
8796 bt_dev_dbg(hdev, "sock %p", sk);
8798 if (hdev_is_powered(hdev))
8799 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8800 MGMT_STATUS_REJECTED);
8802 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8803 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8804 MGMT_STATUS_INVALID_PARAMS);
8806 if (!hdev->set_bdaddr)
8807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8808 MGMT_STATUS_NOT_SUPPORTED);
8812 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8813 bacpy(&hdev->public_addr, &cp->bdaddr);
8815 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8822 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8823 err = new_options(hdev, sk);
/* Device may now be fully configured: re-register the index. */
8825 if (is_configured(hdev)) {
8826 mgmt_index_removed(hdev);
8828 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8830 hci_dev_set_flag(hdev, HCI_CONFIG);
8831 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8833 queue_work(hdev->req_workqueue, &hdev->power_on);
8837 hci_dev_unlock(hdev);
/*
 * Emit a Device Name Update event carrying the remote device's name
 * as an EIR_NAME_COMPLETE field (Tizen-specific event).
 *
 * NOTE(review): the signature continuation and the local buffer
 * declaration (the "buf" used below) are elided from this listing;
 * presumably a name-length parameter and a stack buffer — confirm
 * against the full source.
 */
8842 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
8846 struct mgmt_ev_device_name_update *ev = (void *)buf;
/* Only BR/EDR names are reported through this path. */
8852 bacpy(&ev->addr.bdaddr, bdaddr);
8853 ev->addr.type = BDADDR_BREDR;
8855 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8858 ev->eir_len = cpu_to_le16(eir_len);
8860 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
8861 sizeof(*ev) + eir_len, NULL);
/*
 * Emit a Connection Update Failed event for an LE connection
 * parameter update (Tizen-specific event).
 *
 * NOTE(review): one statement between the address assignment and the
 * mgmt_event() call is elided from this listing — presumably the
 * @status parameter being stored into the event; confirm against the
 * full source.
 */
8864 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8865 u8 link_type, u8 addr_type, u8 status)
8867 struct mgmt_ev_conn_update_failed ev;
8869 bacpy(&ev.addr.bdaddr, bdaddr);
8870 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8873 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
8874 &ev, sizeof(ev), NULL);
8877 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
8878 u8 link_type, u8 addr_type, u16 conn_interval,
8879 u16 conn_latency, u16 supervision_timeout)
8881 struct mgmt_ev_conn_updated ev;
8883 bacpy(&ev.addr.bdaddr, bdaddr);
8884 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8885 ev.conn_interval = cpu_to_le16(conn_interval);
8886 ev.conn_latency = cpu_to_le16(conn_latency);
8887 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
8889 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
8890 &ev, sizeof(ev), NULL);
/*
 * Emit an LE Device Found event including the raw advertising type
 * (Tizen-specific variant of Device Found).
 *
 * Skipped entirely unless BR/EDR or LE discovery is active. The EIR,
 * optional Class-of-Device field and scan response are concatenated
 * into one buffer; the event is dropped if they would not fit.
 *
 * NOTE(review): listing is elided; the declarations of "buf" and
 * "ev_size" and some braces are not visible here.
 */
8893 /* le device found event - Pass adv type */
8894 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8895 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
8896 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
8899 struct mgmt_ev_le_device_found *ev = (void *)buf;
8902 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
8905 /* Make sure that the buffer is big enough. The 5 extra bytes
8906 * are for the potential CoD field.
8908 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8911 memset(buf, 0, sizeof(buf));
8913 bacpy(&ev->addr.bdaddr, bdaddr);
8914 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8916 ev->flags = cpu_to_le32(flags);
8917 ev->adv_type = adv_type;
8920 memcpy(ev->eir, eir, eir_len);
/* Append CoD only if the EIR does not already carry one. */
8922 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
8923 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8926 if (scan_rsp_len > 0)
8927 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8929 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8930 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8932 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/*
 * Completion handler for the HCI request issued by
 * read_local_ssp_oob_req(): packages the controller's local OOB data
 * (192-bit and, when Secure Connections is available, 256-bit hash
 * and randomizer) into EIR fields and completes the pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command, then broadcasts
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED to interested sockets.
 *
 * NOTE(review): listing is elided; error-path branches, h192/r192/
 * h256/r256 assignments and several braces are not visible here.
 * Each "5 + 18 + 18" style value is the EIR space for CoD (5) plus
 * 16-byte hash/randomizer fields with 2-byte headers (18 each).
 */
8936 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
8937 u16 opcode, struct sk_buff *skb)
8939 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8940 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8941 u8 *h192, *r192, *h256, *r256;
8942 struct mgmt_pending_cmd *cmd;
8946 bt_dev_dbg(hdev, "status %u", status);
8948 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
8952 mgmt_cp = cmd->param;
8955 status = mgmt_status(status);
/* Legacy (non-SC) controller reply: 192-bit values only. */
8962 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
8963 struct hci_rp_read_local_oob_data *rp;
8965 if (skb->len != sizeof(*rp)) {
8966 status = MGMT_STATUS_FAILED;
8969 status = MGMT_STATUS_SUCCESS;
8970 rp = (void *)skb->data;
8972 eir_len = 5 + 18 + 18;
/* Extended reply: 192-bit plus 256-bit values (unless SC-only). */
8979 struct hci_rp_read_local_oob_ext_data *rp;
8981 if (skb->len != sizeof(*rp)) {
8982 status = MGMT_STATUS_FAILED;
8985 status = MGMT_STATUS_SUCCESS;
8986 rp = (void *)skb->data;
8988 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8989 eir_len = 5 + 18 + 18;
8993 eir_len = 5 + 18 + 18 + 18 + 18;
9003 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
9010 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
9011 hdev->dev_class, 3);
9014 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9015 EIR_SSP_HASH_C192, h192, 16);
9016 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9017 EIR_SSP_RAND_R192, r192, 16);
9021 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9022 EIR_SSP_HASH_C256, h256, 16);
9023 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9024 EIR_SSP_RAND_R256, r256, 16);
9028 mgmt_rp->type = mgmt_cp->type;
9029 mgmt_rp->eir_len = cpu_to_le16(eir_len);
9031 err = mgmt_cmd_complete(cmd->sk, hdev->id,
9032 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
9033 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
/* Only broadcast the update on success. */
9034 if (err < 0 || status)
9037 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
9039 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9040 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
9041 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
9044 mgmt_pending_remove(cmd);
/*
 * Queue an HCI request to read the controller's local OOB data for an
 * in-progress MGMT_OP_READ_LOCAL_OOB_EXT_DATA command. Chooses the
 * extended opcode when BR/EDR Secure Connections is enabled; the
 * result is delivered via read_local_oob_ext_data_complete().
 *
 * NOTE(review): listing is elided; error handling around
 * mgmt_pending_add()/hci_req_run_skb() is not fully visible here.
 */
9047 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
9048 struct mgmt_cp_read_local_oob_ext_data *cp)
9050 struct mgmt_pending_cmd *cmd;
9051 struct hci_request req;
9054 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
9059 hci_req_init(&req, hdev);
9061 if (bredr_sc_enabled(hdev))
9062 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
9064 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
9066 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
9068 mgmt_pending_remove(cmd);
/*
 * Read Local OOB Extended Data (MGMT_OP_READ_LOCAL_OOB_EXT_DATA)
 * handler.
 *
 * For BR/EDR with SSP enabled the reply is produced asynchronously
 * via read_local_ssp_oob_req(); for LE the kernel builds the reply
 * inline: address + role, optional SC confirm/random values, and a
 * Flags field. Finishes by broadcasting LOCAL_OOB_DATA_UPDATED.
 *
 * NOTE(review): listing is elided; switch/goto scaffolding, some
 * braces and a few statements are not visible here.
 */
9075 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
9076 void *data, u16 data_len)
9078 struct mgmt_cp_read_local_oob_ext_data *cp = data;
9079 struct mgmt_rp_read_local_oob_ext_data *rp;
9082 u8 status, flags, role, addr[7], hash[16], rand[16];
9085 bt_dev_dbg(hdev, "sock %p", sk);
/* cp->type is a bitmask of BDADDR_* transports. */
9087 if (hdev_is_powered(hdev)) {
9089 case BIT(BDADDR_BREDR):
9090 status = mgmt_bredr_support(hdev);
9096 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9097 status = mgmt_le_support(hdev);
/* Worst-case LE reply: addr + role + confirm + random + flags. */
9101 eir_len = 9 + 3 + 18 + 18 + 3;
9104 status = MGMT_STATUS_INVALID_PARAMS;
9109 status = MGMT_STATUS_NOT_POWERED;
9113 rp_len = sizeof(*rp) + eir_len;
9114 rp = kmalloc(rp_len, GFP_ATOMIC);
9125 case BIT(BDADDR_BREDR):
/* SSP path replies asynchronously from the HCI callback. */
9126 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9127 err = read_local_ssp_oob_req(hdev, sk, cp);
9128 hci_dev_unlock(hdev);
9132 status = MGMT_STATUS_FAILED;
9135 eir_len = eir_append_data(rp->eir, eir_len,
9137 hdev->dev_class, 3);
9140 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9141 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
9142 smp_generate_oob(hdev, hash, rand) < 0) {
9143 hci_dev_unlock(hdev);
9144 status = MGMT_STATUS_FAILED;
9148 /* This should return the active RPA, but since the RPA
9149 * is only programmed on demand, it is really hard to fill
9150 * this in at the moment. For now disallow retrieving
9151 * local out-of-band data when privacy is in use.
9153 * Returning the identity address will not help here since
9154 * pairing happens before the identity resolving key is
9155 * known and thus the connection establishment happens
9156 * based on the RPA and not the identity address.
9158 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
9159 hci_dev_unlock(hdev);
9160 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address the same way advertising does. */
9164 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
9165 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
9166 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
9167 bacmp(&hdev->static_addr, BDADDR_ANY))) {
9168 memcpy(addr, &hdev->static_addr, 6);
9171 memcpy(addr, &hdev->bdaddr, 6);
9175 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
9176 addr, sizeof(addr));
9178 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
9183 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
9184 &role, sizeof(role));
9186 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
9187 eir_len = eir_append_data(rp->eir, eir_len,
9189 hash, sizeof(hash));
9191 eir_len = eir_append_data(rp->eir, eir_len,
9193 rand, sizeof(rand));
9196 flags = mgmt_get_adv_discov_flags(hdev);
9198 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
9199 flags |= LE_AD_NO_BREDR;
9201 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
9202 &flags, sizeof(flags));
9206 hci_dev_unlock(hdev);
9208 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
9210 status = MGMT_STATUS_SUCCESS;
9213 rp->type = cp->type;
9214 rp->eir_len = cpu_to_le16(eir_len);
9216 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
9217 status, rp, sizeof(*rp) + eir_len);
9218 if (err < 0 || status)
9221 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9222 rp, sizeof(*rp) + eir_len,
9223 HCI_MGMT_OOB_DATA_EVENTS, sk);
/*
 * Build the bitmask of advertising flags this controller supports,
 * reported via Read Advertising Features. Extended-advertising
 * controllers additionally get TX power control and PHY selection
 * bits.
 *
 * NOTE(review): the local "flags" declaration/initialization and the
 * final return are elided from this listing — confirm against the
 * full source.
 */
9231 static u32 get_supported_adv_flags(struct hci_dev *hdev)
9235 flags |= MGMT_ADV_FLAG_CONNECTABLE;
9236 flags |= MGMT_ADV_FLAG_DISCOV;
9237 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
9238 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
9239 flags |= MGMT_ADV_FLAG_APPEARANCE;
9240 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
9241 flags |= MGMT_ADV_PARAM_DURATION;
9242 flags |= MGMT_ADV_PARAM_TIMEOUT;
9243 flags |= MGMT_ADV_PARAM_INTERVALS;
9244 flags |= MGMT_ADV_PARAM_TX_POWER;
9245 flags |= MGMT_ADV_PARAM_SCAN_RSP;
9247 /* In extended adv TX_POWER returned from Set Adv Param
9248 * will be always valid.
9250 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
9251 ext_adv_capable(hdev))
9252 flags |= MGMT_ADV_FLAG_TX_POWER;
9254 if (ext_adv_capable(hdev)) {
9255 flags |= MGMT_ADV_FLAG_SEC_1M;
9256 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
9257 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
/* 2M/Coded PHY bits depend on the controller's LE feature page. */
9259 if (hdev->le_features[1] & HCI_LE_PHY_2M)
9260 flags |= MGMT_ADV_FLAG_SEC_2M;
9262 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
9263 flags |= MGMT_ADV_FLAG_SEC_CODED;
/*
 * Read Advertising Features (MGMT_OP_READ_ADV_FEATURES) handler.
 *
 * Returns the supported flag mask, data-length limits, and the list
 * of currently registered advertising instance IDs. Rejected on
 * non-LE controllers and when experimental LL privacy is enabled.
 *
 * NOTE(review): listing is elided; braces and the instance-pointer
 * increment inside the loop are not visible here.
 */
9269 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
9270 void *data, u16 data_len)
9272 struct mgmt_rp_read_adv_features *rp;
9275 struct adv_info *adv_instance;
9276 u32 supported_flags;
9279 bt_dev_dbg(hdev, "sock %p", sk);
9281 if (!lmp_le_capable(hdev))
9282 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9283 MGMT_STATUS_REJECTED);
9285 /* Enabling the experimental LL Privacy support disables support for
9288 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9289 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9290 MGMT_STATUS_NOT_SUPPORTED);
/* One trailing byte per registered instance ID. */
9294 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
9295 rp = kmalloc(rp_len, GFP_ATOMIC);
9297 hci_dev_unlock(hdev);
9301 supported_flags = get_supported_adv_flags(hdev);
9303 rp->supported_flags = cpu_to_le32(supported_flags);
9304 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
9305 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
9306 rp->max_instances = hdev->le_num_of_adv_sets;
9307 rp->num_instances = hdev->adv_instance_cnt;
9309 instance = rp->instance;
9310 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
9311 *instance = adv_instance->instance;
9315 hci_dev_unlock(hdev);
9317 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9318 MGMT_STATUS_SUCCESS, rp, rp_len);
9325 static u8 calculate_name_len(struct hci_dev *hdev)
9327 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
9329 return append_local_name(hdev, buf, 0);
/*
 * Compute how many bytes of caller-supplied TLV data fit into an
 * advertising report after reserving room for every kernel-managed
 * field implied by @adv_flags (Flags, TX power, local name,
 * appearance).
 *
 * NOTE(review): listing is elided; the signature continuation
 * ("is_adv_data" parameter), the per-field subtractions, and the
 * final return are not visible here.
 */
9332 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
9335 u8 max_len = HCI_MAX_AD_LENGTH;
/* Reserve room for the kernel-generated Flags field. */
9338 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
9339 MGMT_ADV_FLAG_LIMITED_DISCOV |
9340 MGMT_ADV_FLAG_MANAGED_FLAGS))
9343 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
9346 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
9347 max_len -= calculate_name_len(hdev);
9349 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
9356 static bool flags_managed(u32 adv_flags)
9358 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
9359 MGMT_ADV_FLAG_LIMITED_DISCOV |
9360 MGMT_ADV_FLAG_MANAGED_FLAGS);
9363 static bool tx_power_managed(u32 adv_flags)
9365 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
9368 static bool name_managed(u32 adv_flags)
9370 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
9373 static bool appearance_managed(u32 adv_flags)
9375 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/*
 * Validate caller-supplied advertising / scan-response TLV data:
 * it must fit within the computed maximum length, be well-formed
 * length-type-value fields, and must not include any field the
 * kernel manages itself per @adv_flags (Flags, TX power, name,
 * appearance).
 *
 * NOTE(review): listing is elided; the length check against max_len,
 * cur_len assignment and return statements are not visible here.
 */
9378 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
9379 u8 len, bool is_adv_data)
9384 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
9389 /* Make sure that the data is correctly formatted. */
9390 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* Reject fields that the kernel generates on the caller's behalf. */
9396 if (data[i + 1] == EIR_FLAGS &&
9397 (!is_adv_data || flags_managed(adv_flags)))
9400 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
9403 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
9406 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
9409 if (data[i + 1] == EIR_APPEARANCE &&
9410 appearance_managed(adv_flags))
9413 /* If the current field length would exceed the total data
9414 * length, then it's invalid.
9416 if (i + cur_len >= len)
/*
 * Check a caller's requested advertising flags: every bit must be in
 * the controller's supported set, and at most one secondary-PHY
 * selection bit (SEC_1M/2M/CODED) may be set. The "phy_flags ^
 * (phy_flags & -phy_flags)" expression is non-zero exactly when more
 * than one PHY bit is set.
 *
 * NOTE(review): listing is elided; the return statements following
 * the condition are not visible here.
 */
9423 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
9425 u32 supported_flags, phy_flags;
9427 /* The current implementation only supports a subset of the specified
9428 * flags. Also need to check mutual exclusiveness of sec flags.
9430 supported_flags = get_supported_adv_flags(hdev);
9431 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
9432 if (adv_flags & ~supported_flags ||
9433 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
9439 static bool adv_busy(struct hci_dev *hdev)
9441 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9442 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9443 pending_find(MGMT_OP_SET_LE, hdev) ||
9444 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
9445 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
/*
 * HCI-request completion handler shared by Add Advertising and Add
 * Extended Advertising Data. On failure, removes every still-pending
 * advertising instance (cancelling the rotation timer if the current
 * instance is affected) and signals removal to userspace; then
 * completes the pending MGMT command with the mapped status.
 *
 * NOTE(review): listing is elided; braces and the cp = cmd->param
 * assignment are not visible here.
 */
9448 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
9451 struct mgmt_pending_cmd *cmd;
9452 struct mgmt_cp_add_advertising *cp;
9453 struct mgmt_rp_add_advertising rp;
9454 struct adv_info *adv_instance, *n;
9457 bt_dev_dbg(hdev, "status %u", status);
9461 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
9463 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
/* On error, clean up instances that never finished registering. */
9465 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
9466 if (!adv_instance->pending)
9470 adv_instance->pending = false;
9474 instance = adv_instance->instance;
9476 if (hdev->cur_adv_instance == instance)
9477 cancel_adv_timeout(hdev);
9479 hci_remove_adv_instance(hdev, instance);
9480 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
9487 rp.instance = cp->instance;
9490 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9491 mgmt_status(status));
9493 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9494 mgmt_status(status), &rp, sizeof(rp));
9496 mgmt_pending_remove(cmd);
9499 hci_dev_unlock(hdev);
/*
 * Add Advertising (MGMT_OP_ADD_ADVERTISING) command handler.
 *
 * Validates instance number, payload length, flags and TLV data, then
 * registers (or replaces) the advertising instance. If the instance
 * should go on air immediately, an HCI request is scheduled and the
 * command completes asynchronously in add_advertising_complete();
 * otherwise it completes right away.
 *
 * NOTE(review): listing is elided; braces, goto labels and several
 * statements between the visible lines are not shown here.
 */
9502 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
9503 void *data, u16 data_len)
9505 struct mgmt_cp_add_advertising *cp = data;
9506 struct mgmt_rp_add_advertising rp;
9509 u16 timeout, duration;
9510 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
9511 u8 schedule_instance = 0;
9512 struct adv_info *next_instance;
9514 struct mgmt_pending_cmd *cmd;
9515 struct hci_request req;
9517 bt_dev_dbg(hdev, "sock %p", sk);
9519 status = mgmt_le_support(hdev);
9521 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9524 /* Enabling the experimental LL Privacy support disables support for
9527 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9528 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9529 MGMT_STATUS_NOT_SUPPORTED);
9531 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9532 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9533 MGMT_STATUS_INVALID_PARAMS);
/* Payload must exactly cover header + adv data + scan rsp data. */
9535 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
9536 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9537 MGMT_STATUS_INVALID_PARAMS);
9539 flags = __le32_to_cpu(cp->flags);
9540 timeout = __le16_to_cpu(cp->timeout);
9541 duration = __le16_to_cpu(cp->duration);
9543 if (!requested_adv_flags_are_valid(hdev, flags))
9544 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9545 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a powered controller to be enforceable. */
9549 if (timeout && !hdev_is_powered(hdev)) {
9550 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9551 MGMT_STATUS_REJECTED);
9555 if (adv_busy(hdev)) {
9556 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9561 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
9562 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
9563 cp->scan_rsp_len, false)) {
9564 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9565 MGMT_STATUS_INVALID_PARAMS);
9569 err = hci_add_adv_instance(hdev, cp->instance, flags,
9570 cp->adv_data_len, cp->data,
9572 cp->data + cp->adv_data_len,
9574 HCI_ADV_TX_POWER_NO_PREFERENCE,
9575 hdev->le_adv_min_interval,
9576 hdev->le_adv_max_interval);
9578 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9579 MGMT_STATUS_FAILED);
9583 /* Only trigger an advertising added event if a new instance was
9586 if (hdev->adv_instance_cnt > prev_instance_cnt)
9587 mgmt_advertising_added(sk, hdev, cp->instance)
9589 if (hdev->cur_adv_instance == cp->instance) {
9590 /* If the currently advertised instance is being changed then
9591 * cancel the current advertising and schedule the next
9592 * instance. If there is only one instance then the overridden
9593 * advertising data will be visible right away.
9595 cancel_adv_timeout(hdev);
9597 next_instance = hci_get_next_instance(hdev, cp->instance);
9599 schedule_instance = next_instance->instance;
9600 } else if (!hdev->adv_instance_timeout) {
9601 /* Immediately advertise the new instance if no other
9602 * instance is currently being advertised.
9604 schedule_instance = cp->instance;
9607 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
9608 * there is no instance to be advertised then we have no HCI
9609 * communication to make. Simply return.
9611 if (!hdev_is_powered(hdev) ||
9612 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9613 !schedule_instance) {
9614 rp.instance = cp->instance;
9615 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9616 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9620 /* We're good to go, update advertising data, parameters, and start
9623 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
9630 hci_req_init(&req, hdev);
9632 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
9635 err = hci_req_run(&req, add_advertising_complete);
9638 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9639 MGMT_STATUS_FAILED);
9640 mgmt_pending_remove(cmd);
9644 hci_dev_unlock(hdev);
/*
 * HCI-request completion handler for Add Extended Advertising
 * Parameters. On success, replies with the instance's selected TX
 * power and the remaining data-length budget for the chosen flags; on
 * failure, removes the instance (signalling removal first if it was
 * already live) and returns a command status.
 *
 * NOTE(review): listing is elided; braces, the cp = cmd->param
 * assignment and the status-branch scaffolding are not visible here.
 */
9649 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
9652 struct mgmt_pending_cmd *cmd;
9653 struct mgmt_cp_add_ext_adv_params *cp;
9654 struct mgmt_rp_add_ext_adv_params rp;
9655 struct adv_info *adv_instance;
9658 BT_DBG("%s", hdev->name);
9662 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
9667 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9671 rp.instance = cp->instance;
9672 rp.tx_power = adv_instance->tx_power;
9674 /* While we're at it, inform userspace of the available space for this
9675 * advertisement, given the flags that will be used.
9677 flags = __le32_to_cpu(cp->flags);
9678 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9679 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9682 /* If this advertisement was previously advertising and we
9683 * failed to update it, we signal that it has been removed and
9684 * delete its structure
9686 if (!adv_instance->pending)
9687 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
9689 hci_remove_adv_instance(hdev, cp->instance);
9691 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9692 mgmt_status(status));
9695 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9696 mgmt_status(status), &rp, sizeof(rp));
9701 mgmt_pending_remove(cmd);
9703 hci_dev_unlock(hdev);
/*
 * Add Extended Advertising Parameters (MGMT_OP_ADD_EXT_ADV_PARAMS)
 * handler — first half of the two-call extended advertising API.
 *
 * Creates an advertising instance with parameters only (no data). On
 * extended-advertising controllers the parameters are pushed to the
 * controller asynchronously; otherwise the call completes immediately
 * and the parameters are applied when the data is set.
 *
 * NOTE(review): listing is elided; braces, goto labels and some
 * statements are not visible here.
 */
9706 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
9707 void *data, u16 data_len)
9709 struct mgmt_cp_add_ext_adv_params *cp = data;
9710 struct mgmt_rp_add_ext_adv_params rp;
9711 struct mgmt_pending_cmd *cmd = NULL;
9712 struct adv_info *adv_instance;
9713 struct hci_request req;
9714 u32 flags, min_interval, max_interval;
9715 u16 timeout, duration;
9720 BT_DBG("%s", hdev->name);
9722 status = mgmt_le_support(hdev);
9724 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9727 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9728 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9729 MGMT_STATUS_INVALID_PARAMS);
9731 /* The purpose of breaking add_advertising into two separate MGMT calls
9732 * for params and data is to allow more parameters to be added to this
9733 * structure in the future. For this reason, we verify that we have the
9734 * bare minimum structure we know of when the interface was defined. Any
9735 * extra parameters we don't know about will be ignored in this request.
/* NOTE(review): the error below reports MGMT_OP_ADD_ADVERTISING
 * rather than MGMT_OP_ADD_EXT_ADV_PARAMS — looks like a copy/paste
 * slip; verify against upstream before changing.
 */
9737 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
9738 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9739 MGMT_STATUS_INVALID_PARAMS);
9741 flags = __le32_to_cpu(cp->flags);
9743 if (!requested_adv_flags_are_valid(hdev, flags))
9744 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9745 MGMT_STATUS_INVALID_PARAMS);
9749 /* In new interface, we require that we are powered to register */
9750 if (!hdev_is_powered(hdev)) {
9751 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9752 MGMT_STATUS_REJECTED);
9756 if (adv_busy(hdev)) {
9757 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9762 /* Parse defined parameters from request, use defaults otherwise */
9763 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
9764 __le16_to_cpu(cp->timeout) : 0;
9766 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
9767 __le16_to_cpu(cp->duration) :
9768 hdev->def_multi_adv_rotation_duration;
9770 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9771 __le32_to_cpu(cp->min_interval) :
9772 hdev->le_adv_min_interval;
9774 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9775 __le32_to_cpu(cp->max_interval) :
9776 hdev->le_adv_max_interval;
9778 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
9780 HCI_ADV_TX_POWER_NO_PREFERENCE;
9782 /* Create advertising instance with no advertising or response data */
9783 err = hci_add_adv_instance(hdev, cp->instance, flags,
9784 0, NULL, 0, NULL, timeout, duration,
9785 tx_power, min_interval, max_interval);
9788 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9789 MGMT_STATUS_FAILED);
9793 /* Submit request for advertising params if ext adv available */
9794 if (ext_adv_capable(hdev)) {
9795 hci_req_init(&req, hdev);
9796 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9798 /* Updating parameters of an active instance will return a
9799 * Command Disallowed error, so we must first disable the
9800 * instance if it is active.
9802 if (!adv_instance->pending)
9803 __hci_req_disable_ext_adv_instance(&req, cp->instance);
9805 __hci_req_setup_ext_adv_instance(&req, cp->instance);
9807 err = hci_req_run(&req, add_ext_adv_params_complete);
9810 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
9811 hdev, data, data_len);
/* On failure to queue the request, undo the instance add. */
9814 hci_remove_adv_instance(hdev, cp->instance);
/* Non-ext-adv controllers: reply immediately with defaults. */
9819 rp.instance = cp->instance;
9820 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9821 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9822 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9823 err = mgmt_cmd_complete(sk, hdev->id,
9824 MGMT_OP_ADD_EXT_ADV_PARAMS,
9825 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9829 hci_dev_unlock(hdev);
9834 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9837 struct mgmt_cp_add_ext_adv_data *cp = data;
9838 struct mgmt_rp_add_ext_adv_data rp;
9839 u8 schedule_instance = 0;
9840 struct adv_info *next_instance;
9841 struct adv_info *adv_instance;
9843 struct mgmt_pending_cmd *cmd;
9844 struct hci_request req;
9846 BT_DBG("%s", hdev->name);
9850 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9852 if (!adv_instance) {
9853 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9854 MGMT_STATUS_INVALID_PARAMS);
9858 /* In new interface, we require that we are powered to register */
9859 if (!hdev_is_powered(hdev)) {
9860 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9861 MGMT_STATUS_REJECTED);
9862 goto clear_new_instance;
9865 if (adv_busy(hdev)) {
9866 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9868 goto clear_new_instance;
9871 /* Validate new data */
9872 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9873 cp->adv_data_len, true) ||
9874 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9875 cp->adv_data_len, cp->scan_rsp_len, false)) {
9876 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9877 MGMT_STATUS_INVALID_PARAMS);
9878 goto clear_new_instance;
9881 /* Set the data in the advertising instance */
9882 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9883 cp->data, cp->scan_rsp_len,
9884 cp->data + cp->adv_data_len);
9886 /* We're good to go, update advertising data, parameters, and start
9890 hci_req_init(&req, hdev);
9892 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
9894 if (ext_adv_capable(hdev)) {
9895 __hci_req_update_adv_data(&req, cp->instance);
9896 __hci_req_update_scan_rsp_data(&req, cp->instance);
9897 __hci_req_enable_ext_advertising(&req, cp->instance);
9900 /* If using software rotation, determine next instance to use */
9902 if (hdev->cur_adv_instance == cp->instance) {
9903 /* If the currently advertised instance is being changed
9904 * then cancel the current advertising and schedule the
9905 * next instance. If there is only one instance then the
9906 * overridden advertising data will be visible right
9909 cancel_adv_timeout(hdev);
9911 next_instance = hci_get_next_instance(hdev,
9914 schedule_instance = next_instance->instance;
9915 } else if (!hdev->adv_instance_timeout) {
9916 /* Immediately advertise the new instance if no other
9917 * instance is currently being advertised.
9919 schedule_instance = cp->instance;
9922 /* If the HCI_ADVERTISING flag is set or there is no instance to
9923 * be advertised then we have no HCI communication to make.
9926 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9927 !schedule_instance) {
9928 if (adv_instance->pending) {
9929 mgmt_advertising_added(sk, hdev, cp->instance);
9930 adv_instance->pending = false;
9932 rp.instance = cp->instance;
9933 err = mgmt_cmd_complete(sk, hdev->id,
9934 MGMT_OP_ADD_EXT_ADV_DATA,
9935 MGMT_STATUS_SUCCESS, &rp,
9940 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
9944 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9948 goto clear_new_instance;
9952 err = hci_req_run(&req, add_advertising_complete);
9955 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9956 MGMT_STATUS_FAILED);
9957 mgmt_pending_remove(cmd);
9958 goto clear_new_instance;
9961 /* We were successful in updating data, so trigger advertising_added
9962 * event if this is an instance that wasn't previously advertising. If
9963 * a failure occurs in the requests we initiated, we will remove the
9964 * instance again in add_advertising_complete
9966 if (adv_instance->pending)
9967 mgmt_advertising_added(sk, hdev, cp->instance);
9972 hci_remove_adv_instance(hdev, cp->instance);
9975 hci_dev_unlock(hdev);
9980 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
9983 struct mgmt_pending_cmd *cmd;
9984 struct mgmt_cp_remove_advertising *cp;
9985 struct mgmt_rp_remove_advertising rp;
9987 bt_dev_dbg(hdev, "status %u", status);
9991 /* A failure status here only means that we failed to disable
9992 * advertising. Otherwise, the advertising instance has been removed,
9993 * so report success.
9995 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
10000 rp.instance = cp->instance;
10002 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
10004 mgmt_pending_remove(cmd);
10007 hci_dev_unlock(hdev);
10010 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
10011 void *data, u16 data_len)
10013 struct mgmt_cp_remove_advertising *cp = data;
10014 struct mgmt_rp_remove_advertising rp;
10015 struct mgmt_pending_cmd *cmd;
10016 struct hci_request req;
10019 bt_dev_dbg(hdev, "sock %p", sk);
10021 /* Enabling the experimental LL Privay support disables support for
10024 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
10025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
10026 MGMT_STATUS_NOT_SUPPORTED);
10028 hci_dev_lock(hdev);
10030 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
10031 err = mgmt_cmd_status(sk, hdev->id,
10032 MGMT_OP_REMOVE_ADVERTISING,
10033 MGMT_STATUS_INVALID_PARAMS);
10037 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
10038 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
10039 pending_find(MGMT_OP_SET_LE, hdev)) {
10040 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10045 if (list_empty(&hdev->adv_instances)) {
10046 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10047 MGMT_STATUS_INVALID_PARAMS);
10051 hci_req_init(&req, hdev);
10053 /* If we use extended advertising, instance is disabled and removed */
10054 if (ext_adv_capable(hdev)) {
10055 __hci_req_disable_ext_adv_instance(&req, cp->instance);
10056 __hci_req_remove_ext_adv_instance(&req, cp->instance);
10059 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
10061 if (list_empty(&hdev->adv_instances))
10062 __hci_req_disable_advertising(&req);
10064 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
10065 * flag is set or the device isn't powered then we have no HCI
10066 * communication to make. Simply return.
10068 if (skb_queue_empty(&req.cmd_q) ||
10069 !hdev_is_powered(hdev) ||
10070 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
10071 hci_req_purge(&req);
10072 rp.instance = cp->instance;
10073 err = mgmt_cmd_complete(sk, hdev->id,
10074 MGMT_OP_REMOVE_ADVERTISING,
10075 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10079 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
10086 err = hci_req_run(&req, remove_advertising_complete);
10088 mgmt_pending_remove(cmd);
10091 hci_dev_unlock(hdev);
10096 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
10097 void *data, u16 data_len)
10099 struct mgmt_cp_get_adv_size_info *cp = data;
10100 struct mgmt_rp_get_adv_size_info rp;
10101 u32 flags, supported_flags;
10104 bt_dev_dbg(hdev, "sock %p", sk);
10106 if (!lmp_le_capable(hdev))
10107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10108 MGMT_STATUS_REJECTED);
10110 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10111 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10112 MGMT_STATUS_INVALID_PARAMS);
10114 flags = __le32_to_cpu(cp->flags);
10116 /* The current implementation only supports a subset of the specified
10119 supported_flags = get_supported_adv_flags(hdev);
10120 if (flags & ~supported_flags)
10121 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10122 MGMT_STATUS_INVALID_PARAMS);
10124 rp.instance = cp->instance;
10125 rp.flags = cp->flags;
10126 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10127 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10129 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10130 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10135 static const struct hci_mgmt_handler mgmt_handlers[] = {
10136 { NULL }, /* 0x0000 (no command) */
10137 { read_version, MGMT_READ_VERSION_SIZE,
10139 HCI_MGMT_UNTRUSTED },
10140 { read_commands, MGMT_READ_COMMANDS_SIZE,
10142 HCI_MGMT_UNTRUSTED },
10143 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
10145 HCI_MGMT_UNTRUSTED },
10146 { read_controller_info, MGMT_READ_INFO_SIZE,
10147 HCI_MGMT_UNTRUSTED },
10148 { set_powered, MGMT_SETTING_SIZE },
10149 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
10150 { set_connectable, MGMT_SETTING_SIZE },
10151 { set_fast_connectable, MGMT_SETTING_SIZE },
10152 { set_bondable, MGMT_SETTING_SIZE },
10153 { set_link_security, MGMT_SETTING_SIZE },
10154 { set_ssp, MGMT_SETTING_SIZE },
10155 { set_hs, MGMT_SETTING_SIZE },
10156 { set_le, MGMT_SETTING_SIZE },
10157 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
10158 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
10159 { add_uuid, MGMT_ADD_UUID_SIZE },
10160 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
10161 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
10162 HCI_MGMT_VAR_LEN },
10163 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
10164 HCI_MGMT_VAR_LEN },
10165 { disconnect, MGMT_DISCONNECT_SIZE },
10166 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
10167 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
10168 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
10169 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
10170 { pair_device, MGMT_PAIR_DEVICE_SIZE },
10171 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
10172 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
10173 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
10174 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
10175 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
10176 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
10177 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
10178 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
10179 HCI_MGMT_VAR_LEN },
10180 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
10181 { start_discovery, MGMT_START_DISCOVERY_SIZE },
10182 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
10183 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
10184 { block_device, MGMT_BLOCK_DEVICE_SIZE },
10185 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
10186 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
10187 { set_advertising, MGMT_SETTING_SIZE },
10188 { set_bredr, MGMT_SETTING_SIZE },
10189 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
10190 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
10191 { set_secure_conn, MGMT_SETTING_SIZE },
10192 { set_debug_keys, MGMT_SETTING_SIZE },
10193 { set_privacy, MGMT_SET_PRIVACY_SIZE },
10194 { load_irks, MGMT_LOAD_IRKS_SIZE,
10195 HCI_MGMT_VAR_LEN },
10196 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
10197 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
10198 { add_device, MGMT_ADD_DEVICE_SIZE },
10199 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
10200 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
10201 HCI_MGMT_VAR_LEN },
10202 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
10204 HCI_MGMT_UNTRUSTED },
10205 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
10206 HCI_MGMT_UNCONFIGURED |
10207 HCI_MGMT_UNTRUSTED },
10208 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
10209 HCI_MGMT_UNCONFIGURED },
10210 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
10211 HCI_MGMT_UNCONFIGURED },
10212 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
10213 HCI_MGMT_VAR_LEN },
10214 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
10215 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
10217 HCI_MGMT_UNTRUSTED },
10218 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
10219 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
10220 HCI_MGMT_VAR_LEN },
10221 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
10222 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
10223 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
10224 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
10225 HCI_MGMT_UNTRUSTED },
10226 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
10227 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
10228 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
10229 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
10230 HCI_MGMT_VAR_LEN },
10231 { set_wideband_speech, MGMT_SETTING_SIZE },
10232 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
10233 HCI_MGMT_UNTRUSTED },
10234 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
10235 HCI_MGMT_UNTRUSTED |
10236 HCI_MGMT_HDEV_OPTIONAL },
10237 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
10239 HCI_MGMT_HDEV_OPTIONAL },
10240 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
10241 HCI_MGMT_UNTRUSTED },
10242 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
10243 HCI_MGMT_VAR_LEN },
10244 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
10245 HCI_MGMT_UNTRUSTED },
10246 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
10247 HCI_MGMT_VAR_LEN },
10248 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
10249 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
10250 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
10251 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
10252 HCI_MGMT_VAR_LEN },
10253 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
10254 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
10255 HCI_MGMT_VAR_LEN },
10256 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
10257 HCI_MGMT_VAR_LEN },
10258 { add_adv_patterns_monitor_rssi,
10259 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
10260 HCI_MGMT_VAR_LEN },
10264 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
10265 { NULL }, /* 0x0000 (no command) */
10266 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
10267 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
10268 HCI_MGMT_VAR_LEN },
10269 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
10270 HCI_MGMT_VAR_LEN },
10271 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
10272 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
10273 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
10274 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
10275 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
10276 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
10277 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
10278 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
10279 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
10280 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
10281 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
10282 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
10286 void mgmt_index_added(struct hci_dev *hdev)
10288 struct mgmt_ev_ext_index ev;
10290 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10293 switch (hdev->dev_type) {
10295 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10296 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
10297 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10300 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
10301 HCI_MGMT_INDEX_EVENTS);
10312 ev.bus = hdev->bus;
10314 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
10315 HCI_MGMT_EXT_INDEX_EVENTS);
10318 void mgmt_index_removed(struct hci_dev *hdev)
10320 struct mgmt_ev_ext_index ev;
10321 u8 status = MGMT_STATUS_INVALID_INDEX;
10323 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10326 switch (hdev->dev_type) {
10328 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10330 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10331 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
10332 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10335 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
10336 HCI_MGMT_INDEX_EVENTS);
10347 ev.bus = hdev->bus;
10349 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
10350 HCI_MGMT_EXT_INDEX_EVENTS);
10353 /* This function requires the caller holds hdev->lock */
10354 static void restart_le_actions(struct hci_dev *hdev)
10356 struct hci_conn_params *p;
10358 list_for_each_entry(p, &hdev->le_conn_params, list) {
10359 /* Needed for AUTO_OFF case where might not "really"
10360 * have been powered off.
10362 list_del_init(&p->action);
10364 switch (p->auto_connect) {
10365 case HCI_AUTO_CONN_DIRECT:
10366 case HCI_AUTO_CONN_ALWAYS:
10367 list_add(&p->action, &hdev->pend_le_conns);
10369 case HCI_AUTO_CONN_REPORT:
10370 list_add(&p->action, &hdev->pend_le_reports);
10378 void mgmt_power_on(struct hci_dev *hdev, int err)
10380 struct cmd_lookup match = { NULL, hdev };
10382 bt_dev_dbg(hdev, "err %d", err);
10384 hci_dev_lock(hdev);
10387 restart_le_actions(hdev);
10388 hci_update_background_scan(hdev);
10391 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10393 new_settings(hdev, match.sk);
10396 sock_put(match.sk);
10398 hci_dev_unlock(hdev);
10401 void __mgmt_power_off(struct hci_dev *hdev)
10403 struct cmd_lookup match = { NULL, hdev };
10404 u8 status, zero_cod[] = { 0, 0, 0 };
10406 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10408 /* If the power off is because of hdev unregistration let
10409 * use the appropriate INVALID_INDEX status. Otherwise use
10410 * NOT_POWERED. We cover both scenarios here since later in
10411 * mgmt_index_removed() any hci_conn callbacks will have already
10412 * been triggered, potentially causing misleading DISCONNECTED
10413 * status responses.
10415 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
10416 status = MGMT_STATUS_INVALID_INDEX;
10418 status = MGMT_STATUS_NOT_POWERED;
10420 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10422 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
10423 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
10424 zero_cod, sizeof(zero_cod),
10425 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10426 ext_info_changed(hdev, NULL);
10429 new_settings(hdev, match.sk);
10432 sock_put(match.sk);
10435 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
10437 struct mgmt_pending_cmd *cmd;
10440 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10444 if (err == -ERFKILL)
10445 status = MGMT_STATUS_RFKILLED;
10447 status = MGMT_STATUS_FAILED;
10449 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
10451 mgmt_pending_remove(cmd);
10454 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
10457 struct mgmt_ev_new_link_key ev;
10459 memset(&ev, 0, sizeof(ev));
10461 ev.store_hint = persistent;
10462 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10463 ev.key.addr.type = BDADDR_BREDR;
10464 ev.key.type = key->type;
10465 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
10466 ev.key.pin_len = key->pin_len;
10468 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
10471 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
10473 switch (ltk->type) {
10475 case SMP_LTK_RESPONDER:
10476 if (ltk->authenticated)
10477 return MGMT_LTK_AUTHENTICATED;
10478 return MGMT_LTK_UNAUTHENTICATED;
10480 if (ltk->authenticated)
10481 return MGMT_LTK_P256_AUTH;
10482 return MGMT_LTK_P256_UNAUTH;
10483 case SMP_LTK_P256_DEBUG:
10484 return MGMT_LTK_P256_DEBUG;
10487 return MGMT_LTK_UNAUTHENTICATED;
10490 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
10492 struct mgmt_ev_new_long_term_key ev;
10494 memset(&ev, 0, sizeof(ev));
10496 /* Devices using resolvable or non-resolvable random addresses
10497 * without providing an identity resolving key don't require
10498 * to store long term keys. Their addresses will change the
10499 * next time around.
10501 * Only when a remote device provides an identity address
10502 * make sure the long term key is stored. If the remote
10503 * identity is known, the long term keys are internally
10504 * mapped to the identity address. So allow static random
10505 * and public addresses here.
10507 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10508 (key->bdaddr.b[5] & 0xc0) != 0xc0)
10509 ev.store_hint = 0x00;
10511 ev.store_hint = persistent;
10513 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10514 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
10515 ev.key.type = mgmt_ltk_type(key);
10516 ev.key.enc_size = key->enc_size;
10517 ev.key.ediv = key->ediv;
10518 ev.key.rand = key->rand;
10520 if (key->type == SMP_LTK)
10521 ev.key.initiator = 1;
10523 /* Make sure we copy only the significant bytes based on the
10524 * encryption key size, and set the rest of the value to zeroes.
10526 memcpy(ev.key.val, key->val, key->enc_size);
10527 memset(ev.key.val + key->enc_size, 0,
10528 sizeof(ev.key.val) - key->enc_size);
10530 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
10533 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
10535 struct mgmt_ev_new_irk ev;
10537 memset(&ev, 0, sizeof(ev));
10539 ev.store_hint = persistent;
10541 bacpy(&ev.rpa, &irk->rpa);
10542 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
10543 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
10544 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
10546 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
10549 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
10552 struct mgmt_ev_new_csrk ev;
10554 memset(&ev, 0, sizeof(ev));
10556 /* Devices using resolvable or non-resolvable random addresses
10557 * without providing an identity resolving key don't require
10558 * to store signature resolving keys. Their addresses will change
10559 * the next time around.
10561 * Only when a remote device provides an identity address
10562 * make sure the signature resolving key is stored. So allow
10563 * static random and public addresses here.
10565 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10566 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
10567 ev.store_hint = 0x00;
10569 ev.store_hint = persistent;
10571 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
10572 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
10573 ev.key.type = csrk->type;
10574 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
10576 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
10579 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
10580 u8 bdaddr_type, u8 store_hint, u16 min_interval,
10581 u16 max_interval, u16 latency, u16 timeout)
10583 struct mgmt_ev_new_conn_param ev;
10585 if (!hci_is_identity_address(bdaddr, bdaddr_type))
10588 memset(&ev, 0, sizeof(ev));
10589 bacpy(&ev.addr.bdaddr, bdaddr);
10590 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
10591 ev.store_hint = store_hint;
10592 ev.min_interval = cpu_to_le16(min_interval);
10593 ev.max_interval = cpu_to_le16(max_interval);
10594 ev.latency = cpu_to_le16(latency);
10595 ev.timeout = cpu_to_le16(timeout);
10597 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
10600 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
10601 u8 *name, u8 name_len)
10604 struct mgmt_ev_device_connected *ev = (void *) buf;
10608 bacpy(&ev->addr.bdaddr, &conn->dst);
10609 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10612 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
10614 ev->flags = __cpu_to_le32(flags);
10616 /* We must ensure that the EIR Data fields are ordered and
10617 * unique. Keep it simple for now and avoid the problem by not
10618 * adding any BR/EDR data to the LE adv.
10620 if (conn->le_adv_data_len > 0) {
10621 memcpy(&ev->eir[eir_len],
10622 conn->le_adv_data, conn->le_adv_data_len);
10623 eir_len = conn->le_adv_data_len;
10626 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
10629 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
10630 eir_len = eir_append_data(ev->eir, eir_len,
10632 conn->dev_class, 3);
10635 ev->eir_len = cpu_to_le16(eir_len);
10637 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
10638 sizeof(*ev) + eir_len, NULL);
10641 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
10643 struct sock **sk = data;
10645 cmd->cmd_complete(cmd, 0);
10650 mgmt_pending_remove(cmd);
10653 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
10655 struct hci_dev *hdev = data;
10656 struct mgmt_cp_unpair_device *cp = cmd->param;
10658 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
10660 cmd->cmd_complete(cmd, 0);
10661 mgmt_pending_remove(cmd);
10664 bool mgmt_powering_down(struct hci_dev *hdev)
10666 struct mgmt_pending_cmd *cmd;
10667 struct mgmt_mode *cp;
10669 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10680 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
10681 u8 link_type, u8 addr_type, u8 reason,
10682 bool mgmt_connected)
10684 struct mgmt_ev_device_disconnected ev;
10685 struct sock *sk = NULL;
10687 /* The connection is still in hci_conn_hash so test for 1
10688 * instead of 0 to know if this is the last one.
10690 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10691 cancel_delayed_work(&hdev->power_off);
10692 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10695 if (!mgmt_connected)
10698 if (link_type != ACL_LINK && link_type != LE_LINK)
10701 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
10703 bacpy(&ev.addr.bdaddr, bdaddr);
10704 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10705 ev.reason = reason;
10707 /* Report disconnects due to suspend */
10708 if (hdev->suspended)
10709 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
10711 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
10716 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10720 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10721 u8 link_type, u8 addr_type, u8 status)
10723 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
10724 struct mgmt_cp_disconnect *cp;
10725 struct mgmt_pending_cmd *cmd;
10727 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10730 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
10736 if (bacmp(bdaddr, &cp->addr.bdaddr))
10739 if (cp->addr.type != bdaddr_type)
10742 cmd->cmd_complete(cmd, mgmt_status(status));
10743 mgmt_pending_remove(cmd);
10746 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10747 u8 addr_type, u8 status)
10749 struct mgmt_ev_connect_failed ev;
10751 /* The connection is still in hci_conn_hash so test for 1
10752 * instead of 0 to know if this is the last one.
10754 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10755 cancel_delayed_work(&hdev->power_off);
10756 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10759 bacpy(&ev.addr.bdaddr, bdaddr);
10760 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10761 ev.status = mgmt_status(status);
10763 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
10766 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
10768 struct mgmt_ev_pin_code_request ev;
10770 bacpy(&ev.addr.bdaddr, bdaddr);
10771 ev.addr.type = BDADDR_BREDR;
10772 ev.secure = secure;
10774 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
10777 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10780 struct mgmt_pending_cmd *cmd;
10782 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
10786 cmd->cmd_complete(cmd, mgmt_status(status));
10787 mgmt_pending_remove(cmd);
10790 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10793 struct mgmt_pending_cmd *cmd;
10795 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
10799 cmd->cmd_complete(cmd, mgmt_status(status));
10800 mgmt_pending_remove(cmd);
10803 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10804 u8 link_type, u8 addr_type, u32 value,
10807 struct mgmt_ev_user_confirm_request ev;
10809 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10811 bacpy(&ev.addr.bdaddr, bdaddr);
10812 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10813 ev.confirm_hint = confirm_hint;
10814 ev.value = cpu_to_le32(value);
10816 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
10820 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10821 u8 link_type, u8 addr_type)
10823 struct mgmt_ev_user_passkey_request ev;
10825 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10827 bacpy(&ev.addr.bdaddr, bdaddr);
10828 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10830 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
10834 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10835 u8 link_type, u8 addr_type, u8 status,
10838 struct mgmt_pending_cmd *cmd;
10840 cmd = pending_find(opcode, hdev);
10844 cmd->cmd_complete(cmd, mgmt_status(status));
10845 mgmt_pending_remove(cmd);
10850 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10851 u8 link_type, u8 addr_type, u8 status)
10853 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10854 status, MGMT_OP_USER_CONFIRM_REPLY);
10857 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10858 u8 link_type, u8 addr_type, u8 status)
10860 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10862 MGMT_OP_USER_CONFIRM_NEG_REPLY);
10865 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10866 u8 link_type, u8 addr_type, u8 status)
10868 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10869 status, MGMT_OP_USER_PASSKEY_REPLY);
10872 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10873 u8 link_type, u8 addr_type, u8 status)
10875 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10877 MGMT_OP_USER_PASSKEY_NEG_REPLY);
10880 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
10881 u8 link_type, u8 addr_type, u32 passkey,
10884 struct mgmt_ev_passkey_notify ev;
10886 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10888 bacpy(&ev.addr.bdaddr, bdaddr);
10889 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10890 ev.passkey = __cpu_to_le32(passkey);
10891 ev.entered = entered;
10893 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
10896 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
10898 struct mgmt_ev_auth_failed ev;
10899 struct mgmt_pending_cmd *cmd;
10900 u8 status = mgmt_status(hci_status);
10902 bacpy(&ev.addr.bdaddr, &conn->dst);
10903 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10904 ev.status = status;
10906 cmd = find_pairing(conn);
10908 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
10909 cmd ? cmd->sk : NULL);
10912 cmd->cmd_complete(cmd, status);
10913 mgmt_pending_remove(cmd);
10917 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
10919 struct cmd_lookup match = { NULL, hdev };
10923 u8 mgmt_err = mgmt_status(status);
10924 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
10925 cmd_status_rsp, &mgmt_err);
10929 if (test_bit(HCI_AUTH, &hdev->flags))
10930 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10932 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10934 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10938 new_settings(hdev, match.sk);
10941 sock_put(match.sk);
10944 static void clear_eir(struct hci_request *req)
10946 struct hci_dev *hdev = req->hdev;
10947 struct hci_cp_write_eir cp;
10949 if (!lmp_ext_inq_capable(hdev))
10952 memset(hdev->eir, 0, sizeof(hdev->eir));
10954 memset(&cp, 0, sizeof(cp));
10956 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
10959 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
10961 struct cmd_lookup match = { NULL, hdev };
10962 struct hci_request req;
10963 bool changed = false;
10966 u8 mgmt_err = mgmt_status(status);
10968 if (enable && hci_dev_test_and_clear_flag(hdev,
10969 HCI_SSP_ENABLED)) {
10970 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
10971 new_settings(hdev, NULL);
10974 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
10980 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
10982 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
10984 changed = hci_dev_test_and_clear_flag(hdev,
10987 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
10990 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
10993 new_settings(hdev, match.sk);
10996 sock_put(match.sk);
10998 hci_req_init(&req, hdev);
11000 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
11001 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
11002 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
11003 sizeof(enable), &enable);
11004 __hci_req_update_eir(&req);
11009 hci_req_run(&req, NULL);
11012 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11014 struct cmd_lookup *match = data;
11016 if (match->sk == NULL) {
11017 match->sk = cmd->sk;
11018 sock_hold(match->sk);
/* Completion handler for a Class of Device update.  Answers any pending
 * SET_DEV_CLASS / ADD_UUID / REMOVE_UUID commands (sk_lookup picks the
 * socket to skip when broadcasting), then emits Class Of Device Changed
 * (3-byte CoD) and Extended Info Changed events.
 * NOTE(review): lines are elided here (likely a status check gating the
 * events) -- confirm against the full source.
 */
11022 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11025 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11027 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11028 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11029 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11032 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11033 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11034 		ext_info_changed(hdev, NULL);
/* Drop the reference taken by sk_lookup(), if any. */
11038 		sock_put(match.sk);
/* Completion handler for a local name change.  Builds a Local Name Changed
 * event carrying both the complete name and the short name, stores the new
 * name in hdev->dev_name, and broadcasts the event (skipping the socket of
 * the pending SET_LOCAL_NAME command, if any).  Events are suppressed while
 * a SET_POWERED command is pending, since power-on name programming should
 * not generate user-visible signals.
 * NOTE(review): elided lines here likely handle the no-pending-cmd case --
 * confirm against the full source.
 */
11041 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11043 	struct mgmt_cp_set_local_name ev;
11044 	struct mgmt_pending_cmd *cmd;
11049 	memset(&ev, 0, sizeof(ev));
11050 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11051 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11053 	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11055 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11057 		/* If this is a HCI command related to powering on the
11058 		 * HCI dev don't send any mgmt signals.
		 */
11060 		if (pending_find(MGMT_OP_SET_POWERED, hdev))
11064 	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11065 			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11066 	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Return true if the 128-bit 'uuid' matches any entry in the uuids[] list
 * of 'uuid_count' elements (byte-wise comparison).
 */
11069 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11073 	for (i = 0; i < uuid_count; i++) {
11074 		if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data in 'eir' (length 'eir_len') and return true
 * if any advertised service UUID matches the filter list 'uuids'.  16-bit
 * and 32-bit UUIDs are first expanded to full 128-bit form by overlaying the
 * short value onto the Bluetooth Base UUID (bytes stored little-endian, the
 * short UUID occupying bytes 12..15), then compared via has_uuid().
 * A zero field length terminates parsing; a field claiming to extend past
 * the buffer is rejected.
 */
11081 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
11085 	while (parsed < eir_len) {
11086 		u8 field_len = eir[0];
/* A zero-length field marks the end of significant EIR data. */
11090 		if (field_len == 0)
/* Guard against a malformed field running past the buffer end. */
11093 		if (eir_len - parsed < field_len + 1)
11097 		case EIR_UUID16_ALL:
11098 		case EIR_UUID16_SOME:
/* Each 16-bit UUID occupies 2 bytes starting at offset 2 of the field. */
11099 			for (i = 0; i + 3 <= field_len; i += 2) {
11100 				memcpy(uuid, bluetooth_base_uuid, 16);
11101 				uuid[13] = eir[i + 3];
11102 				uuid[12] = eir[i + 2];
11103 				if (has_uuid(uuid, uuid_count, uuids))
11107 		case EIR_UUID32_ALL:
11108 		case EIR_UUID32_SOME:
/* Each 32-bit UUID occupies 4 bytes starting at offset 2 of the field. */
11109 			for (i = 0; i + 5 <= field_len; i += 4) {
11110 				memcpy(uuid, bluetooth_base_uuid, 16);
11111 				uuid[15] = eir[i + 5];
11112 				uuid[14] = eir[i + 4];
11113 				uuid[13] = eir[i + 3];
11114 				uuid[12] = eir[i + 2];
11115 				if (has_uuid(uuid, uuid_count, uuids))
11119 		case EIR_UUID128_ALL:
11120 		case EIR_UUID128_SOME:
/* 128-bit UUIDs are compared verbatim, 16 bytes at a time. */
11121 			for (i = 0; i + 17 <= field_len; i += 16) {
11122 				memcpy(uuid, eir + i + 2, 16);
11123 				if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length byte + payload). */
11129 		parsed += field_len + 1;
11130 		eir += field_len + 1;
/* Schedule a restart of the ongoing LE scan so that duplicate-filtering
 * controllers re-report devices with fresh RSSI values.  Does nothing if
 * the controller is not scanning, or if the scan is already close enough
 * to its natural end that a restart would not complete in time.
 */
11136 static void restart_le_scan(struct hci_dev *hdev)
11138 	/* If controller is not scanning we are done. */
11139 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
/* Skip the restart when the remaining scan window is shorter than the
 * restart delay itself.
 */
11142 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
11143 		       hdev->discovery.scan_start +
11144 		       hdev->discovery.scan_duration))
11147 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
11148 			   DISCOV_LE_RESTART_DELAY);
/* Decide whether a discovered device passes the active service-discovery
 * filter.  Applies, in order: the RSSI threshold (relaxed when the
 * controller has HCI_QUIRK_STRICT_DUPLICATE_FILTER, since we may need the
 * result to trigger a scan restart), the UUID filter against both EIR and
 * scan-response data, and -- for strict-duplicate-filter controllers -- an
 * LE scan restart followed by a final RSSI re-check.
 * Returns true when the result should be reported.
 */
11151 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
11152 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11154 	/* If a RSSI threshold has been specified, and
11155 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
11156 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
11157 	 * is set, let it through for further processing, as we might need to
11158 	 * restart the scan.
	 *
11160 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
11161 	 * the results are also dropped.
	 */
11163 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11164 	    (rssi == HCI_RSSI_INVALID ||
11165 	    (rssi < hdev->discovery.rssi &&
11166 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
11169 	if (hdev->discovery.uuid_count != 0) {
11170 		/* If a list of UUIDs is provided in filter, results with no
11171 		 * matching UUID should be dropped.
		 */
11173 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
11174 				   hdev->discovery.uuids) &&
11175 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
11176 				   hdev->discovery.uuid_count,
11177 				   hdev->discovery.uuids))
11181 	/* If duplicate filtering does not report RSSI changes, then restart
11182 	 * scanning to ensure updated result with updated RSSI values.
	 */
11184 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
11185 		restart_le_scan(hdev);
11187 		/* Validate RSSI value against the RSSI threshold once more. */
11188 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11189 		    rssi < hdev->discovery.rssi)
/* Report a discovered remote device to management clients as a Device Found
 * event.  Handles event suppression (no active discovery, unless passive LE
 * reporting or advertisement monitoring requires it), service-discovery
 * filtering, limited-discovery checks, RSSI normalisation for pre-1.2 BR/EDR
 * devices, and assembly of the event buffer: EIR data, an appended Class of
 * Device field when none is present, and trailing scan-response data.
 * NOTE(review): elided lines hide the buffer declaration and several early
 * returns -- confirm against the full source.
 */
11196 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11197 		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
11198 		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11201 	struct mgmt_ev_device_found *ev = (void *)buf;
11204 	/* Don't send events for a non-kernel initiated discovery. With
11205 	 * LE one exception is if we have pend_le_reports > 0 in which
11206 	 * case we're doing passive scanning and want these events.
	 */
11208 	if (!hci_discovery_active(hdev)) {
11209 		if (link_type == ACL_LINK)
11211 		if (link_type == LE_LINK &&
11212 		    list_empty(&hdev->pend_le_reports) &&
11213 		    !hci_is_adv_monitoring(hdev)) {
11218 	if (hdev->discovery.result_filtering) {
11219 		/* We are using service discovery */
11220 		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
11225 	if (hdev->discovery.limited) {
11226 		/* Check for limited discoverable bit */
/* BR/EDR: bit 5 of the middle CoD byte is "limited discoverable". */
11228 			if (!(dev_class[1] & 0x20))
/* LE: the AD Flags field must carry LE_AD_LIMITED. */
11231 			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
11232 			if (!flags || !(flags[0] & LE_AD_LIMITED))
11237 	/* Make sure that the buffer is big enough. The 5 extra bytes
11238 	 * are for the potential CoD field.
	 */
11240 	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
11243 	memset(buf, 0, sizeof(buf));
11245 	/* In case of device discovery with BR/EDR devices (pre 1.2), the
11246 	 * RSSI value was reported as 0 when not available. This behavior
11247 	 * is kept when using device discovery. This is required for full
11248 	 * backwards compatibility with the API.
	 *
11250 	 * However when using service discovery, the value 127 will be
11251 	 * returned when the RSSI is not available.
	 */
11253 	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
11254 	    link_type == ACL_LINK)
11257 	bacpy(&ev->addr.bdaddr, bdaddr);
11258 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
11260 	ev->flags = cpu_to_le32(flags);
11263 	/* Copy EIR or advertising data into event */
11264 	memcpy(ev->eir, eir, eir_len);
/* Append a CoD field only when the EIR data did not already carry one. */
11266 	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11268 		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
11271 	if (scan_rsp_len > 0)
11272 		/* Append scan response data to event */
11273 		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
11275 	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
11276 	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
11278 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote device name as a Device Found event whose EIR
 * payload contains a single EIR_NAME_COMPLETE field.  The stack buffer is
 * sized for the event header plus a maximum-length name and the 2-byte EIR
 * field header.
 */
11281 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11282 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
11284 	struct mgmt_ev_device_found *ev;
11285 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
11288 	ev = (struct mgmt_ev_device_found *) buf;
11290 	memset(buf, 0, sizeof(buf));
11292 	bacpy(&ev->addr.bdaddr, bdaddr);
11293 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
/* Pack the name as a complete-name EIR field. */
11296 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
11299 	ev->eir_len = cpu_to_le16(eir_len);
11301 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit a Discovering event informing management clients that discovery of
 * the current type has started (discovering != 0) or stopped.
 */
11304 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
11306 	struct mgmt_ev_discovering ev;
11308 	bt_dev_dbg(hdev, "discovering %u", discovering);
11310 	memset(&ev, 0, sizeof(ev));
11311 	ev.type = hdev->discovery.type;
11312 	ev.discovering = discovering;
11314 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Emit a Controller Suspend event carrying the suspend state the
 * controller is entering.
 */
11317 void mgmt_suspending(struct hci_dev *hdev, u8 state)
11319 	struct mgmt_ev_controller_suspend ev;
11321 	ev.suspend_state = state;
11322 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
/* Emit a Controller Resume event carrying the wake reason and, when a
 * remote device caused the wake, its address.
 * NOTE(review): the elided lines here are presumably an `if (bdaddr)` /
 * `else` pair -- address filled in when known, zeroed otherwise.  Confirm
 * against the full source.
 */
11325 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
11328 	struct mgmt_ev_controller_resume ev;
11330 	ev.wake_reason = reason;
11332 		bacpy(&ev.addr.bdaddr, bdaddr);
11333 		ev.addr.type = addr_type;
11335 		memset(&ev.addr, 0, sizeof(ev.addr));
11338 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Management channel descriptor: registers the command handler table (plus
 * the Tizen vendor extension handlers) on the HCI control channel, with
 * mgmt_init_hdev run for each new hci_dev.
 */
11341 static struct hci_mgmt_chan chan = {
11342 	.channel	= HCI_CHANNEL_CONTROL,
11343 	.handler_count	= ARRAY_SIZE(mgmt_handlers),
11344 	.handlers	= mgmt_handlers,
/* Tizen-specific vendor handlers (guarded by mgmt_tizen.h support). */
11346 	.tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
11347 	.tizen_handlers = tizen_mgmt_handlers,
11349 	.hdev_init	= mgmt_init_hdev,
/* Register the management channel with the HCI core.  Returns 0 on
 * success or a negative error from hci_mgmt_chan_register().
 */
11352 int mgmt_init(void)
11354 	return hci_mgmt_chan_register(&chan);
/* Unregister the management channel on module teardown. */
11357 void mgmt_exit(void)
11359 	hci_mgmt_chan_unregister(&chan);