2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
39 #include "hci_request.h"
41 #include "mgmt_util.h"
42 #include "mgmt_config.h"
/* Protocol version/revision reported by mgmt_fill_version_info() in the
 * Read Management Version Information reply.
 */
45 #define MGMT_VERSION 1
46 #define MGMT_REVISION 21
/* Opcodes a trusted (HCI_SOCK_TRUSTED) management socket may issue; used by
 * read_commands() to build the Read Management Supported Commands reply.
 * NOTE(review): the extraction skips several original lines here (e.g. the
 * residual numbering jumps 49->52, 56->60) — some opcodes and the closing
 * brace appear to have been dropped; verify against the full source.
 */
48 static const u16 mgmt_commands[] = {
49 MGMT_OP_READ_INDEX_LIST,
52 MGMT_OP_SET_DISCOVERABLE,
53 MGMT_OP_SET_CONNECTABLE,
54 MGMT_OP_SET_FAST_CONNECTABLE,
56 MGMT_OP_SET_LINK_SECURITY,
60 MGMT_OP_SET_DEV_CLASS,
61 MGMT_OP_SET_LOCAL_NAME,
64 MGMT_OP_LOAD_LINK_KEYS,
65 MGMT_OP_LOAD_LONG_TERM_KEYS,
67 MGMT_OP_GET_CONNECTIONS,
68 MGMT_OP_PIN_CODE_REPLY,
69 MGMT_OP_PIN_CODE_NEG_REPLY,
70 MGMT_OP_SET_IO_CAPABILITY,
72 MGMT_OP_CANCEL_PAIR_DEVICE,
73 MGMT_OP_UNPAIR_DEVICE,
74 MGMT_OP_USER_CONFIRM_REPLY,
75 MGMT_OP_USER_CONFIRM_NEG_REPLY,
76 MGMT_OP_USER_PASSKEY_REPLY,
77 MGMT_OP_USER_PASSKEY_NEG_REPLY,
78 MGMT_OP_READ_LOCAL_OOB_DATA,
79 MGMT_OP_ADD_REMOTE_OOB_DATA,
80 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
81 MGMT_OP_START_DISCOVERY,
82 MGMT_OP_STOP_DISCOVERY,
85 MGMT_OP_UNBLOCK_DEVICE,
86 MGMT_OP_SET_DEVICE_ID,
87 MGMT_OP_SET_ADVERTISING,
89 MGMT_OP_SET_STATIC_ADDRESS,
90 MGMT_OP_SET_SCAN_PARAMS,
91 MGMT_OP_SET_SECURE_CONN,
92 MGMT_OP_SET_DEBUG_KEYS,
95 MGMT_OP_GET_CONN_INFO,
96 MGMT_OP_GET_CLOCK_INFO,
98 MGMT_OP_REMOVE_DEVICE,
99 MGMT_OP_LOAD_CONN_PARAM,
100 MGMT_OP_READ_UNCONF_INDEX_LIST,
101 MGMT_OP_READ_CONFIG_INFO,
102 MGMT_OP_SET_EXTERNAL_CONFIG,
103 MGMT_OP_SET_PUBLIC_ADDRESS,
104 MGMT_OP_START_SERVICE_DISCOVERY,
105 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
106 MGMT_OP_READ_EXT_INDEX_LIST,
107 MGMT_OP_READ_ADV_FEATURES,
108 MGMT_OP_ADD_ADVERTISING,
109 MGMT_OP_REMOVE_ADVERTISING,
110 MGMT_OP_GET_ADV_SIZE_INFO,
111 MGMT_OP_START_LIMITED_DISCOVERY,
112 MGMT_OP_READ_EXT_INFO,
113 MGMT_OP_SET_APPEARANCE,
114 MGMT_OP_GET_PHY_CONFIGURATION,
115 MGMT_OP_SET_PHY_CONFIGURATION,
116 MGMT_OP_SET_BLOCKED_KEYS,
117 MGMT_OP_SET_WIDEBAND_SPEECH,
118 MGMT_OP_READ_CONTROLLER_CAP,
119 MGMT_OP_READ_EXP_FEATURES_INFO,
120 MGMT_OP_SET_EXP_FEATURE,
121 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
122 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
123 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
124 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
125 MGMT_OP_GET_DEVICE_FLAGS,
126 MGMT_OP_SET_DEVICE_FLAGS,
127 MGMT_OP_READ_ADV_MONITOR_FEATURES,
128 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
129 MGMT_OP_REMOVE_ADV_MONITOR,
130 MGMT_OP_ADD_EXT_ADV_PARAMS,
131 MGMT_OP_ADD_EXT_ADV_DATA,
132 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
/* Events delivered to trusted management sockets; enumerated by
 * read_commands() in the Read Management Supported Commands reply.
 * NOTE(review): extraction dropped some lines (numbering gaps, closing
 * brace) — verify against the full source.
 */
135 static const u16 mgmt_events[] = {
136 MGMT_EV_CONTROLLER_ERROR,
138 MGMT_EV_INDEX_REMOVED,
139 MGMT_EV_NEW_SETTINGS,
140 MGMT_EV_CLASS_OF_DEV_CHANGED,
141 MGMT_EV_LOCAL_NAME_CHANGED,
142 MGMT_EV_NEW_LINK_KEY,
143 MGMT_EV_NEW_LONG_TERM_KEY,
144 MGMT_EV_DEVICE_CONNECTED,
145 MGMT_EV_DEVICE_DISCONNECTED,
146 MGMT_EV_CONNECT_FAILED,
147 MGMT_EV_PIN_CODE_REQUEST,
148 MGMT_EV_USER_CONFIRM_REQUEST,
149 MGMT_EV_USER_PASSKEY_REQUEST,
151 MGMT_EV_DEVICE_FOUND,
153 MGMT_EV_DEVICE_BLOCKED,
154 MGMT_EV_DEVICE_UNBLOCKED,
155 MGMT_EV_DEVICE_UNPAIRED,
156 MGMT_EV_PASSKEY_NOTIFY,
159 MGMT_EV_DEVICE_ADDED,
160 MGMT_EV_DEVICE_REMOVED,
161 MGMT_EV_NEW_CONN_PARAM,
162 MGMT_EV_UNCONF_INDEX_ADDED,
163 MGMT_EV_UNCONF_INDEX_REMOVED,
164 MGMT_EV_NEW_CONFIG_OPTIONS,
165 MGMT_EV_EXT_INDEX_ADDED,
166 MGMT_EV_EXT_INDEX_REMOVED,
167 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
168 MGMT_EV_ADVERTISING_ADDED,
169 MGMT_EV_ADVERTISING_REMOVED,
170 MGMT_EV_EXT_INFO_CHANGED,
171 MGMT_EV_PHY_CONFIGURATION_CHANGED,
172 MGMT_EV_EXP_FEATURE_CHANGED,
173 MGMT_EV_DEVICE_FLAGS_CHANGED,
174 MGMT_EV_ADV_MONITOR_ADDED,
175 MGMT_EV_ADV_MONITOR_REMOVED,
176 MGMT_EV_CONTROLLER_SUSPEND,
177 MGMT_EV_CONTROLLER_RESUME,
/* Read-only opcode subset permitted for untrusted (non-privileged)
 * management sockets; consumed by read_commands().
 */
180 static const u16 mgmt_untrusted_commands[] = {
181 MGMT_OP_READ_INDEX_LIST,
183 MGMT_OP_READ_UNCONF_INDEX_LIST,
184 MGMT_OP_READ_CONFIG_INFO,
185 MGMT_OP_READ_EXT_INDEX_LIST,
186 MGMT_OP_READ_EXT_INFO,
187 MGMT_OP_READ_CONTROLLER_CAP,
188 MGMT_OP_READ_EXP_FEATURES_INFO,
189 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
190 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Event subset delivered to untrusted management sockets; consumed by
 * read_commands().
 */
193 static const u16 mgmt_untrusted_events[] = {
195 MGMT_EV_INDEX_REMOVED,
196 MGMT_EV_NEW_SETTINGS,
197 MGMT_EV_CLASS_OF_DEV_CHANGED,
198 MGMT_EV_LOCAL_NAME_CHANGED,
199 MGMT_EV_UNCONF_INDEX_ADDED,
200 MGMT_EV_UNCONF_INDEX_REMOVED,
201 MGMT_EV_NEW_CONFIG_OPTIONS,
202 MGMT_EV_EXT_INDEX_ADDED,
203 MGMT_EV_EXT_INDEX_REMOVED,
204 MGMT_EV_EXT_INFO_CHANGED,
205 MGMT_EV_EXP_FEATURE_CHANGED,
/* Service-cache validity window: 2 seconds, expressed in jiffies. */
208 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* A 16-byte all-zero key value. */
210 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
211 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte; out-of-range values are mapped
 * to MGMT_STATUS_FAILED by mgmt_status() below.
 */
214 static const u8 mgmt_status_table[] = {
216 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
217 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
218 MGMT_STATUS_FAILED, /* Hardware Failure */
219 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
220 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
221 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
222 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
223 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
224 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
226 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
227 MGMT_STATUS_BUSY, /* Command Disallowed */
228 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
229 MGMT_STATUS_REJECTED, /* Rejected Security */
230 MGMT_STATUS_REJECTED, /* Rejected Personal */
231 MGMT_STATUS_TIMEOUT, /* Host Timeout */
232 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
233 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
234 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
235 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
236 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
237 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
238 MGMT_STATUS_BUSY, /* Repeated Attempts */
239 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
240 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
241 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
242 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
243 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
244 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
245 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
246 MGMT_STATUS_FAILED, /* Unspecified Error */
247 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
248 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
249 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
250 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
251 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
252 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
253 MGMT_STATUS_FAILED, /* Unit Link Key Used */
254 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
255 MGMT_STATUS_TIMEOUT, /* Instant Passed */
256 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
257 MGMT_STATUS_FAILED, /* Transaction Collision */
258 MGMT_STATUS_FAILED, /* Reserved for future use */
259 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
260 MGMT_STATUS_REJECTED, /* QoS Rejected */
261 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
262 MGMT_STATUS_REJECTED, /* Insufficient Security */
263 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
264 MGMT_STATUS_FAILED, /* Reserved for future use */
265 MGMT_STATUS_BUSY, /* Role Switch Pending */
266 MGMT_STATUS_FAILED, /* Reserved for future use */
267 MGMT_STATUS_FAILED, /* Slot Violation */
268 MGMT_STATUS_FAILED, /* Role Switch Failed */
269 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
270 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
271 MGMT_STATUS_BUSY, /* Host Busy Pairing */
272 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
273 MGMT_STATUS_BUSY, /* Controller Busy */
274 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
275 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
276 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
277 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
278 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
281 static u8 mgmt_status(u8 hci_status)
283 if (hci_status < ARRAY_SIZE(mgmt_status_table))
284 return mgmt_status_table[hci_status];
286 return MGMT_STATUS_FAILED;
289 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
292 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
296 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
297 u16 len, int flag, struct sock *skip_sk)
299 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
303 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
304 struct sock *skip_sk)
306 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
307 HCI_SOCK_TRUSTED, skip_sk);
310 static u8 le_addr_type(u8 mgmt_addr_type)
312 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
313 return ADDR_LE_DEV_PUBLIC;
315 return ADDR_LE_DEV_RANDOM;
318 void mgmt_fill_version_info(void *ver)
320 struct mgmt_rp_read_version *rp = ver;
322 rp->version = MGMT_VERSION;
323 rp->revision = cpu_to_le16(MGMT_REVISION);
326 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
329 struct mgmt_rp_read_version rp;
331 bt_dev_dbg(hdev, "sock %p", sk);
333 mgmt_fill_version_info(&rp);
335 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: reply with the supported command and event
 * opcode lists. Trusted sockets get the full tables, untrusted sockets the
 * read-only subsets.
 * NOTE(review): extraction dropped lines here (numbering gaps around
 * 344-346, 358-362, 374, 382-390) — presumably the rp_size/i/err
 * declarations, the kmalloc-failure check, the else keywords and the
 * kfree/return tail; verify against the full source.
 */
339 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
342 struct mgmt_rp_read_commands *rp;
343 u16 num_commands, num_events;
347 bt_dev_dbg(hdev, "sock %p", sk);
349 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
350 num_commands = ARRAY_SIZE(mgmt_commands);
351 num_events = ARRAY_SIZE(mgmt_events);
353 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
354 num_events = ARRAY_SIZE(mgmt_untrusted_events);
/* Reply carries one little-endian u16 per command and per event. */
357 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
359 rp = kmalloc(rp_size, GFP_KERNEL);
363 rp->num_commands = cpu_to_le16(num_commands);
364 rp->num_events = cpu_to_le16(num_events);
366 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
367 __le16 *opcode = rp->opcodes;
369 for (i = 0; i < num_commands; i++, opcode++)
370 put_unaligned_le16(mgmt_commands[i], opcode)
372 for (i = 0; i < num_events; i++, opcode++)
373 put_unaligned_le16(mgmt_events[i], opcode);
375 __le16 *opcode = rp->opcodes;
377 for (i = 0; i < num_commands; i++, opcode++)
378 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
380 for (i = 0; i < num_events; i++, opcode++)
381 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
384 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indices of all
 * configured HCI_PRIMARY controllers. Controllers in SETUP/CONFIG state,
 * user-channel controllers and raw-only devices are excluded.
 * NOTE(review): extraction dropped lines (count initialisation, the
 * kmalloc-failure return, the err declaration and the kfree/return tail);
 * verify against the full source.
 */
391 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
394 struct mgmt_rp_read_index_list *rp;
400 bt_dev_dbg(hdev, "sock %p", sk);
402 read_lock(&hci_dev_list_lock);
/* First pass: size the reply by counting qualifying controllers. */
405 list_for_each_entry(d, &hci_dev_list, list) {
406 if (d->dev_type == HCI_PRIMARY &&
407 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
411 rp_len = sizeof(*rp) + (2 * count);
412 rp = kmalloc(rp_len, GFP_ATOMIC);
414 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in the index array. */
419 list_for_each_entry(d, &hci_dev_list, list) {
420 if (hci_dev_test_flag(d, HCI_SETUP) ||
421 hci_dev_test_flag(d, HCI_CONFIG) ||
422 hci_dev_test_flag(d, HCI_USER_CHANNEL))
425 /* Devices marked as raw-only are neither configured
426 * nor unconfigured controllers.
428 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
431 if (d->dev_type == HCI_PRIMARY &&
432 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
433 rp->index[count++] = cpu_to_le16(d->id);
434 bt_dev_dbg(hdev, "Added hci%u", d->id);
/* Recompute rp_len: controllers may have been skipped in pass two. */
438 rp->num_controllers = cpu_to_le16(count);
439 rp_len = sizeof(*rp) + (2 * count);
441 read_unlock(&hci_dev_list_lock);
443 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: as read_index_list(), but lists
 * only controllers with the HCI_UNCONFIGURED flag set.
 * NOTE(review): extraction dropped lines (count initialisation, the
 * kmalloc-failure return and the kfree/return tail); verify against the
 * full source.
 */
451 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
452 void *data, u16 data_len)
454 struct mgmt_rp_read_unconf_index_list *rp;
460 bt_dev_dbg(hdev, "sock %p", sk);
462 read_lock(&hci_dev_list_lock);
/* First pass: count unconfigured primary controllers. */
465 list_for_each_entry(d, &hci_dev_list, list) {
466 if (d->dev_type == HCI_PRIMARY &&
467 hci_dev_test_flag(d, HCI_UNCONFIGURED))
471 rp_len = sizeof(*rp) + (2 * count);
472 rp = kmalloc(rp_len, GFP_ATOMIC);
474 read_unlock(&hci_dev_list_lock);
/* Second pass: collect their indices. */
479 list_for_each_entry(d, &hci_dev_list, list) {
480 if (hci_dev_test_flag(d, HCI_SETUP) ||
481 hci_dev_test_flag(d, HCI_CONFIG) ||
482 hci_dev_test_flag(d, HCI_USER_CHANNEL))
485 /* Devices marked as raw-only are neither configured
486 * nor unconfigured controllers.
488 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
491 if (d->dev_type == HCI_PRIMARY &&
492 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
493 rp->index[count++] = cpu_to_le16(d->id);
494 bt_dev_dbg(hdev, "Added hci%u", d->id);
498 rp->num_controllers = cpu_to_le16(count);
499 rp_len = sizeof(*rp) + (2 * count);
501 read_unlock(&hci_dev_list_lock);
503 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
504 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with one entry (type, bus,
 * index) per primary/AMP controller. Entry type: 0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP. Also switches the socket over to
 * extended index events.
 * NOTE(review): extraction dropped lines (count initialisation, the
 * kmalloc-failure return, an else/continue around the AMP branch and the
 * kfree/return tail); verify against the full source.
 */
511 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
512 void *data, u16 data_len)
514 struct mgmt_rp_read_ext_index_list *rp;
519 bt_dev_dbg(hdev, "sock %p", sk);
521 read_lock(&hci_dev_list_lock);
/* First pass: count primary and AMP controllers for the allocation. */
524 list_for_each_entry(d, &hci_dev_list, list) {
525 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
529 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
531 read_unlock(&hci_dev_list_lock);
/* Second pass: fill the entries. */
536 list_for_each_entry(d, &hci_dev_list, list) {
537 if (hci_dev_test_flag(d, HCI_SETUP) ||
538 hci_dev_test_flag(d, HCI_CONFIG) ||
539 hci_dev_test_flag(d, HCI_USER_CHANNEL))
542 /* Devices marked as raw-only are neither configured
543 * nor unconfigured controllers.
545 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
548 if (d->dev_type == HCI_PRIMARY) {
549 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
550 rp->entry[count].type = 0x01;
552 rp->entry[count].type = 0x00;
553 } else if (d->dev_type == HCI_AMP) {
554 rp->entry[count].type = 0x02;
559 rp->entry[count].bus = d->bus;
560 rp->entry[count++].index = cpu_to_le16(d->id);
561 bt_dev_dbg(hdev, "Added hci%u", d->id);
564 rp->num_controllers = cpu_to_le16(count);
566 read_unlock(&hci_dev_list_lock);
568 /* If this command is called at least once, then all the
569 * default index and unconfigured index events are disabled
570 * and from now on only extended index events are used.
572 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
573 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
574 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
576 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
577 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
578 struct_size(rp, entry, count));
/* Return whether the controller's mandatory configuration (external config
 * and/or public address, depending on quirks) has been completed.
 * NOTE(review): extraction dropped the return statements (presumably
 * "return false;" after each check and a final "return true;") — verify
 * against the full source.
 */
585 static bool is_configured(struct hci_dev *hdev)
/* External configuration required but not yet performed. */
587 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
588 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
/* A valid public address is required but none has been set. */
591 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
592 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
593 !bacmp(&hdev->public_addr, BDADDR_ANY))
599 static __le32 get_missing_options(struct hci_dev *hdev)
603 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
604 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
605 options |= MGMT_OPTION_EXTERNAL_CONFIG;
607 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
608 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
609 !bacmp(&hdev->public_addr, BDADDR_ANY))
610 options |= MGMT_OPTION_PUBLIC_ADDRESS;
612 return cpu_to_le32(options);
615 static int new_options(struct hci_dev *hdev, struct sock *skip)
617 __le32 options = get_missing_options(hdev);
619 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
620 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
623 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
625 __le32 options = get_missing_options(hdev);
627 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: reply with manufacturer, supported
 * configuration options and the currently missing options.
 * NOTE(review): extraction dropped lines (the "u32 options = 0;"
 * declaration, the hci_dev_lock() matching the unlock below, and the
 * reply-size argument of the final call); verify against the full source.
 */
631 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
632 void *data, u16 data_len)
634 struct mgmt_rp_read_config_info rp;
637 bt_dev_dbg(hdev, "sock %p", sk);
641 memset(&rp, 0, sizeof(rp));
642 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
644 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
645 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* Changing the public address is only supported with a set_bdaddr hook. */
647 if (hdev->set_bdaddr)
648 options |= MGMT_OPTION_PUBLIC_ADDRESS;
650 rp.supported_options = cpu_to_le32(options);
651 rp.missing_options = get_missing_options(hdev);
653 hci_dev_unlock(hdev);
655 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the bitmask of PHYs this controller supports, derived from its
 * BR/EDR LMP features and LE feature bits.
 * NOTE(review): extraction dropped the closing braces of the nested if
 * blocks (numbering gaps at 665, 668, 671, 689-692, 700-707); verify
 * against the full source.
 */
659 static u32 get_supported_phys(struct hci_dev *hdev)
661 u32 supported_phys = 0;
/* BR/EDR basic-rate packets, plus 3/5-slot variants per LMP features. */
663 if (lmp_bredr_capable(hdev)) {
664 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
666 if (hdev->features[0][0] & LMP_3SLOT)
667 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
669 if (hdev->features[0][0] & LMP_5SLOT)
670 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
672 if (lmp_edr_2m_capable(hdev)) {
673 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
675 if (lmp_edr_3slot_capable(hdev))
676 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
678 if (lmp_edr_5slot_capable(hdev))
679 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
681 if (lmp_edr_3m_capable(hdev)) {
682 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
684 if (lmp_edr_3slot_capable(hdev))
685 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
687 if (lmp_edr_5slot_capable(hdev))
688 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
/* LE 1M is mandatory; 2M and Coded depend on LE feature byte 1. */
693 if (lmp_le_capable(hdev)) {
694 supported_phys |= MGMT_PHY_LE_1M_TX;
695 supported_phys |= MGMT_PHY_LE_1M_RX;
697 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
698 supported_phys |= MGMT_PHY_LE_2M_TX;
699 supported_phys |= MGMT_PHY_LE_2M_RX;
702 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
703 supported_phys |= MGMT_PHY_LE_CODED_TX;
704 supported_phys |= MGMT_PHY_LE_CODED_RX;
708 return supported_phys;
/* Build the bitmask of PHYs currently selected: for BR/EDR derived from
 * the packet-type mask (note: the HCI_2DHx/HCI_3DHx bits disable EDR
 * packet types, hence the negated tests), for LE from the default TX/RX
 * PHY preference masks.
 * NOTE(review): extraction dropped the closing braces of the nested
 * blocks; verify against the full source.
 */
711 static u32 get_selected_phys(struct hci_dev *hdev)
713 u32 selected_phys = 0;
715 if (lmp_bredr_capable(hdev)) {
716 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
718 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
719 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
721 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
722 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
724 if (lmp_edr_2m_capable(hdev)) {
725 if (!(hdev->pkt_type & HCI_2DH1))
726 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
728 if (lmp_edr_3slot_capable(hdev) &&
729 !(hdev->pkt_type & HCI_2DH3))
730 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
732 if (lmp_edr_5slot_capable(hdev) &&
733 !(hdev->pkt_type & HCI_2DH5))
734 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
736 if (lmp_edr_3m_capable(hdev)) {
737 if (!(hdev->pkt_type & HCI_3DH1))
738 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
740 if (lmp_edr_3slot_capable(hdev) &&
741 !(hdev->pkt_type & HCI_3DH3))
742 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
744 if (lmp_edr_5slot_capable(hdev) &&
745 !(hdev->pkt_type & HCI_3DH5))
746 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
751 if (lmp_le_capable(hdev)) {
752 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
753 selected_phys |= MGMT_PHY_LE_1M_TX;
755 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
756 selected_phys |= MGMT_PHY_LE_1M_RX;
758 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
759 selected_phys |= MGMT_PHY_LE_2M_TX;
761 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
762 selected_phys |= MGMT_PHY_LE_2M_RX;
764 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
765 selected_phys |= MGMT_PHY_LE_CODED_TX;
767 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
768 selected_phys |= MGMT_PHY_LE_CODED_RX;
771 return selected_phys;
774 static u32 get_configurable_phys(struct hci_dev *hdev)
776 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
777 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the bitmask of settings this controller can support, based on its
 * LMP/LE capabilities and quirks.
 * NOTE(review): extraction dropped lines (the "u32 settings = 0;"
 * declaration, continuation of the wideband-speech test, the
 * "hdev->set_bdaddr" half of the CONFIGURATION condition and the final
 * "return settings;"); verify against the full source.
 */
780 static u32 get_supported_settings(struct hci_dev *hdev)
/* Settings available on every controller. */
784 settings |= MGMT_SETTING_POWERED;
785 settings |= MGMT_SETTING_BONDABLE;
786 settings |= MGMT_SETTING_DEBUG_KEYS;
787 settings |= MGMT_SETTING_CONNECTABLE;
788 settings |= MGMT_SETTING_DISCOVERABLE;
790 if (lmp_bredr_capable(hdev)) {
791 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
792 settings |= MGMT_SETTING_FAST_CONNECTABLE;
793 settings |= MGMT_SETTING_BREDR;
794 settings |= MGMT_SETTING_LINK_SECURITY;
796 if (lmp_ssp_capable(hdev)) {
797 settings |= MGMT_SETTING_SSP;
/* High Speed is only offered when compiled in. */
798 if (IS_ENABLED(CONFIG_BT_HS))
799 settings |= MGMT_SETTING_HS;
802 if (lmp_sc_capable(hdev))
803 settings |= MGMT_SETTING_SECURE_CONN;
805 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
807 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
810 if (lmp_le_capable(hdev)) {
811 settings |= MGMT_SETTING_LE;
812 settings |= MGMT_SETTING_SECURE_CONN;
813 settings |= MGMT_SETTING_PRIVACY;
814 settings |= MGMT_SETTING_STATIC_ADDRESS;
816 /* When the experimental feature for LL Privacy support is
817 * enabled, then advertising is no longer supported.
819 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
820 settings |= MGMT_SETTING_ADVERTISING;
823 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
825 settings |= MGMT_SETTING_CONFIGURATION;
827 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the bitmask of settings currently in effect, derived from the
 * controller's dev flags and power state.
 * NOTE(review): extraction dropped lines (the "u32 settings = 0;"
 * declaration and the final "return settings;"); verify against the full
 * source.
 */
832 static u32 get_current_settings(struct hci_dev *hdev)
836 if (hdev_is_powered(hdev))
837 settings |= MGMT_SETTING_POWERED;
839 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
840 settings |= MGMT_SETTING_CONNECTABLE;
842 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
843 settings |= MGMT_SETTING_FAST_CONNECTABLE;
845 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
846 settings |= MGMT_SETTING_DISCOVERABLE;
848 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
849 settings |= MGMT_SETTING_BONDABLE;
851 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
852 settings |= MGMT_SETTING_BREDR;
854 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
855 settings |= MGMT_SETTING_LE;
857 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
858 settings |= MGMT_SETTING_LINK_SECURITY;
860 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
861 settings |= MGMT_SETTING_SSP;
863 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
864 settings |= MGMT_SETTING_HS;
866 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
867 settings |= MGMT_SETTING_ADVERTISING;
869 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
870 settings |= MGMT_SETTING_SECURE_CONN;
872 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
873 settings |= MGMT_SETTING_DEBUG_KEYS;
875 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
876 settings |= MGMT_SETTING_PRIVACY;
878 /* The current setting for static address has two purposes. The
879 * first is to indicate if the static address will be used and
880 * the second is to indicate if it is actually set.
882 * This means if the static address is not configured, this flag
883 * will never be set. If the address is configured, then if the
884 * address is actually used decides if the flag is set or not.
886 * For single mode LE only controllers and dual-mode controllers
887 * with BR/EDR disabled, the existence of the static address will
890 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
891 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
892 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
893 if (bacmp(&hdev->static_addr, BDADDR_ANY))
894 settings |= MGMT_SETTING_STATIC_ADDRESS;
897 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
898 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
903 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
905 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
908 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
909 struct hci_dev *hdev,
912 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Return the advertising-data discoverability flags (LE_AD_GENERAL /
 * LE_AD_LIMITED) for @hdev. A pending Set Discoverable command takes
 * precedence over the current dev flags.
 * NOTE(review): extraction dropped lines ("if (cmd) {", the cp->val ==
 * 0x01 test, the else branch pairing and a final "return 0;" presumably);
 * verify against the full source.
 */
915 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
917 struct mgmt_pending_cmd *cmd;
919 /* If there's a pending mgmt command the flags will not yet have
920 * their final values, so check for this first.
922 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
924 struct mgmt_mode *cp = cmd->param;
926 return LE_AD_GENERAL;
927 else if (cp->val == 0x02)
928 return LE_AD_LIMITED;
930 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
931 return LE_AD_LIMITED;
932 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
933 return LE_AD_GENERAL;
/* Return whether the controller is (or is about to become) connectable.
 * A pending Set Connectable command overrides the current flag.
 * NOTE(review): extraction dropped lines ("if (cmd) {" and the
 * "return cp->val;" of that branch, presumably); verify against the full
 * source.
 */
939 bool mgmt_get_connectable(struct hci_dev *hdev)
941 struct mgmt_pending_cmd *cmd;
943 /* If there's a pending mgmt command the flag will not yet have
944 * it's final value, so check for this first.
946 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
948 struct mgmt_mode *cp = cmd->param;
953 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Delayed-work handler: when the service cache period ends, push the
 * up-to-date EIR data and class of device to the controller.
 * NOTE(review): extraction dropped lines (the container_of() continuation
 * naming the work member, a "return;" after the flag test and the
 * hci_dev_lock() matching the unlock); verify against the full source.
 */
956 static void service_cache_off(struct work_struct *work)
958 struct hci_dev *hdev = container_of(work, struct hci_dev,
960 struct hci_request req;
/* Only act if the cache flag was actually set; clears it atomically. */
962 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
965 hci_req_init(&req, hdev);
969 __hci_req_update_eir(&req);
970 __hci_req_update_class(&req);
972 hci_dev_unlock(hdev);
974 hci_req_run(&req, NULL);
/* Delayed-work handler: mark the resolvable private address as expired and,
 * if advertising is active, restart it so a fresh RPA gets programmed.
 * NOTE(review): extraction dropped lines (the container_of() continuation,
 * a "return;" after the advertising test and the "else" pairing the
 * ext_adv branch); verify against the full source.
 */
977 static void rpa_expired(struct work_struct *work)
979 struct hci_dev *hdev = container_of(work, struct hci_dev,
981 struct hci_request req;
983 bt_dev_dbg(hdev, "");
985 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
987 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
990 /* The generation of a new RPA and programming it into the
991 * controller happens in the hci_req_enable_advertising()
994 hci_req_init(&req, hdev);
995 if (ext_adv_capable(hdev))
996 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
998 __hci_req_enable_advertising(&req);
999 hci_req_run(&req, NULL);
/* One-time per-controller mgmt initialisation, performed when the first
 * management command targets @hdev. Idempotent via the HCI_MGMT flag.
 * NOTE(review): extraction dropped the "return;" after the
 * test_and_set_flag guard (presumably); verify against the full source.
 */
1002 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
/* Already initialised for mgmt - nothing to do. */
1004 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1007 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1008 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1010 /* Non-mgmt controlled devices get this bit set
1011 * implicitly so that pairing works for them, however
1012 * for mgmt we require user-space to explicitly enable
1015 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: reply with address, HCI version,
 * manufacturer, supported/current settings, class of device and names.
 * NOTE(review): extraction dropped the hci_dev_lock() matching the unlock
 * and the reply-size argument of the final call; verify against the full
 * source.
 */
1018 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1019 void *data, u16 data_len)
1021 struct mgmt_rp_read_info rp;
1023 bt_dev_dbg(hdev, "sock %p", sk);
1027 memset(&rp, 0, sizeof(rp));
1029 bacpy(&rp.bdaddr, &hdev->bdaddr);
1031 rp.version = hdev->hci_ver;
1032 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1034 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1035 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1037 memcpy(rp.dev_class, hdev->dev_class, 3);
1039 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1040 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1042 hci_dev_unlock(hdev);
1044 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Append class-of-device (BR/EDR), appearance (LE) and the complete/short
 * local names as EIR structures into @eir; returns the total length
 * written.
 * NOTE(review): extraction dropped lines (the eir_len/name_len
 * declarations, the appearance value argument and the final return);
 * verify against the full source.
 */
1048 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1053 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1054 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1055 hdev->dev_class, 3);
1057 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1058 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1061 name_len = strlen(hdev->dev_name);
1062 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1063 hdev->dev_name, name_len);
1065 name_len = strlen(hdev->short_name);
1066 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1067 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but packs
 * class/appearance/names as EIR data, and switches the socket to extended
 * info events.
 * NOTE(review): extraction dropped lines (the local buffer declaration
 * sized for rp + EIR, the eir_len declaration and the hci_dev_lock()
 * matching the unlock); verify against the full source.
 */
1072 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1073 void *data, u16 data_len)
1076 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1079 bt_dev_dbg(hdev, "sock %p", sk);
1081 memset(&buf, 0, sizeof(buf));
1085 bacpy(&rp->bdaddr, &hdev->bdaddr);
1087 rp->version = hdev->hci_ver;
1088 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1090 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1091 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1094 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1095 rp->eir_len = cpu_to_le16(eir_len);
1097 hci_dev_unlock(hdev);
1099 /* If this command is called at least once, then the events
1100 * for class of device and local name changes are disabled
1101 * and only the new extended controller information event
1104 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1105 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1106 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1108 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1109 sizeof(*rp) + eir_len);
/* Broadcast an Extended Controller Information Changed event carrying the
 * current EIR data, to sockets subscribed to extended info events,
 * excluding @skip.
 * NOTE(review): extraction dropped the local buffer declaration and the
 * eir_len declaration; verify against the full source.
 */
1112 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1115 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1118 memset(buf, 0, sizeof(buf));
1120 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1121 ev->eir_len = cpu_to_le16(eir_len);
1123 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1124 sizeof(*ev) + eir_len,
1125 HCI_MGMT_EXT_INFO_EVENTS, skip);
1128 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1130 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1132 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1136 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1138 bt_dev_dbg(hdev, "status 0x%02x", status);
1140 if (hci_conn_count(hdev) == 0) {
1141 cancel_delayed_work(&hdev->power_off);
1142 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1146 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1148 struct mgmt_ev_advertising_added ev;
1150 ev.instance = instance;
1152 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1155 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1158 struct mgmt_ev_advertising_removed ev;
1160 ev.instance = instance;
1162 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1165 static void cancel_adv_timeout(struct hci_dev *hdev)
1167 if (hdev->adv_instance_timeout) {
1168 hdev->adv_instance_timeout = 0;
1169 cancel_delayed_work(&hdev->adv_instance_expire);
/* Queue the HCI commands needed before powering off: disable page/inquiry
 * scan, remove advertising instances, stop discovery and abort every
 * connection. Returns the hci_req_run() result (-ENODATA when nothing was
 * queued).
 * NOTE(review): extraction dropped lines (the err declaration, the
 * "u8 scan = 0x00;" used by the scan-disable write, and closing braces);
 * verify against the full source.
 */
1173 static int clean_up_hci_state(struct hci_dev *hdev)
1175 struct hci_request req;
1176 struct hci_conn *conn;
1177 bool discov_stopped;
1180 hci_req_init(&req, hdev);
1182 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1183 test_bit(HCI_PSCAN, &hdev->flags)) {
1185 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1188 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1190 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1191 __hci_req_disable_advertising(&req);
1193 discov_stopped = hci_req_stop_discovery(&req);
1195 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1196 /* 0x15 == Terminated due to Power Off */
1197 __hci_abort_conn(&req, conn, 0x15);
1200 err = hci_req_run(&req, clean_up_hci_complete);
1201 if (!err && discov_stopped)
1202 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1207 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1210 struct mgmt_mode *cp = data;
1211 struct mgmt_pending_cmd *cmd;
1214 bt_dev_dbg(hdev, "sock %p", sk);
1216 if (cp->val != 0x00 && cp->val != 0x01)
1217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1218 MGMT_STATUS_INVALID_PARAMS);
1222 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1223 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1228 if (!!cp->val == hdev_is_powered(hdev)) {
1229 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1233 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1240 queue_work(hdev->req_workqueue, &hdev->power_on);
1243 /* Disconnect connections, stop scans, etc */
1244 err = clean_up_hci_state(hdev);
1246 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1247 HCI_POWER_OFF_TIMEOUT);
1249 /* ENODATA means there were no HCI commands queued */
1250 if (err == -ENODATA) {
1251 cancel_delayed_work(&hdev->power_off);
1252 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1258 hci_dev_unlock(hdev);
/* Send MGMT_EV_NEW_SETTINGS with the device's current settings bitmask
 * to all sockets that opted into setting events, except @skip.
 */
1262 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1264 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1266 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1267 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast a settings-changed event to every listener. */
1270 int mgmt_new_settings(struct hci_dev *hdev)
1272 return new_settings(hdev, NULL);
1277 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings, remove it from the pending list, and remember the
 * first socket seen in the cmd_lookup match (with a held reference) so
 * the caller can skip it when broadcasting NEW_SETTINGS.
 */
1281 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1283 struct cmd_lookup *match = data;
1285 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1287 list_del(&cmd->list);
1289 if (match->sk == NULL) {
1290 match->sk = cmd->sk;
1291 sock_hold(match->sk);
1294 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and drop it from the pending list.
 */
1297 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1301 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1302 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: complete a pending command via its
 * per-command cmd_complete handler when one is set, otherwise fall back
 * to a plain status response.
 */
1305 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1307 if (cmd->cmd_complete) {
1310 cmd->cmd_complete(cmd, *status);
1311 mgmt_pending_remove(cmd);
1316 cmd_status_rsp(cmd, data);
/* cmd_complete handler that echoes back the command's full stored
 * parameter buffer as the response payload.
 */
1319 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1321 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1322 cmd->param, cmd->param_len);
/* cmd_complete handler for commands whose parameters begin with a
 * mgmt_addr_info: respond with just that address portion.
 */
1325 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1327 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1328 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, otherwise
 * SUCCESS.
 */
1331 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1333 if (!lmp_bredr_capable(hdev))
1334 return MGMT_STATUS_NOT_SUPPORTED;
1335 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1336 return MGMT_STATUS_REJECTED;
1338 return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled, otherwise SUCCESS.
 */
1341 static u8 mgmt_le_support(struct hci_dev *hdev)
1343 if (!lmp_le_capable(hdev))
1344 return MGMT_STATUS_NOT_SUPPORTED;
1345 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1346 return MGMT_STATUS_REJECTED;
1348 return MGMT_STATUS_SUCCESS;
/* Completion handler for a Set Discoverable operation. On failure,
 * report the error and clear the limited-discoverable flag; on success,
 * (re)arm the discoverable timeout if one is configured, reply to the
 * originator and broadcast the new settings.
 */
1351 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1353 struct mgmt_pending_cmd *cmd;
1355 bt_dev_dbg(hdev, "status 0x%02x", status);
1359 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1364 u8 mgmt_err = mgmt_status(status);
1365 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1366 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Discoverable with a timeout: start the discov_off timer that
 * turns discoverable mode back off when it expires.
 */
1370 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1371 hdev->discov_timeout > 0) {
1372 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1373 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1376 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1377 new_settings(hdev, cmd->sk);
1380 mgmt_pending_remove(cmd);
1383 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general
 * discoverable, 0x02 limited discoverable (requires a timeout).
 * Validates the mode/timeout combination, handles the powered-off case
 * by flag changes alone, short-circuits when only the timeout changes,
 * and otherwise queues the discoverable_update work with a pending
 * command tracking the request.
 */
1386 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1389 struct mgmt_cp_set_discoverable *cp = data;
1390 struct mgmt_pending_cmd *cmd;
1394 bt_dev_dbg(hdev, "sock %p", sk);
1396 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1397 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1398 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1399 MGMT_STATUS_REJECTED);
1401 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1402 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1403 MGMT_STATUS_INVALID_PARAMS);
1405 timeout = __le16_to_cpu(cp->timeout);
1407 /* Disabling discoverable requires that no timeout is set,
1408 * and enabling limited discoverable requires a timeout.
1410 if ((cp->val == 0x00 && timeout > 0) ||
1411 (cp->val == 0x02 && timeout == 0))
1412 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1413 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
1417 if (!hdev_is_powered(hdev) && timeout > 0) {
1418 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1419 MGMT_STATUS_NOT_POWERED);
1423 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1424 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1425 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be enabled first. */
1430 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1431 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1432 MGMT_STATUS_REJECTED);
1436 if (hdev->advertising_paused) {
1437 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1442 if (!hdev_is_powered(hdev)) {
1443 bool changed = false;
1445 /* Setting limited discoverable when powered off is
1446 * not a valid operation since it requires a timeout
1447 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1449 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1450 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1454 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1459 err = new_settings(hdev, sk);
1464 /* If the current mode is the same, then just update the timeout
1465 * value with the new value. And if only the timeout gets updated,
1466 * then no need for any HCI transactions.
1468 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1469 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1470 HCI_LIMITED_DISCOVERABLE)) {
1471 cancel_delayed_work(&hdev->discov_off);
1472 hdev->discov_timeout = timeout;
1474 if (cp->val && hdev->discov_timeout > 0) {
1475 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1476 queue_delayed_work(hdev->req_workqueue,
1477 &hdev->discov_off, to);
1480 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1484 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1490 /* Cancel any potential discoverable timeout that might be
1491 * still active and store new timeout value. The arming of
1492 * the timeout happens in the complete handler.
1494 cancel_delayed_work(&hdev->discov_off);
1495 hdev->discov_timeout = timeout;
1498 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1500 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1502 /* Limited discoverable mode */
1503 if (cp->val == 0x02)
1504 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1506 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1508 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1512 hci_dev_unlock(hdev);
/* Completion handler for a Set Connectable operation: report failure to
 * the originator, or on success send the settings response and
 * broadcast the new settings, then drop the pending command.
 */
1516 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1518 struct mgmt_pending_cmd *cmd;
1520 bt_dev_dbg(hdev, "status 0x%02x", status);
1524 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1529 u8 mgmt_err = mgmt_status(status);
1530 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1534 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1535 new_settings(hdev, cmd->sk);
1538 mgmt_pending_remove(cmd);
1541 hci_dev_unlock(hdev);
/* Apply a connectable-mode change via flags only (no HCI traffic) —
 * used when the controller is powered off. Disabling connectable also
 * clears discoverable. When something changed, refresh scan state and
 * broadcast the new settings.
 */
1544 static int set_connectable_update_settings(struct hci_dev *hdev,
1545 struct sock *sk, u8 val)
1547 bool changed = false;
1550 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1554 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1556 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1557 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1560 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1565 hci_req_update_scan(hdev);
1566 hci_update_background_scan(hdev);
1567 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler. Validates the mode, takes the
 * flag-only path when powered off, rejects when a conflicting
 * discoverable/connectable request is pending, and otherwise adds a
 * pending command, updates the flags and queues connectable_update.
 */
1573 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1576 struct mgmt_mode *cp = data;
1577 struct mgmt_pending_cmd *cmd;
1580 bt_dev_dbg(hdev, "sock %p", sk);
1582 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1583 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1585 MGMT_STATUS_REJECTED);
1587 if (cp->val != 0x00 && cp->val != 0x01)
1588 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1589 MGMT_STATUS_INVALID_PARAMS);
1593 if (!hdev_is_powered(hdev)) {
1594 err = set_connectable_update_settings(hdev, sk, cp->val);
1598 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1599 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1600 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1605 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1612 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Disabling connectable: stop any discoverable timeout and clear
 * the discoverable flags as well, since discoverable requires
 * connectable.
 */
1614 if (hdev->discov_timeout > 0)
1615 cancel_delayed_work(&hdev->discov_off);
1617 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1618 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1619 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1622 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1626 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag (a pure
 * host-side setting, no HCI commands needed) and broadcast new settings
 * if it changed. In limited-privacy mode a bondable change can affect
 * the advertised address, so the discoverable update is requeued.
 */
1630 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1633 struct mgmt_mode *cp = data;
1637 bt_dev_dbg(hdev, "sock %p", sk);
1639 if (cp->val != 0x00 && cp->val != 0x01)
1640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1641 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear so `changed` reflects an actual transition */
1646 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1648 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1650 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1655 /* In limited privacy mode the change of bondable mode
1656 * may affect the local advertising address.
1658 if (hdev_is_powered(hdev) &&
1659 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1660 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1661 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1662 queue_work(hdev->req_workqueue,
1663 &hdev->discoverable_update);
1665 err = new_settings(hdev, sk);
1669 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: enable/disable BR/EDR link-level
 * security (authentication). Powered off: flag change only. Powered on:
 * send HCI_OP_WRITE_AUTH_ENABLE with a pending command, unless the
 * controller already matches the requested state.
 */
1673 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1676 struct mgmt_mode *cp = data;
1677 struct mgmt_pending_cmd *cmd;
1681 bt_dev_dbg(hdev, "sock %p", sk);
1683 status = mgmt_bredr_support(hdev);
1685 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1688 if (cp->val != 0x00 && cp->val != 0x01)
1689 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1690 MGMT_STATUS_INVALID_PARAMS);
1694 if (!hdev_is_powered(hdev)) {
1695 bool changed = false;
1697 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1698 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1702 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1707 err = new_settings(hdev, sk);
1712 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1713 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller auth state already matches the request — no HCI
 * round-trip needed.
 */
1720 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1721 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1725 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1731 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1733 mgmt_pending_remove(cmd);
1738 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * Requires BR/EDR and an SSP-capable controller. Powered off: flag
 * changes only (disabling SSP also clears HS). Powered on: send
 * HCI_OP_WRITE_SSP_MODE, first disabling SSP debug mode if it was on.
 */
1742 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1744 struct mgmt_mode *cp = data;
1745 struct mgmt_pending_cmd *cmd;
1749 bt_dev_dbg(hdev, "sock %p", sk);
1751 status = mgmt_bredr_support(hdev);
1753 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1755 if (!lmp_ssp_capable(hdev))
1756 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1757 MGMT_STATUS_NOT_SUPPORTED);
1759 if (cp->val != 0x00 && cp->val != 0x01)
1760 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1761 MGMT_STATUS_INVALID_PARAMS);
1765 if (!hdev_is_powered(hdev)) {
/* NOTE(review): the flag arguments on these three calls sit on
 * elided continuation lines (SSP_ENABLED/HS_ENABLED in the full
 * source) — confirm against the complete file.
 */
1769 changed = !hci_dev_test_and_set_flag(hdev,
1772 changed = hci_dev_test_and_clear_flag(hdev,
1775 changed = hci_dev_test_and_clear_flag(hdev,
1778 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1781 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1786 err = new_settings(hdev, sk);
1791 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1792 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1797 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1798 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1802 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turning SSP off while debug keys are in use: also turn off SSP
 * debug mode (cp->val is 0x00 here, matching "disable").
 */
1808 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1809 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1810 sizeof(cp->val), &cp->val);
1812 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1814 mgmt_pending_remove(cmd);
1819 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: enable/disable High Speed (AMP) support.
 * Pure flag operation; requires CONFIG_BT_HS, BR/EDR, SSP capability
 * and SSP enabled. Disabling while powered on is rejected.
 */
1823 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1825 struct mgmt_mode *cp = data;
1830 bt_dev_dbg(hdev, "sock %p", sk);
1832 if (!IS_ENABLED(CONFIG_BT_HS))
1833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1834 MGMT_STATUS_NOT_SUPPORTED);
1836 status = mgmt_bredr_support(hdev);
1838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1840 if (!lmp_ssp_capable(hdev))
1841 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1842 MGMT_STATUS_NOT_SUPPORTED);
1844 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1845 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1846 MGMT_STATUS_REJECTED);
1848 if (cp->val != 0x00 && cp->val != 0x01)
1849 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1850 MGMT_STATUS_INVALID_PARAMS);
/* A pending SET_SSP could toggle the SSP state underneath us. */
1854 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1855 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1861 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS on a powered controller is not allowed. */
1863 if (hdev_is_powered(hdev)) {
1864 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1865 MGMT_STATUS_REJECTED);
1869 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1872 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1877 err = new_settings(hdev, sk);
1880 hci_dev_unlock(hdev);
/* HCI completion handler for the Set LE request. On error, fail all
 * pending SET_LE commands; on success, answer them with the current
 * settings and broadcast NEW_SETTINGS. If LE ended up enabled, refresh
 * the default advertising/scan-response data and the background scan.
 */
1884 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1886 struct cmd_lookup match = { NULL, hdev };
1891 u8 mgmt_err = mgmt_status(status);
1893 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1898 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1900 new_settings(hdev, match.sk);
1905 /* Make sure the controller has a good default for
1906 * advertising data. Restrict the update to when LE
1907 * has actually been enabled. During power on, the
1908 * update in powered_update_hci will take care of it.
1910 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1911 struct hci_request req;
1912 hci_req_init(&req, hdev);
/* Extended-advertising controllers use the ext adv instance
 * path; legacy controllers update adv + scan-rsp data directly.
 */
1913 if (ext_adv_capable(hdev)) {
1916 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1918 __hci_req_update_scan_rsp_data(&req, 0x00);
1920 __hci_req_update_adv_data(&req, 0x00);
1921 __hci_req_update_scan_rsp_data(&req, 0x00);
1923 hci_req_run(&req, NULL);
1924 hci_update_background_scan(hdev);
1928 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: toggle LE host support. LE-only controllers
 * cannot have LE switched off (re-enable is acked gracefully, disable
 * rejected). Powered off or no-op: flag changes only. Otherwise send
 * HCI_OP_WRITE_LE_HOST_SUPPORTED, stopping advertising first when
 * disabling.
 */
1931 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1933 struct mgmt_mode *cp = data;
1934 struct hci_cp_write_le_host_supported hci_cp;
1935 struct mgmt_pending_cmd *cmd;
1936 struct hci_request req;
1940 bt_dev_dbg(hdev, "sock %p", sk);
1942 if (!lmp_le_capable(hdev))
1943 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1944 MGMT_STATUS_NOT_SUPPORTED);
1946 if (cp->val != 0x00 && cp->val != 0x01)
1947 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1948 MGMT_STATUS_INVALID_PARAMS);
1950 /* Bluetooth single mode LE only controllers or dual-mode
1951 * controllers configured as LE only devices, do not allow
1952 * switching LE off. These have either LE enabled explicitly
1953 * or BR/EDR has been previously switched off.
1955 * When trying to enable an already enabled LE, then gracefully
1956 * send a positive response. Trying to disable it however will
1957 * result into rejection.
1959 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1960 if (cp->val == 0x01)
1961 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1964 MGMT_STATUS_REJECTED);
1970 enabled = lmp_host_le_capable(hdev);
/* NOTE(review): this instance-clearing call appears guarded by an
 * elided condition (disabling LE in the full source) — confirm.
 */
1973 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1975 if (!hdev_is_powered(hdev) || val == enabled) {
1976 bool changed = false;
1978 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1979 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Disabling LE also turns off advertising. */
1983 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1984 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1988 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1993 err = new_settings(hdev, sk);
1998 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1999 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2000 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2005 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2011 hci_req_init(&req, hdev);
2013 memset(&hci_cp, 0, sizeof(hci_cp));
2017 hci_cp.simul = 0x00;
2019 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2020 __hci_req_disable_advertising(&req);
2022 if (ext_adv_capable(hdev))
2023 __hci_req_clear_ext_adv_sets(&req);
2026 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2029 err = hci_req_run(&req, le_enable_complete);
2031 mgmt_pending_remove(cmd);
2034 hci_dev_unlock(hdev);
2038 /* This is a helper function to test for pending mgmt commands that can
2039 * cause CoD or EIR HCI commands. We can only allow one such pending
2040 * mgmt command at a time since otherwise we cannot easily track what
2041 * the current values are, will be, and based on that calculate if a new
2042 * HCI command needs to be sent and if yes with what value.
2044 static bool pending_eir_or_class(struct hci_dev *hdev)
2046 struct mgmt_pending_cmd *cmd;
/* Scan the pending-command list for any opcode that can modify the
 * Class of Device or EIR data.
 */
2048 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2049 switch (cmd->opcode) {
2050 case MGMT_OP_ADD_UUID:
2051 case MGMT_OP_REMOVE_UUID:
2052 case MGMT_OP_SET_DEV_CLASS:
2053 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs are shortened forms of
 * this base.
 */
2061 static const u8 bluetooth_base_uuid[] = {
2062 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2063 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID (little-endian): if its low 12 bytes don't
 * match the Bluetooth Base UUID it is a true 128-bit UUID; otherwise
 * the value at offset 12 decides between a 16- and 32-bit shortened
 * form (returns elided here — see full source).
 */
2066 static u8 get_uuid_size(const u8 *uuid)
2070 if (memcmp(uuid, bluetooth_base_uuid, 12))
2073 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for the class/EIR-affecting commands: find the
 * pending command for @mgmt_op, reply with the translated status and
 * the current 3-byte device class, then remove it.
 */
2080 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2082 struct mgmt_pending_cmd *cmd;
2086 cmd = pending_find(mgmt_op, hdev);
2090 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2091 mgmt_status(status), hdev->dev_class, 3);
2093 mgmt_pending_remove(cmd);
2096 hci_dev_unlock(hdev);
/* HCI request callback for add_uuid(): complete the pending command. */
2099 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2101 bt_dev_dbg(hdev, "status 0x%02x", status);
2103 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: record a service UUID and refresh the
 * Class of Device and EIR data. Rejected while another class/EIR
 * command is pending. -ENODATA from hci_req_run means no HCI update
 * was needed, so the command completes immediately.
 */
2106 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2108 struct mgmt_cp_add_uuid *cp = data;
2109 struct mgmt_pending_cmd *cmd;
2110 struct hci_request req;
2111 struct bt_uuid *uuid;
2114 bt_dev_dbg(hdev, "sock %p", sk);
2118 if (pending_eir_or_class(hdev)) {
2119 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2124 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2130 memcpy(uuid->uuid, cp->uuid, 16);
2131 uuid->svc_hint = cp->svc_hint;
2132 uuid->size = get_uuid_size(cp->uuid);
2134 list_add_tail(&uuid->list, &hdev->uuids);
2136 hci_req_init(&req, hdev);
2138 __hci_req_update_class(&req);
2139 __hci_req_update_eir(&req);
2141 err = hci_req_run(&req, add_uuid_complete);
2143 if (err != -ENODATA)
/* Nothing queued: the stored class is already correct, ack now. */
2146 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2147 hdev->dev_class, 3);
2151 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2160 hci_dev_unlock(hdev);
/* Arm the service-cache: on a powered controller, set
 * HCI_SERVICE_CACHE (if not already set) and schedule the
 * service_cache work. Return value indicates whether caching applies
 * (tail elided in this view).
 */
2164 static bool enable_service_cache(struct hci_dev *hdev)
2166 if (!hdev_is_powered(hdev))
2169 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2170 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for remove_uuid(): complete the pending command. */
2178 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2180 bt_dev_dbg(hdev, "status 0x%02x", status);
2182 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one UUID, or all of them when
 * the all-zero wildcard UUID is given (in which case the service cache
 * may absorb the class/EIR update). Unknown UUIDs yield
 * INVALID_PARAMS; otherwise class and EIR are refreshed like add_uuid.
 */
2185 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2188 struct mgmt_cp_remove_uuid *cp = data;
2189 struct mgmt_pending_cmd *cmd;
2190 struct bt_uuid *match, *tmp;
2191 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2192 struct hci_request req;
2195 bt_dev_dbg(hdev, "sock %p", sk);
2199 if (pending_eir_or_class(hdev)) {
2200 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: clear every stored UUID. */
2205 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2206 hci_uuids_clear(hdev);
2208 if (enable_service_cache(hdev)) {
2209 err = mgmt_cmd_complete(sk, hdev->id,
2210 MGMT_OP_REMOVE_UUID,
2211 0, hdev->dev_class, 3);
2220 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2221 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2224 list_del(&match->list);
/* NOTE(review): the "found == 0" check guarding this error sits on
 * an elided line — confirm against the complete file.
 */
2230 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2231 MGMT_STATUS_INVALID_PARAMS);
2236 hci_req_init(&req, hdev);
2238 __hci_req_update_class(&req);
2239 __hci_req_update_eir(&req);
2241 err = hci_req_run(&req, remove_uuid_complete);
2243 if (err != -ENODATA)
2246 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2247 hdev->dev_class, 3);
2251 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2260 hci_dev_unlock(hdev);
/* HCI request callback for set_dev_class(): complete the pending command. */
2264 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2266 bt_dev_dbg(hdev, "status 0x%02x", status);
2268 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor Class of Device.
 * BR/EDR only; the low 2 bits of minor and high 3 bits of major are
 * reserved and must be zero. When powered, flushes any active service
 * cache (updating EIR) and writes the new class to the controller.
 */
2271 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2274 struct mgmt_cp_set_dev_class *cp = data;
2275 struct mgmt_pending_cmd *cmd;
2276 struct hci_request req;
2279 bt_dev_dbg(hdev, "sock %p", sk);
2281 if (!lmp_bredr_capable(hdev))
2282 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2283 MGMT_STATUS_NOT_SUPPORTED);
2287 if (pending_eir_or_class(hdev)) {
2288 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved bits: minor[1:0] and major[7:5] must be zero. */
2293 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2294 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2295 MGMT_STATUS_INVALID_PARAMS);
2299 hdev->major_class = cp->major;
2300 hdev->minor_class = cp->minor;
2302 if (!hdev_is_powered(hdev)) {
2303 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2304 hdev->dev_class, 3);
2308 hci_req_init(&req, hdev);
/* Drop the lock while synchronously cancelling the service-cache
 * work (it takes the same lock), then refresh EIR.
 */
2310 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2311 hci_dev_unlock(hdev);
2312 cancel_delayed_work_sync(&hdev->service_cache);
2314 __hci_req_update_eir(&req);
2317 __hci_req_update_class(&req);
2319 err = hci_req_run(&req, set_class_complete);
2321 if (err != -ENODATA)
2324 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2325 hdev->dev_class, 3);
2329 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2338 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list. Validates key_count against both the u16
 * overflow bound and the exact expected payload length, checks each
 * key's address/type, honours the debug_keys policy flag, skips
 * blocked and debug-combination keys, then acks with an empty reply.
 */
2342 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2345 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound so struct_size() below cannot overflow u16 math. */
2346 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2347 sizeof(struct mgmt_link_key_info));
2348 u16 key_count, expected_len;
2352 bt_dev_dbg(hdev, "sock %p", sk);
2354 if (!lmp_bredr_capable(hdev))
2355 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2356 MGMT_STATUS_NOT_SUPPORTED);
2358 key_count = __le16_to_cpu(cp->key_count);
2359 if (key_count > max_key_count) {
2360 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2363 MGMT_STATUS_INVALID_PARAMS);
2366 expected_len = struct_size(cp, keys, key_count);
2367 if (expected_len != len) {
2368 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2370 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2371 MGMT_STATUS_INVALID_PARAMS);
2374 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2376 MGMT_STATUS_INVALID_PARAMS);
2378 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: reject the whole load on any malformed entry. */
2381 for (i = 0; i < key_count; i++) {
2382 struct mgmt_link_key_info *key = &cp->keys[i];
2384 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2385 return mgmt_cmd_status(sk, hdev->id,
2386 MGMT_OP_LOAD_LINK_KEYS,
2387 MGMT_STATUS_INVALID_PARAMS);
2392 hci_link_keys_clear(hdev);
2395 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2397 changed = hci_dev_test_and_clear_flag(hdev,
2398 HCI_KEEP_DEBUG_KEYS);
2401 new_settings(hdev, NULL);
/* Second pass: store the keys, skipping blocked and debug keys. */
2403 for (i = 0; i < key_count; i++) {
2404 struct mgmt_link_key_info *key = &cp->keys[i];
2406 if (hci_is_blocked_key(hdev,
2407 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2409 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2414 /* Always ignore debug keys and require a new pairing if
2415 * the user wants to use them.
2417 if (key->type == HCI_LK_DEBUG_COMBINATION)
2420 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2421 key->type, key->pin_len, NULL);
2424 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2426 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for the given address to all mgmt
 * sockets except @skip_sk.
 */
2431 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2432 u8 addr_type, struct sock *skip_sk)
2434 struct mgmt_ev_device_unpaired ev;
2436 bacpy(&ev.addr.bdaddr, bdaddr);
2437 ev.addr.type = addr_type;
2439 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the pairing material for a
 * device (BR/EDR link key, or LE SMP keys via
 * smp_cancel_and_remove_pairing) and optionally terminate an existing
 * connection when cp->disconnect is set. Completes immediately when no
 * link needs terminating; otherwise a pending command tracks the abort.
 */
2443 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2446 struct mgmt_cp_unpair_device *cp = data;
2447 struct mgmt_rp_unpair_device rp;
2448 struct hci_conn_params *params;
2449 struct mgmt_pending_cmd *cmd;
2450 struct hci_conn *conn;
2454 memset(&rp, 0, sizeof(rp));
2455 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2456 rp.addr.type = cp->addr.type;
2458 if (!bdaddr_type_is_valid(cp->addr.type))
2459 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2460 MGMT_STATUS_INVALID_PARAMS,
2463 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2464 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2465 MGMT_STATUS_INVALID_PARAMS,
2470 if (!hdev_is_powered(hdev)) {
2471 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2472 MGMT_STATUS_NOT_POWERED, &rp,
2477 if (cp->addr.type == BDADDR_BREDR) {
2478 /* If disconnection is requested, then look up the
2479 * connection. If the remote device is connected, it
2480 * will be later used to terminate the link.
2482 * Setting it to NULL explicitly will cause no
2483 * termination of the link.
2486 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2491 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2493 err = mgmt_cmd_complete(sk, hdev->id,
2494 MGMT_OP_UNPAIR_DEVICE,
2495 MGMT_STATUS_NOT_PAIRED, &rp,
2503 /* LE address type */
2504 addr_type = le_addr_type(cp->addr.type);
2506 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2507 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2509 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2510 MGMT_STATUS_NOT_PAIRED, &rp,
2515 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2517 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2522 /* Defer clearing up the connection parameters until closing to
2523 * give a chance of keeping them if a repairing happens.
2525 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2527 /* Disable auto-connection parameters if present */
2528 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2530 if (params->explicit_connect)
2531 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2533 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2536 /* If disconnection is not requested, then clear the connection
2537 * variable so that the link is not terminated.
2539 if (!cp->disconnect)
2543 /* If the connection variable is set, then termination of the
2544 * link is requested.
2547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2549 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2553 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2560 cmd->cmd_complete = addr_cmd_complete;
2562 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2564 mgmt_pending_remove(cmd);
2567 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate an existing BR/EDR or LE
 * connection with HCI_ERROR_REMOTE_USER_TERM. Only one disconnect may
 * be pending at a time; the pending command's response is the original
 * address payload (generic_cmd_complete).
 */
2571 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2574 struct mgmt_cp_disconnect *cp = data;
2575 struct mgmt_rp_disconnect rp;
2576 struct mgmt_pending_cmd *cmd;
2577 struct hci_conn *conn;
2580 bt_dev_dbg(hdev, "sock %p", sk);
2582 memset(&rp, 0, sizeof(rp));
2583 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2584 rp.addr.type = cp->addr.type;
2586 if (!bdaddr_type_is_valid(cp->addr.type))
2587 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2588 MGMT_STATUS_INVALID_PARAMS,
2593 if (!test_bit(HCI_UP, &hdev->flags)) {
2594 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2595 MGMT_STATUS_NOT_POWERED, &rp,
2600 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2601 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2602 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2606 if (cp->addr.type == BDADDR_BREDR)
2607 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2610 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2611 le_addr_type(cp->addr.type));
/* BT_OPEN/BT_CLOSED connections are not established links. */
2613 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2614 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2615 MGMT_STATUS_NOT_CONNECTED, &rp,
2620 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2626 cmd->cmd_complete = generic_cmd_complete;
2628 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2630 mgmt_pending_remove(cmd);
2633 hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into the mgmt BDADDR_*
 * address type; unknown LE address types fall back to LE Random and
 * unknown link types to BR/EDR.
 */
2637 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2639 switch (link_type) {
2641 switch (addr_type) {
2642 case ADDR_LE_DEV_PUBLIC:
2643 return BDADDR_LE_PUBLIC;
2646 /* Fallback to LE Random address type */
2647 return BDADDR_LE_RANDOM;
2651 /* Fallback to BR/EDR type */
2652 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * MGMT-visible connections. Counts eligible entries first to size the
 * reply, then fills it in a second pass, skipping SCO/eSCO links.
 */
2656 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2659 struct mgmt_rp_get_connections *rp;
2664 bt_dev_dbg(hdev, "sock %p", sk);
2668 if (!hdev_is_powered(hdev)) {
2669 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2670 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as MGMT-connected. */
2675 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2676 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2680 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in address entries (SCO/eSCO are skipped). */
2687 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2688 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2690 bacpy(&rp->addr[i].bdaddr, &c->dst);
2691 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2692 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2697 rp->conn_count = cpu_to_le16(i);
2699 /* Recalculate length in case of filtered SCO connections, etc */
2700 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2701 struct_size(rp, addr, i));
2706 hci_dev_unlock(hdev);
/* Queue a PIN Code Negative Reply to the controller, tracked by a
 * pending PIN_CODE_NEG_REPLY command that responds with the address.
 */
2710 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2711 struct mgmt_cp_pin_code_neg_reply *cp)
2713 struct mgmt_pending_cmd *cmd;
2716 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2721 cmd->cmd_complete = addr_cmd_complete;
2723 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2724 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2726 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN to the
 * controller for an ACL connection awaiting one. If the link requires
 * high security, only a full 16-byte PIN is acceptable — otherwise a
 * negative reply is sent instead and the command fails.
 */
2731 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2734 struct hci_conn *conn;
2735 struct mgmt_cp_pin_code_reply *cp = data;
2736 struct hci_cp_pin_code_reply reply;
2737 struct mgmt_pending_cmd *cmd;
2740 bt_dev_dbg(hdev, "sock %p", sk);
2744 if (!hdev_is_powered(hdev)) {
2745 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2746 MGMT_STATUS_NOT_POWERED);
2750 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2752 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2753 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a 16-digit PIN; reject anything shorter
 * by sending a negative reply to the controller.
 */
2757 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2758 struct mgmt_cp_pin_code_neg_reply ncp;
2760 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2762 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2764 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2766 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2767 MGMT_STATUS_INVALID_PARAMS);
2772 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2778 cmd->cmd_complete = addr_cmd_complete;
2780 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2781 reply.pin_len = cp->pin_len;
2782 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2784 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2786 mgmt_pending_remove(cmd);
2789 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the requested IO capability on the
 * adapter after range-checking it against SMP_IO_KEYBOARD_DISPLAY (the
 * highest defined value).
 */
2793 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2796 struct mgmt_cp_set_io_capability *cp = data;
2798 bt_dev_dbg(hdev, "sock %p", sk);
2800 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2801 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2802 MGMT_STATUS_INVALID_PARAMS);
2806 hdev->io_capability = cp->io_capability;
2808 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2810 hci_dev_unlock(hdev);
2812 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is this
 * connection, or NULL-equivalent when none matches (tail of function not
 * visible in this truncated view).
 */
2816 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2818 struct hci_dev *hdev = conn->hdev;
2819 struct mgmt_pending_cmd *cmd;
2821 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2822 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2825 if (cmd->user_data != conn)
/* Complete a Pair Device command: send the mgmt reply carrying the peer
 * address, detach all connection callbacks so no further events fire for
 * this pairing, and drop the connection reference taken at pair time.
 */
2834 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2836 struct mgmt_rp_pair_device rp;
2837 struct hci_conn *conn = cmd->user_data;
2840 bacpy(&rp.addr.bdaddr, &conn->dst);
2841 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2843 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2844 status, &rp, sizeof(rp));
2846 /* So we don't get further callbacks for this connection */
2847 conn->connect_cfm_cb = NULL;
2848 conn->security_cfm_cb = NULL;
2849 conn->disconn_cfm_cb = NULL;
2851 hci_conn_drop(conn);
2853 /* The device is paired so there is no need to remove
2854 * its connection parameters anymore.
2856 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called when SMP pairing finishes: map the boolean outcome to a mgmt status
 * and complete/remove the matching pending Pair Device command, if any.
 */
2863 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2865 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2866 struct mgmt_pending_cmd *cmd;
2868 cmd = find_pairing(conn);
2870 cmd->cmd_complete(cmd, status);
2871 mgmt_pending_remove(cmd);
/* BR/EDR connection callback: translate the HCI status via mgmt_status()
 * and finish the pending pairing command it belongs to.
 */
2875 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2877 struct mgmt_pending_cmd *cmd;
2879 BT_DBG("status %u", status);
2881 cmd = find_pairing(conn);
2883 BT_DBG("Unable to find a pending command");
2887 cmd->cmd_complete(cmd, mgmt_status(status));
2888 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb. An extra early-out exists in the
 * original between the debug print and find_pairing() (lines elided in this
 * truncated view).
 */
2891 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2893 struct mgmt_pending_cmd *cmd;
2895 BT_DBG("status %u", status);
2900 cmd = find_pairing(conn);
2902 BT_DBG("Unable to find a pending command");
2906 cmd->cmd_complete(cmd, mgmt_status(status));
2907 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PAIR_DEVICE: validate the request, establish an ACL (BR/EDR)
 * or LE connection to the peer, register pairing callbacks and, if the link
 * is already up, kick off security establishment immediately.
 *
 * Returns early with INVALID_PARAMS for bad address type or IO capability,
 * NOT_POWERED when the adapter is down, and ALREADY_PAIRED when keys for the
 * peer already exist.
 */
2910 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2913 struct mgmt_cp_pair_device *cp = data;
2914 struct mgmt_rp_pair_device rp;
2915 struct mgmt_pending_cmd *cmd;
2916 u8 sec_level, auth_type;
2917 struct hci_conn *conn;
2920 bt_dev_dbg(hdev, "sock %p", sk);
/* The reply always echoes the requested address back to the caller */
2922 memset(&rp, 0, sizeof(rp));
2923 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2924 rp.addr.type = cp->addr.type;
2926 if (!bdaddr_type_is_valid(cp->addr.type))
2927 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2928 MGMT_STATUS_INVALID_PARAMS,
2931 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2932 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2933 MGMT_STATUS_INVALID_PARAMS,
2938 if (!hdev_is_powered(hdev)) {
2939 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2940 MGMT_STATUS_NOT_POWERED, &rp,
2945 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2946 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2947 MGMT_STATUS_ALREADY_PAIRED, &rp,
2952 sec_level = BT_SECURITY_MEDIUM;
2953 auth_type = HCI_AT_DEDICATED_BONDING;
2955 if (cp->addr.type == BDADDR_BREDR) {
2956 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2957 auth_type, CONN_REASON_PAIR_DEVICE);
2959 u8 addr_type = le_addr_type(cp->addr.type);
2960 struct hci_conn_params *p;
2962 /* When pairing a new device, it is expected to remember
2963 * this device for future connections. Adding the connection
2964 * parameter information ahead of time allows tracking
2965 * of the peripheral preferred values and will speed up any
2966 * further connection establishment.
2968 * If connection parameters already exist, then they
2969 * will be kept and this function does nothing.
2971 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2973 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2974 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2976 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2977 sec_level, HCI_LE_CONN_TIMEOUT,
2978 CONN_REASON_PAIR_DEVICE);
/* Map connect errors onto mgmt status codes for the caller */
2984 if (PTR_ERR(conn) == -EBUSY)
2985 status = MGMT_STATUS_BUSY;
2986 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2987 status = MGMT_STATUS_NOT_SUPPORTED;
2988 else if (PTR_ERR(conn) == -ECONNREFUSED)
2989 status = MGMT_STATUS_REJECTED;
2991 status = MGMT_STATUS_CONNECT_FAILED;
2993 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2994 status, &rp, sizeof(rp));
/* Another pairing already owns this connection's callbacks */
2998 if (conn->connect_cfm_cb) {
2999 hci_conn_drop(conn);
3000 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3001 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3005 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3008 hci_conn_drop(conn);
3012 cmd->cmd_complete = pairing_complete;
3014 /* For LE, just connecting isn't a proof that the pairing finished */
3015 if (cp->addr.type == BDADDR_BREDR) {
3016 conn->connect_cfm_cb = pairing_complete_cb;
3017 conn->security_cfm_cb = pairing_complete_cb;
3018 conn->disconn_cfm_cb = pairing_complete_cb;
3020 conn->connect_cfm_cb = le_pairing_complete_cb;
3021 conn->security_cfm_cb = le_pairing_complete_cb;
3022 conn->disconn_cfm_cb = le_pairing_complete_cb;
3025 conn->io_capability = cp->io_cap;
/* Hold a reference for the lifetime of the pending command */
3026 cmd->user_data = hci_conn_get(conn);
3028 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3029 hci_conn_security(conn, sec_level, auth_type, true)) {
3030 cmd->cmd_complete(cmd, 0);
3031 mgmt_pending_remove(cmd);
3037 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: complete the pending Pair Device
 * command with CANCELLED, abort any in-flight SMP/link-key pairing with the
 * peer, and tear down the link if it was created for this pairing.
 */
3041 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3044 struct mgmt_addr_info *addr = data;
3045 struct mgmt_pending_cmd *cmd;
3046 struct hci_conn *conn;
3049 bt_dev_dbg(hdev, "sock %p", sk);
3053 if (!hdev_is_powered(hdev)) {
3054 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3055 MGMT_STATUS_NOT_POWERED);
3059 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3061 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3062 MGMT_STATUS_INVALID_PARAMS);
3066 conn = cmd->user_data;
/* The cancel request must name the same peer the pairing targets */
3068 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3069 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3070 MGMT_STATUS_INVALID_PARAMS);
3074 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3075 mgmt_pending_remove(cmd);
3077 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3078 addr, sizeof(*addr));
3080 /* Since user doesn't want to proceed with the connection, abort any
3081 * ongoing pairing and then terminate the link if it was created
3082 * because of the pair device action.
3084 if (addr->type == BDADDR_BREDR)
3085 hci_remove_link_key(hdev, &addr->bdaddr);
3087 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3088 le_addr_type(addr->type));
3090 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3091 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3094 hci_dev_unlock(hdev);
/* Common handler for all user pairing responses (PIN neg reply, user
 * confirm/passkey reply and their negatives). For LE addresses the response
 * is routed to SMP; for BR/EDR it is forwarded to the controller as the
 * given hci_op, with a passkey payload when hci_op is a passkey reply.
 */
3098 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3099 struct mgmt_addr_info *addr, u16 mgmt_op,
3100 u16 hci_op, __le32 passkey)
3102 struct mgmt_pending_cmd *cmd;
3103 struct hci_conn *conn;
3108 if (!hdev_is_powered(hdev)) {
3109 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3110 MGMT_STATUS_NOT_POWERED, addr,
3115 if (addr->type == BDADDR_BREDR)
3116 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3118 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3119 le_addr_type(addr->type));
3122 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3123 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go through SMP rather than raw HCI commands */
3128 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3129 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3131 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3132 MGMT_STATUS_SUCCESS, addr,
3135 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3136 MGMT_STATUS_FAILED, addr,
3142 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3148 cmd->cmd_complete = addr_cmd_complete;
3150 /* Continue with pairing via HCI */
3151 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3152 struct hci_cp_user_passkey_reply cp;
3154 bacpy(&cp.bdaddr, &addr->bdaddr);
3155 cp.passkey = passkey;
3156 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3158 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3162 mgmt_pending_remove(cmd);
3165 hci_dev_unlock(hdev);
/* Thin wrapper mapping MGMT_OP_PIN_CODE_NEG_REPLY onto the common
 * user_pairing_resp() path (no passkey).
 */
3169 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3170 void *data, u16 len)
3172 struct mgmt_cp_pin_code_neg_reply *cp = data;
3174 bt_dev_dbg(hdev, "sock %p", sk);
3176 return user_pairing_resp(sk, hdev, &cp->addr,
3177 MGMT_OP_PIN_CODE_NEG_REPLY,
3178 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Wrapper for MGMT_OP_USER_CONFIRM_REPLY. Unlike the other wrappers it
 * validates the exact command length before delegating.
 */
3181 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3184 struct mgmt_cp_user_confirm_reply *cp = data;
3186 bt_dev_dbg(hdev, "sock %p", sk);
3188 if (len != sizeof(*cp))
3189 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3190 MGMT_STATUS_INVALID_PARAMS);
3192 return user_pairing_resp(sk, hdev, &cp->addr,
3193 MGMT_OP_USER_CONFIRM_REPLY,
3194 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Wrapper mapping MGMT_OP_USER_CONFIRM_NEG_REPLY onto user_pairing_resp(). */
3197 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3198 void *data, u16 len)
3200 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3202 bt_dev_dbg(hdev, "sock %p", sk);
3204 return user_pairing_resp(sk, hdev, &cp->addr,
3205 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3206 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Wrapper mapping MGMT_OP_USER_PASSKEY_REPLY onto user_pairing_resp(),
 * passing the user-supplied passkey through.
 */
3209 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3212 struct mgmt_cp_user_passkey_reply *cp = data;
3214 bt_dev_dbg(hdev, "sock %p", sk);
3216 return user_pairing_resp(sk, hdev, &cp->addr,
3217 MGMT_OP_USER_PASSKEY_REPLY,
3218 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Wrapper mapping MGMT_OP_USER_PASSKEY_NEG_REPLY onto user_pairing_resp(). */
3221 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3222 void *data, u16 len)
3224 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3226 bt_dev_dbg(hdev, "sock %p", sk);
3228 return user_pairing_resp(sk, hdev, &cp->addr,
3229 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3230 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Expire the current advertising instance when it carries any of the given
 * flags (e.g. local name / appearance just changed), then reschedule the
 * next instance so advertising data is regenerated.
 */
3233 static void adv_expire(struct hci_dev *hdev, u32 flags)
3235 struct adv_info *adv_instance;
3236 struct hci_request req;
3239 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3243 /* stop if current instance doesn't need to be changed */
3244 if (!(adv_instance->flags & flags))
3247 cancel_adv_timeout(hdev);
3249 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3253 hci_req_init(&req, hdev);
3254 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3259 hci_req_run(&req, NULL);
/* HCI request completion for Set Local Name: report status (or success with
 * the name data) to the waiting socket and, if LE advertising is active,
 * expire instances that embed the local name so they get refreshed.
 */
3262 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3264 struct mgmt_cp_set_local_name *cp;
3265 struct mgmt_pending_cmd *cmd;
3267 bt_dev_dbg(hdev, "status 0x%02x", status);
3271 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3278 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3279 mgmt_status(status));
3281 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3284 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3285 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3288 mgmt_pending_remove(cmd);
3291 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_LOCAL_NAME: update the device name and short name.
 * Short-circuits when nothing changed; when powered off only the cached
 * values are updated and a LOCAL_NAME_CHANGED event is emitted, otherwise an
 * HCI request updates name/EIR (BR/EDR) and scan response data (LE).
 */
3294 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3297 struct mgmt_cp_set_local_name *cp = data;
3298 struct mgmt_pending_cmd *cmd;
3299 struct hci_request req;
3302 bt_dev_dbg(hdev, "sock %p", sk);
3306 /* If the old values are the same as the new ones just return a
3307 * direct command complete event.
3309 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3310 !memcmp(hdev->short_name, cp->short_name,
3311 sizeof(hdev->short_name))) {
3312 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The short name never requires an HCI update, so store it right away */
3317 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3319 if (!hdev_is_powered(hdev)) {
3320 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3322 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3327 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3328 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3329 ext_info_changed(hdev, sk);
3334 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3340 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3342 hci_req_init(&req, hdev);
3344 if (lmp_bredr_capable(hdev)) {
3345 __hci_req_update_name(&req);
3346 __hci_req_update_eir(&req);
3349 /* The name is stored in the scan response data and so
3350 * no need to update the advertising data here.
3352 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3353 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3355 err = hci_req_run(&req, set_name_complete);
3357 mgmt_pending_remove(cmd);
3360 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_APPEARANCE (LE only): store the new appearance value
 * and, if advertising carries the appearance, expire the instance so it is
 * re-generated with the new value.
 */
3364 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3367 struct mgmt_cp_set_appearance *cp = data;
3371 bt_dev_dbg(hdev, "sock %p", sk);
3373 if (!lmp_le_capable(hdev))
3374 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3375 MGMT_STATUS_NOT_SUPPORTED);
3377 appearance = le16_to_cpu(cp->appearance);
3381 if (hdev->appearance != appearance) {
3382 hdev->appearance = appearance;
3384 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3385 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3387 ext_info_changed(hdev, sk);
3390 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3393 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report the supported, selected and
 * configurable PHY bitmasks of the adapter.
 */
3398 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3399 void *data, u16 len)
3401 struct mgmt_rp_get_phy_configuration rp;
3403 bt_dev_dbg(hdev, "sock %p", sk);
3407 memset(&rp, 0, sizeof(rp));
3409 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3410 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3411 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3413 hci_dev_unlock(hdev);
3415 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event carrying the currently
 * selected PHYs, skipping the socket that triggered the change.
 */
3419 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3421 struct mgmt_ev_phy_configuration_changed ev;
3423 memset(&ev, 0, sizeof(ev));
3425 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3427 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* HCI completion handler for LE Set Default PHY: resolve the pending
 * Set PHY Configuration command and, on success, broadcast the change to
 * other mgmt sockets.
 */
3431 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3432 u16 opcode, struct sk_buff *skb)
3434 struct mgmt_pending_cmd *cmd;
3436 bt_dev_dbg(hdev, "status 0x%02x", status);
3440 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3445 mgmt_cmd_status(cmd->sk, hdev->id,
3446 MGMT_OP_SET_PHY_CONFIGURATION,
3447 mgmt_status(status));
3449 mgmt_cmd_complete(cmd->sk, hdev->id,
3450 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3453 mgmt_phy_configuration_changed(hdev, cmd->sk);
3456 mgmt_pending_remove(cmd);
3459 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: validate the requested PHY set
 * against supported/configurable masks, translate the BR/EDR PHY bits into
 * the adapter packet-type mask, and issue HCI LE Set Default PHY for the LE
 * bits when they actually change.
 */
3462 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3463 void *data, u16 len)
3465 struct mgmt_cp_set_phy_configuration *cp = data;
3466 struct hci_cp_le_set_default_phy cp_phy;
3467 struct mgmt_pending_cmd *cmd;
3468 struct hci_request req;
3469 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
/* DH1/DM1 (1M 1-slot) are always usable and form the baseline */
3470 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3471 bool changed = false;
3474 bt_dev_dbg(hdev, "sock %p", sk);
3476 configurable_phys = get_configurable_phys(hdev);
3477 supported_phys = get_supported_phys(hdev);
3478 selected_phys = __le32_to_cpu(cp->selected_phys);
3480 if (selected_phys & ~supported_phys)
3481 return mgmt_cmd_status(sk, hdev->id,
3482 MGMT_OP_SET_PHY_CONFIGURATION,
3483 MGMT_STATUS_INVALID_PARAMS);
3485 unconfigure_phys = supported_phys & ~configurable_phys;
/* Mandatory (non-configurable) PHYs must always remain selected */
3487 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3488 return mgmt_cmd_status(sk, hdev->id,
3489 MGMT_OP_SET_PHY_CONFIGURATION,
3490 MGMT_STATUS_INVALID_PARAMS);
3492 if (selected_phys == get_selected_phys(hdev))
3493 return mgmt_cmd_complete(sk, hdev->id,
3494 MGMT_OP_SET_PHY_CONFIGURATION,
3499 if (!hdev_is_powered(hdev)) {
3500 err = mgmt_cmd_status(sk, hdev->id,
3501 MGMT_OP_SET_PHY_CONFIGURATION,
3502 MGMT_STATUS_REJECTED);
3506 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3507 err = mgmt_cmd_status(sk, hdev->id,
3508 MGMT_OP_SET_PHY_CONFIGURATION,
/* Basic-rate multi-slot bits are positive selections... */
3513 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3514 pkt_type |= (HCI_DH3 | HCI_DM3);
3516 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3518 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3519 pkt_type |= (HCI_DH5 | HCI_DM5);
3521 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* ...while EDR bits in pkt_type mean "packet type NOT allowed", hence
 * the inverted clear/set logic below.
 */
3523 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3524 pkt_type &= ~HCI_2DH1;
3526 pkt_type |= HCI_2DH1;
3528 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3529 pkt_type &= ~HCI_2DH3;
3531 pkt_type |= HCI_2DH3;
3533 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3534 pkt_type &= ~HCI_2DH5;
3536 pkt_type |= HCI_2DH5;
3538 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3539 pkt_type &= ~HCI_3DH1;
3541 pkt_type |= HCI_3DH1;
3543 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3544 pkt_type &= ~HCI_3DH3;
3546 pkt_type |= HCI_3DH3;
3548 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3549 pkt_type &= ~HCI_3DH5;
3551 pkt_type |= HCI_3DH5;
3553 if (pkt_type != hdev->pkt_type) {
3554 hdev->pkt_type = pkt_type;
/* If only BR/EDR bits changed, no HCI command is needed: finish now */
3558 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3559 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3561 mgmt_phy_configuration_changed(hdev, sk);
3563 err = mgmt_cmd_complete(sk, hdev->id,
3564 MGMT_OP_SET_PHY_CONFIGURATION,
3570 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3577 hci_req_init(&req, hdev);
3579 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction */
3581 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3582 cp_phy.all_phys |= 0x01;
3584 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3585 cp_phy.all_phys |= 0x02;
3587 if (selected_phys & MGMT_PHY_LE_1M_TX)
3588 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3590 if (selected_phys & MGMT_PHY_LE_2M_TX)
3591 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3593 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3594 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3596 if (selected_phys & MGMT_PHY_LE_1M_RX)
3597 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3599 if (selected_phys & MGMT_PHY_LE_2M_RX)
3600 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3602 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3603 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3605 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3607 err = hci_req_run_skb(&req, set_default_phy_complete);
3609 mgmt_pending_remove(cmd);
3612 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BLOCKED_KEYS: replace the adapter's blocked-key list
 * with the caller-supplied set. key_count is bounded so that the computed
 * expected length cannot overflow u16, and the payload length must match
 * struct_size() exactly.
 */
3617 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3620 int err = MGMT_STATUS_SUCCESS;
3621 struct mgmt_cp_set_blocked_keys *keys = data;
3622 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3623 sizeof(struct mgmt_blocked_key_info));
3624 u16 key_count, expected_len;
3627 bt_dev_dbg(hdev, "sock %p", sk);
3629 key_count = __le16_to_cpu(keys->key_count);
3630 if (key_count > max_key_count) {
3631 bt_dev_err(hdev, "too big key_count value %u", key_count);
3632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3633 MGMT_STATUS_INVALID_PARAMS);
3636 expected_len = struct_size(keys, keys, key_count);
3637 if (expected_len != len) {
3638 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3641 MGMT_STATUS_INVALID_PARAMS);
/* The new list fully replaces the old one */
3646 hci_blocked_keys_clear(hdev);
3648 for (i = 0; i < keys->key_count; ++i) {
3649 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3652 err = MGMT_STATUS_NO_RESOURCES;
3656 b->type = keys->keys[i].type;
3657 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3658 list_add_rcu(&b->list, &hdev->blocked_keys);
3660 hci_dev_unlock(hdev);
3662 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle HCI_WIDEBAND_SPEECH_ENABLED.
 * Requires the HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED quirk; the setting can
 * only be flipped while the adapter is powered off (otherwise REJECTED).
 */
3666 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3667 void *data, u16 len)
3669 struct mgmt_mode *cp = data;
3671 bool changed = false;
3673 bt_dev_dbg(hdev, "sock %p", sk);
3675 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3676 return mgmt_cmd_status(sk, hdev->id,
3677 MGMT_OP_SET_WIDEBAND_SPEECH,
3678 MGMT_STATUS_NOT_SUPPORTED);
3680 if (cp->val != 0x00 && cp->val != 0x01)
3681 return mgmt_cmd_status(sk, hdev->id,
3682 MGMT_OP_SET_WIDEBAND_SPEECH,
3683 MGMT_STATUS_INVALID_PARAMS);
3687 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3688 err = mgmt_cmd_status(sk, hdev->id,
3689 MGMT_OP_SET_WIDEBAND_SPEECH,
/* While powered, only a no-op "change" to the current value is allowed */
3694 if (hdev_is_powered(hdev) &&
3695 !!cp->val != hci_dev_test_flag(hdev,
3696 HCI_WIDEBAND_SPEECH_ENABLED)) {
3697 err = mgmt_cmd_status(sk, hdev->id,
3698 MGMT_OP_SET_WIDEBAND_SPEECH,
3699 MGMT_STATUS_REJECTED);
3704 changed = !hci_dev_test_and_set_flag(hdev,
3705 HCI_WIDEBAND_SPEECH_ENABLED);
3707 changed = hci_dev_test_and_clear_flag(hdev,
3708 HCI_WIDEBAND_SPEECH_ENABLED);
3710 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3715 err = new_settings(hdev, sk);
3718 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_CONTROLLER_CAP: build an EIR-encoded capability list
 * (security flags, max encryption key sizes, and LE TX power range when the
 * controller reports it) and return it to the caller.
 */
3722 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3723 void *data, u16 data_len)
3726 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3729 u8 tx_power_range[2];
3731 bt_dev_dbg(hdev, "sock %p", sk);
3733 memset(&buf, 0, sizeof(buf));
3737 /* When the Read Simple Pairing Options command is supported, then
3738 * the remote public key validation is supported.
3740 * Alternatively, when Microsoft extensions are available, they can
3741 * indicate support for public key validation as well.
3743 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3744 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3746 flags |= 0x02; /* Remote public key validation (LE) */
3748 /* When the Read Encryption Key Size command is supported, then the
3749 * encryption key size is enforced.
3751 if (hdev->commands[20] & 0x10)
3752 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3754 flags |= 0x08; /* Encryption key size enforcement (LE) */
3756 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3759 /* When the Read Simple Pairing Options command is supported, then
3760 * also max encryption key size information is provided.
3762 if (hdev->commands[41] & 0x08)
3763 cap_len = eir_append_le16(rp->cap, cap_len,
3764 MGMT_CAP_MAX_ENC_KEY_SIZE,
3765 hdev->max_enc_key_size);
3767 cap_len = eir_append_le16(rp->cap, cap_len,
3768 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3769 SMP_MAX_ENC_KEY_SIZE);
3771 /* Append the min/max LE tx power parameters if we were able to fetch
3772 * it from the controller
3774 if (hdev->commands[38] & 0x80) {
3775 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3776 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3777 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3781 rp->cap_len = cpu_to_le16(cap_len);
3783 hci_dev_unlock(hdev);
3785 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3786 rp, sizeof(*rp) + cap_len);
/* UUIDs identifying experimental features over the mgmt interface.
 * Each table stores the UUID's 16 bytes in reversed (little-endian wire)
 * order relative to the human-readable string in the comment above it.
 */
3789 #ifdef CONFIG_BT_FEATURE_DEBUG
3790 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3791 static const u8 debug_uuid[16] = {
3792 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3793 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3797 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3798 static const u8 simult_central_periph_uuid[16] = {
3799 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3800 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3803 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3804 static const u8 rpa_resolution_uuid[16] = {
3805 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3806 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: enumerate the experimental
 * features (debug, simultaneous central/peripheral, LL privacy/RPA
 * resolution) with their current enable flags. Works both with and without
 * a controller index (hdev may be NULL for the global variant).
 */
3809 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3810 void *data, u16 data_len)
3812 char buf[62]; /* Enough space for 3 features */
3813 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3817 bt_dev_dbg(hdev, "sock %p", sk);
3819 memset(&buf, 0, sizeof(buf));
3821 #ifdef CONFIG_BT_FEATURE_DEBUG
3823 flags = bt_dbg_get() ? BIT(0) : 0;
3825 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3826 rp->features[idx].flags = cpu_to_le32(flags);
/* Feature advertised only if the controller's LE states claim support
 * for central + peripheral + simultaneous roles.
 */
3832 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3833 (hdev->le_states[4] & 0x08) && /* Central */
3834 (hdev->le_states[4] & 0x40) && /* Peripheral */
3835 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3840 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3841 rp->features[idx].flags = cpu_to_le32(flags);
3845 if (hdev && use_ll_privacy(hdev)) {
3846 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3847 flags = BIT(0) | BIT(1);
3851 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3852 rp->features[idx].flags = cpu_to_le32(flags);
3856 rp->feature_count = cpu_to_le16(idx);
3858 /* After reading the experimental features information, enable
3859 * the events to update client on any future change.
3861 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3863 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3864 MGMT_OP_READ_EXP_FEATURES_INFO,
3865 0, rp, sizeof(*rp) + (20 * idx));
/* Emit an Experimental Feature Changed event for the LL privacy UUID.
 * BIT(1) is always set to signal that supported settings changed too.
 */
3868 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3871 struct mgmt_ev_exp_feature_changed ev;
3873 memset(&ev, 0, sizeof(ev));
3874 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3875 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3877 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3879 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3883 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Emit an Experimental Feature Changed event for the debug UUID. Sent with
 * a NULL hdev since the debug feature is global, not per-controller.
 */
3884 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3886 struct mgmt_ev_exp_feature_changed ev;
3888 memset(&ev, 0, sizeof(ev));
3889 memcpy(ev.uuid, debug_uuid, 16);
3890 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3892 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3894 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Handle MGMT_OP_SET_EXP_FEATURE. Dispatches on the UUID in the request:
 *  - ZERO_KEY: disable all experimental features (debug + LL privacy);
 *  - debug_uuid: global on/off toggle, must be sent without an index;
 *  - rpa_resolution_uuid: per-controller LL privacy toggle, powered-off only.
 * Each single-octet parameter is strictly validated to 0x00/0x01.
 */
3898 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3899 void *data, u16 data_len)
3901 struct mgmt_cp_set_exp_feature *cp = data;
3902 struct mgmt_rp_set_exp_feature rp;
3904 bt_dev_dbg(hdev, "sock %p", sk);
/* An all-zero UUID means "disable every experimental feature" */
3906 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3907 memset(rp.uuid, 0, 16);
3908 rp.flags = cpu_to_le32(0);
3910 #ifdef CONFIG_BT_FEATURE_DEBUG
3912 bool changed = bt_dbg_get();
3917 exp_debug_feature_changed(false, sk);
3921 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3922 bool changed = hci_dev_test_flag(hdev,
3923 HCI_ENABLE_LL_PRIVACY);
3925 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3928 exp_ll_privacy_feature_changed(false, hdev, sk);
3931 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3933 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3934 MGMT_OP_SET_EXP_FEATURE, 0,
3938 #ifdef CONFIG_BT_FEATURE_DEBUG
3939 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3943 /* Command requires to use the non-controller index */
3945 return mgmt_cmd_status(sk, hdev->id,
3946 MGMT_OP_SET_EXP_FEATURE,
3947 MGMT_STATUS_INVALID_INDEX);
3949 /* Parameters are limited to a single octet */
3950 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3951 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3952 MGMT_OP_SET_EXP_FEATURE,
3953 MGMT_STATUS_INVALID_PARAMS);
3955 /* Only boolean on/off is supported */
3956 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3957 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3958 MGMT_OP_SET_EXP_FEATURE,
3959 MGMT_STATUS_INVALID_PARAMS);
3961 val = !!cp->param[0];
3962 changed = val ? !bt_dbg_get() : bt_dbg_get();
3965 memcpy(rp.uuid, debug_uuid, 16);
3966 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3968 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3970 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3971 MGMT_OP_SET_EXP_FEATURE, 0,
3975 exp_debug_feature_changed(val, sk);
3981 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3986 /* Command requires to use the controller index */
3988 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3989 MGMT_OP_SET_EXP_FEATURE,
3990 MGMT_STATUS_INVALID_INDEX);
3992 /* Changes can only be made when controller is powered down */
3993 if (hdev_is_powered(hdev))
3994 return mgmt_cmd_status(sk, hdev->id,
3995 MGMT_OP_SET_EXP_FEATURE,
3996 MGMT_STATUS_REJECTED);
3998 /* Parameters are limited to a single octet */
3999 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4000 return mgmt_cmd_status(sk, hdev->id,
4001 MGMT_OP_SET_EXP_FEATURE,
4002 MGMT_STATUS_INVALID_PARAMS);
4004 /* Only boolean on/off is supported */
4005 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4006 return mgmt_cmd_status(sk, hdev->id,
4007 MGMT_OP_SET_EXP_FEATURE,
4008 MGMT_STATUS_INVALID_PARAMS);
4010 val = !!cp->param[0];
4013 changed = !hci_dev_test_flag(hdev,
4014 HCI_ENABLE_LL_PRIVACY);
4015 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
/* LL privacy is incompatible with software advertising rotation */
4016 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4018 /* Enable LL privacy + supported settings changed */
4019 flags = BIT(0) | BIT(1);
4021 changed = hci_dev_test_flag(hdev,
4022 HCI_ENABLE_LL_PRIVACY);
4023 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4025 /* Disable LL privacy + supported settings changed */
4029 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4030 rp.flags = cpu_to_le32(flags);
4032 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4034 err = mgmt_cmd_complete(sk, hdev->id,
4035 MGMT_OP_SET_EXP_FEATURE, 0,
4039 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Unknown UUID: the feature is not supported */
4044 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4045 MGMT_OP_SET_EXP_FEATURE,
4046 MGMT_STATUS_NOT_SUPPORTED);
/* Bitmask with one bit set for every defined HCI connection flag */
4049 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* Handle MGMT_OP_GET_DEVICE_FLAGS: look up the device in the BR/EDR accept
 * list or the LE connection parameters and return its current and supported
 * flag masks. Unknown devices yield INVALID_PARAMS.
 */
4051 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4054 struct mgmt_cp_get_device_flags *cp = data;
4055 struct mgmt_rp_get_device_flags rp;
4056 struct bdaddr_list_with_flags *br_params;
4057 struct hci_conn_params *params;
4058 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4059 u32 current_flags = 0;
4060 u8 status = MGMT_STATUS_INVALID_PARAMS;
4062 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4063 &cp->addr.bdaddr, cp->addr.type);
4067 memset(&rp, 0, sizeof(rp));
4069 if (cp->addr.type == BDADDR_BREDR) {
4070 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4076 current_flags = br_params->current_flags;
4078 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4079 le_addr_type(cp->addr.type));
4084 current_flags = params->current_flags;
4087 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4088 rp.addr.type = cp->addr.type;
4089 rp.supported_flags = cpu_to_le32(supported_flags);
4090 rp.current_flags = cpu_to_le32(current_flags);
4092 status = MGMT_STATUS_SUCCESS;
4095 hci_dev_unlock(hdev);
4097 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast a Device Flags Changed event for the given device, skipping the
 * socket that performed the change.
 */
4101 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4102 bdaddr_t *bdaddr, u8 bdaddr_type,
4103 u32 supported_flags, u32 current_flags)
4105 struct mgmt_ev_device_flags_changed ev;
4107 bacpy(&ev.addr.bdaddr, bdaddr);
4108 ev.addr.type = bdaddr_type;
4109 ev.supported_flags = cpu_to_le32(supported_flags);
4110 ev.current_flags = cpu_to_le32(current_flags);
4112 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* Handle MGMT_OP_SET_DEVICE_FLAGS: validate the requested flags against the
 * supported mask, store them on the matching accept-list entry (BR/EDR) or
 * connection parameters (LE), and broadcast the change on success.
 */
4115 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4118 struct mgmt_cp_set_device_flags *cp = data;
4119 struct bdaddr_list_with_flags *br_params;
4120 struct hci_conn_params *params;
4121 u8 status = MGMT_STATUS_INVALID_PARAMS;
4122 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4123 u32 current_flags = __le32_to_cpu(cp->current_flags);
4125 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4126 &cp->addr.bdaddr, cp->addr.type,
4127 __le32_to_cpu(current_flags));
/* Reject any flag bit outside the supported mask */
4129 if ((supported_flags | current_flags) != supported_flags) {
4130 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4131 current_flags, supported_flags);
4137 if (cp->addr.type == BDADDR_BREDR) {
4138 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4143 br_params->current_flags = current_flags;
4144 status = MGMT_STATUS_SUCCESS;
4146 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4147 &cp->addr.bdaddr, cp->addr.type);
4150 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4151 le_addr_type(cp->addr.type));
4153 params->current_flags = current_flags;
4154 status = MGMT_STATUS_SUCCESS;
4156 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4158 le_addr_type(cp->addr.type));
4163 hci_dev_unlock(hdev);
4165 if (status == MGMT_STATUS_SUCCESS)
4166 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4167 supported_flags, current_flags);
4169 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4170 &cp->addr, sizeof(cp->addr));
4173 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4176 struct mgmt_ev_adv_monitor_added ev;
4178 ev.monitor_handle = cpu_to_le16(handle);
4180 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4183 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4185 struct mgmt_ev_adv_monitor_removed ev;
4186 struct mgmt_pending_cmd *cmd;
4187 struct sock *sk_skip = NULL;
4188 struct mgmt_cp_remove_adv_monitor *cp;
4190 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4194 if (cp->monitor_handle)
4198 ev.monitor_handle = cpu_to_le16(handle);
4200 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4203 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4204 void *data, u16 len)
4206 struct adv_monitor *monitor = NULL;
4207 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4210 __u32 supported = 0;
4212 __u16 num_handles = 0;
4213 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4215 BT_DBG("request for %s", hdev->name);
4219 if (msft_monitor_supported(hdev))
4220 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4222 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4223 handles[num_handles++] = monitor->handle;
4225 hci_dev_unlock(hdev);
4227 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4228 rp = kmalloc(rp_size, GFP_KERNEL);
4232 /* All supported features are currently enabled */
4233 enabled = supported;
4235 rp->supported_features = cpu_to_le32(supported);
4236 rp->enabled_features = cpu_to_le32(enabled);
4237 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4238 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4239 rp->num_handles = cpu_to_le16(num_handles);
4241 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4243 err = mgmt_cmd_complete(sk, hdev->id,
4244 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4245 MGMT_STATUS_SUCCESS, rp, rp_size);
4252 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4254 struct mgmt_rp_add_adv_patterns_monitor rp;
4255 struct mgmt_pending_cmd *cmd;
4256 struct adv_monitor *monitor;
4261 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4263 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4268 monitor = cmd->user_data;
4269 rp.monitor_handle = cpu_to_le16(monitor->handle);
4272 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4273 hdev->adv_monitors_cnt++;
4274 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4275 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4276 hci_update_background_scan(hdev);
4279 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4280 mgmt_status(status), &rp, sizeof(rp));
4281 mgmt_pending_remove(cmd);
4284 hci_dev_unlock(hdev);
4285 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4286 rp.monitor_handle, status);
4291 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4292 struct adv_monitor *m, u8 status,
4293 void *data, u16 len, u16 op)
4295 struct mgmt_rp_add_adv_patterns_monitor rp;
4296 struct mgmt_pending_cmd *cmd;
4305 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4306 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4307 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4308 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4309 status = MGMT_STATUS_BUSY;
4313 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4315 status = MGMT_STATUS_NO_RESOURCES;
4320 pending = hci_add_adv_monitor(hdev, m, &err);
4322 if (err == -ENOSPC || err == -ENOMEM)
4323 status = MGMT_STATUS_NO_RESOURCES;
4324 else if (err == -EINVAL)
4325 status = MGMT_STATUS_INVALID_PARAMS;
4327 status = MGMT_STATUS_FAILED;
4329 mgmt_pending_remove(cmd);
4334 mgmt_pending_remove(cmd);
4335 rp.monitor_handle = cpu_to_le16(m->handle);
4336 mgmt_adv_monitor_added(sk, hdev, m->handle);
4337 m->state = ADV_MONITOR_STATE_REGISTERED;
4338 hdev->adv_monitors_cnt++;
4340 hci_dev_unlock(hdev);
4341 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4345 hci_dev_unlock(hdev);
4350 hci_free_adv_monitor(hdev, m);
4351 hci_dev_unlock(hdev);
4352 return mgmt_cmd_status(sk, hdev->id, op, status);
4355 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4356 struct mgmt_adv_rssi_thresholds *rssi)
4359 m->rssi.low_threshold = rssi->low_threshold;
4360 m->rssi.low_threshold_timeout =
4361 __le16_to_cpu(rssi->low_threshold_timeout);
4362 m->rssi.high_threshold = rssi->high_threshold;
4363 m->rssi.high_threshold_timeout =
4364 __le16_to_cpu(rssi->high_threshold_timeout);
4365 m->rssi.sampling_period = rssi->sampling_period;
4367 /* Default values. These numbers are the least constricting
4368 * parameters for MSFT API to work, so it behaves as if there
4369 * are no rssi parameter to consider. May need to be changed
4370 * if other API are to be supported.
4372 m->rssi.low_threshold = -127;
4373 m->rssi.low_threshold_timeout = 60;
4374 m->rssi.high_threshold = -127;
4375 m->rssi.high_threshold_timeout = 0;
4376 m->rssi.sampling_period = 0;
4380 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4381 struct mgmt_adv_pattern *patterns)
4383 u8 offset = 0, length = 0;
4384 struct adv_pattern *p = NULL;
4387 for (i = 0; i < pattern_count; i++) {
4388 offset = patterns[i].offset;
4389 length = patterns[i].length;
4390 if (offset >= HCI_MAX_AD_LENGTH ||
4391 length > HCI_MAX_AD_LENGTH ||
4392 (offset + length) > HCI_MAX_AD_LENGTH)
4393 return MGMT_STATUS_INVALID_PARAMS;
4395 p = kmalloc(sizeof(*p), GFP_KERNEL);
4397 return MGMT_STATUS_NO_RESOURCES;
4399 p->ad_type = patterns[i].ad_type;
4400 p->offset = patterns[i].offset;
4401 p->length = patterns[i].length;
4402 memcpy(p->value, patterns[i].value, p->length);
4404 INIT_LIST_HEAD(&p->list);
4405 list_add(&p->list, &m->patterns);
4408 return MGMT_STATUS_SUCCESS;
4411 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4412 void *data, u16 len)
4414 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4415 struct adv_monitor *m = NULL;
4416 u8 status = MGMT_STATUS_SUCCESS;
4417 size_t expected_size = sizeof(*cp);
4419 BT_DBG("request for %s", hdev->name);
4421 if (len <= sizeof(*cp)) {
4422 status = MGMT_STATUS_INVALID_PARAMS;
4426 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4427 if (len != expected_size) {
4428 status = MGMT_STATUS_INVALID_PARAMS;
4432 m = kzalloc(sizeof(*m), GFP_KERNEL);
4434 status = MGMT_STATUS_NO_RESOURCES;
4438 INIT_LIST_HEAD(&m->patterns);
4440 parse_adv_monitor_rssi(m, NULL);
4441 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4444 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4445 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4448 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4449 void *data, u16 len)
4451 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4452 struct adv_monitor *m = NULL;
4453 u8 status = MGMT_STATUS_SUCCESS;
4454 size_t expected_size = sizeof(*cp);
4456 BT_DBG("request for %s", hdev->name);
4458 if (len <= sizeof(*cp)) {
4459 status = MGMT_STATUS_INVALID_PARAMS;
4463 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4464 if (len != expected_size) {
4465 status = MGMT_STATUS_INVALID_PARAMS;
4469 m = kzalloc(sizeof(*m), GFP_KERNEL);
4471 status = MGMT_STATUS_NO_RESOURCES;
4475 INIT_LIST_HEAD(&m->patterns);
4477 parse_adv_monitor_rssi(m, &cp->rssi);
4478 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4481 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4482 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4485 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4487 struct mgmt_rp_remove_adv_monitor rp;
4488 struct mgmt_cp_remove_adv_monitor *cp;
4489 struct mgmt_pending_cmd *cmd;
4494 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4499 rp.monitor_handle = cp->monitor_handle;
4502 hci_update_background_scan(hdev);
4504 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4505 mgmt_status(status), &rp, sizeof(rp));
4506 mgmt_pending_remove(cmd);
4509 hci_dev_unlock(hdev);
4510 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4511 rp.monitor_handle, status);
4516 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4517 void *data, u16 len)
4519 struct mgmt_cp_remove_adv_monitor *cp = data;
4520 struct mgmt_rp_remove_adv_monitor rp;
4521 struct mgmt_pending_cmd *cmd;
4522 u16 handle = __le16_to_cpu(cp->monitor_handle);
4526 BT_DBG("request for %s", hdev->name);
4527 rp.monitor_handle = cp->monitor_handle;
4531 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4532 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4533 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4534 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4535 status = MGMT_STATUS_BUSY;
4539 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4541 status = MGMT_STATUS_NO_RESOURCES;
4546 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4548 pending = hci_remove_all_adv_monitor(hdev, &err);
4551 mgmt_pending_remove(cmd);
4554 status = MGMT_STATUS_INVALID_INDEX;
4556 status = MGMT_STATUS_FAILED;
4561 /* monitor can be removed without forwarding request to controller */
4563 mgmt_pending_remove(cmd);
4564 hci_dev_unlock(hdev);
4566 return mgmt_cmd_complete(sk, hdev->id,
4567 MGMT_OP_REMOVE_ADV_MONITOR,
4568 MGMT_STATUS_SUCCESS,
4572 hci_dev_unlock(hdev);
4576 hci_dev_unlock(hdev);
4577 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4581 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4582 u16 opcode, struct sk_buff *skb)
4584 struct mgmt_rp_read_local_oob_data mgmt_rp;
4585 size_t rp_size = sizeof(mgmt_rp);
4586 struct mgmt_pending_cmd *cmd;
4588 bt_dev_dbg(hdev, "status %u", status);
4590 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4594 if (status || !skb) {
4595 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4596 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4600 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4602 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4603 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
4605 if (skb->len < sizeof(*rp)) {
4606 mgmt_cmd_status(cmd->sk, hdev->id,
4607 MGMT_OP_READ_LOCAL_OOB_DATA,
4608 MGMT_STATUS_FAILED);
4612 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4613 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
4615 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4617 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4619 if (skb->len < sizeof(*rp)) {
4620 mgmt_cmd_status(cmd->sk, hdev->id,
4621 MGMT_OP_READ_LOCAL_OOB_DATA,
4622 MGMT_STATUS_FAILED);
4626 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4627 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4629 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4630 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4633 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4634 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4637 mgmt_pending_remove(cmd);
4640 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4641 void *data, u16 data_len)
4643 struct mgmt_pending_cmd *cmd;
4644 struct hci_request req;
4647 bt_dev_dbg(hdev, "sock %p", sk);
4651 if (!hdev_is_powered(hdev)) {
4652 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4653 MGMT_STATUS_NOT_POWERED);
4657 if (!lmp_ssp_capable(hdev)) {
4658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4659 MGMT_STATUS_NOT_SUPPORTED);
4663 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4669 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4675 hci_req_init(&req, hdev);
4677 if (bredr_sc_enabled(hdev))
4678 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4680 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4682 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4684 mgmt_pending_remove(cmd);
4687 hci_dev_unlock(hdev);
4691 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4692 void *data, u16 len)
4694 struct mgmt_addr_info *addr = data;
4697 bt_dev_dbg(hdev, "sock %p", sk);
4699 if (!bdaddr_type_is_valid(addr->type))
4700 return mgmt_cmd_complete(sk, hdev->id,
4701 MGMT_OP_ADD_REMOTE_OOB_DATA,
4702 MGMT_STATUS_INVALID_PARAMS,
4703 addr, sizeof(*addr));
4707 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4708 struct mgmt_cp_add_remote_oob_data *cp = data;
4711 if (cp->addr.type != BDADDR_BREDR) {
4712 err = mgmt_cmd_complete(sk, hdev->id,
4713 MGMT_OP_ADD_REMOTE_OOB_DATA,
4714 MGMT_STATUS_INVALID_PARAMS,
4715 &cp->addr, sizeof(cp->addr));
4719 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4720 cp->addr.type, cp->hash,
4721 cp->rand, NULL, NULL);
4723 status = MGMT_STATUS_FAILED;
4725 status = MGMT_STATUS_SUCCESS;
4727 err = mgmt_cmd_complete(sk, hdev->id,
4728 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4729 &cp->addr, sizeof(cp->addr));
4730 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4731 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4732 u8 *rand192, *hash192, *rand256, *hash256;
4735 if (bdaddr_type_is_le(cp->addr.type)) {
4736 /* Enforce zero-valued 192-bit parameters as
4737 * long as legacy SMP OOB isn't implemented.
4739 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4740 memcmp(cp->hash192, ZERO_KEY, 16)) {
4741 err = mgmt_cmd_complete(sk, hdev->id,
4742 MGMT_OP_ADD_REMOTE_OOB_DATA,
4743 MGMT_STATUS_INVALID_PARAMS,
4744 addr, sizeof(*addr));
4751 /* In case one of the P-192 values is set to zero,
4752 * then just disable OOB data for P-192.
4754 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4755 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4759 rand192 = cp->rand192;
4760 hash192 = cp->hash192;
4764 /* In case one of the P-256 values is set to zero, then just
4765 * disable OOB data for P-256.
4767 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4768 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4772 rand256 = cp->rand256;
4773 hash256 = cp->hash256;
4776 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4777 cp->addr.type, hash192, rand192,
4780 status = MGMT_STATUS_FAILED;
4782 status = MGMT_STATUS_SUCCESS;
4784 err = mgmt_cmd_complete(sk, hdev->id,
4785 MGMT_OP_ADD_REMOTE_OOB_DATA,
4786 status, &cp->addr, sizeof(cp->addr));
4788 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4790 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4791 MGMT_STATUS_INVALID_PARAMS);
4795 hci_dev_unlock(hdev);
4799 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4800 void *data, u16 len)
4802 struct mgmt_cp_remove_remote_oob_data *cp = data;
4806 bt_dev_dbg(hdev, "sock %p", sk);
4808 if (cp->addr.type != BDADDR_BREDR)
4809 return mgmt_cmd_complete(sk, hdev->id,
4810 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4811 MGMT_STATUS_INVALID_PARAMS,
4812 &cp->addr, sizeof(cp->addr));
4816 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4817 hci_remote_oob_data_clear(hdev);
4818 status = MGMT_STATUS_SUCCESS;
4822 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4824 status = MGMT_STATUS_INVALID_PARAMS;
4826 status = MGMT_STATUS_SUCCESS;
4829 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4830 status, &cp->addr, sizeof(cp->addr));
4832 hci_dev_unlock(hdev);
4836 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4838 struct mgmt_pending_cmd *cmd;
4840 bt_dev_dbg(hdev, "status %u", status);
4844 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4846 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4849 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4852 cmd->cmd_complete(cmd, mgmt_status(status));
4853 mgmt_pending_remove(cmd);
4856 hci_dev_unlock(hdev);
4858 /* Handle suspend notifier */
4859 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4860 hdev->suspend_tasks)) {
4861 bt_dev_dbg(hdev, "Unpaused discovery");
4862 wake_up(&hdev->suspend_wait_q);
4866 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4867 uint8_t *mgmt_status)
4870 case DISCOV_TYPE_LE:
4871 *mgmt_status = mgmt_le_support(hdev);
4875 case DISCOV_TYPE_INTERLEAVED:
4876 *mgmt_status = mgmt_le_support(hdev);
4880 case DISCOV_TYPE_BREDR:
4881 *mgmt_status = mgmt_bredr_support(hdev);
4886 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4893 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4894 u16 op, void *data, u16 len)
4896 struct mgmt_cp_start_discovery *cp = data;
4897 struct mgmt_pending_cmd *cmd;
4901 bt_dev_dbg(hdev, "sock %p", sk);
4905 if (!hdev_is_powered(hdev)) {
4906 err = mgmt_cmd_complete(sk, hdev->id, op,
4907 MGMT_STATUS_NOT_POWERED,
4908 &cp->type, sizeof(cp->type));
4912 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4913 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4914 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4915 &cp->type, sizeof(cp->type));
4919 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4920 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4921 &cp->type, sizeof(cp->type));
4925 /* Can't start discovery when it is paused */
4926 if (hdev->discovery_paused) {
4927 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4928 &cp->type, sizeof(cp->type));
4932 /* Clear the discovery filter first to free any previously
4933 * allocated memory for the UUID list.
4935 hci_discovery_filter_clear(hdev);
4937 hdev->discovery.type = cp->type;
4938 hdev->discovery.report_invalid_rssi = false;
4939 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4940 hdev->discovery.limited = true;
4942 hdev->discovery.limited = false;
4944 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4950 cmd->cmd_complete = generic_cmd_complete;
4952 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4953 queue_work(hdev->req_workqueue, &hdev->discov_update);
4957 hci_dev_unlock(hdev);
4961 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4962 void *data, u16 len)
4964 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
4968 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4969 void *data, u16 len)
4971 return start_discovery_internal(sk, hdev,
4972 MGMT_OP_START_LIMITED_DISCOVERY,
4976 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4979 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4983 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4984 void *data, u16 len)
4986 struct mgmt_cp_start_service_discovery *cp = data;
4987 struct mgmt_pending_cmd *cmd;
4988 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4989 u16 uuid_count, expected_len;
4993 bt_dev_dbg(hdev, "sock %p", sk);
4997 if (!hdev_is_powered(hdev)) {
4998 err = mgmt_cmd_complete(sk, hdev->id,
4999 MGMT_OP_START_SERVICE_DISCOVERY,
5000 MGMT_STATUS_NOT_POWERED,
5001 &cp->type, sizeof(cp->type));
5005 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5006 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5007 err = mgmt_cmd_complete(sk, hdev->id,
5008 MGMT_OP_START_SERVICE_DISCOVERY,
5009 MGMT_STATUS_BUSY, &cp->type,
5014 if (hdev->discovery_paused) {
5015 err = mgmt_cmd_complete(sk, hdev->id,
5016 MGMT_OP_START_SERVICE_DISCOVERY,
5017 MGMT_STATUS_BUSY, &cp->type,
5022 uuid_count = __le16_to_cpu(cp->uuid_count);
5023 if (uuid_count > max_uuid_count) {
5024 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5026 err = mgmt_cmd_complete(sk, hdev->id,
5027 MGMT_OP_START_SERVICE_DISCOVERY,
5028 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5033 expected_len = sizeof(*cp) + uuid_count * 16;
5034 if (expected_len != len) {
5035 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5037 err = mgmt_cmd_complete(sk, hdev->id,
5038 MGMT_OP_START_SERVICE_DISCOVERY,
5039 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5044 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5045 err = mgmt_cmd_complete(sk, hdev->id,
5046 MGMT_OP_START_SERVICE_DISCOVERY,
5047 status, &cp->type, sizeof(cp->type));
5051 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5058 cmd->cmd_complete = service_discovery_cmd_complete;
5060 /* Clear the discovery filter first to free any previously
5061 * allocated memory for the UUID list.
5063 hci_discovery_filter_clear(hdev);
5065 hdev->discovery.result_filtering = true;
5066 hdev->discovery.type = cp->type;
5067 hdev->discovery.rssi = cp->rssi;
5068 hdev->discovery.uuid_count = uuid_count;
5070 if (uuid_count > 0) {
5071 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5073 if (!hdev->discovery.uuids) {
5074 err = mgmt_cmd_complete(sk, hdev->id,
5075 MGMT_OP_START_SERVICE_DISCOVERY,
5077 &cp->type, sizeof(cp->type));
5078 mgmt_pending_remove(cmd);
5083 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5084 queue_work(hdev->req_workqueue, &hdev->discov_update);
5088 hci_dev_unlock(hdev);
5092 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5094 struct mgmt_pending_cmd *cmd;
5096 bt_dev_dbg(hdev, "status %u", status);
5100 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5102 cmd->cmd_complete(cmd, mgmt_status(status));
5103 mgmt_pending_remove(cmd);
5106 hci_dev_unlock(hdev);
5108 /* Handle suspend notifier */
5109 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5110 bt_dev_dbg(hdev, "Paused discovery");
5111 wake_up(&hdev->suspend_wait_q);
5115 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5118 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5119 struct mgmt_pending_cmd *cmd;
5122 bt_dev_dbg(hdev, "sock %p", sk);
5126 if (!hci_discovery_active(hdev)) {
5127 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5128 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5129 sizeof(mgmt_cp->type));
5133 if (hdev->discovery.type != mgmt_cp->type) {
5134 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5135 MGMT_STATUS_INVALID_PARAMS,
5136 &mgmt_cp->type, sizeof(mgmt_cp->type));
5140 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5146 cmd->cmd_complete = generic_cmd_complete;
5148 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5149 queue_work(hdev->req_workqueue, &hdev->discov_update);
5153 hci_dev_unlock(hdev);
5157 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5160 struct mgmt_cp_confirm_name *cp = data;
5161 struct inquiry_entry *e;
5164 bt_dev_dbg(hdev, "sock %p", sk);
5168 if (!hci_discovery_active(hdev)) {
5169 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5170 MGMT_STATUS_FAILED, &cp->addr,
5175 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5177 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5178 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5183 if (cp->name_known) {
5184 e->name_state = NAME_KNOWN;
5187 e->name_state = NAME_NEEDED;
5188 hci_inquiry_cache_update_resolve(hdev, e);
5191 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5192 &cp->addr, sizeof(cp->addr));
5195 hci_dev_unlock(hdev);
5199 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5202 struct mgmt_cp_block_device *cp = data;
5206 bt_dev_dbg(hdev, "sock %p", sk);
5208 if (!bdaddr_type_is_valid(cp->addr.type))
5209 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5210 MGMT_STATUS_INVALID_PARAMS,
5211 &cp->addr, sizeof(cp->addr));
5215 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5218 status = MGMT_STATUS_FAILED;
5222 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5224 status = MGMT_STATUS_SUCCESS;
5227 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5228 &cp->addr, sizeof(cp->addr));
5230 hci_dev_unlock(hdev);
5235 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5238 struct mgmt_cp_unblock_device *cp = data;
5242 bt_dev_dbg(hdev, "sock %p", sk);
5244 if (!bdaddr_type_is_valid(cp->addr.type))
5245 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5246 MGMT_STATUS_INVALID_PARAMS,
5247 &cp->addr, sizeof(cp->addr));
5251 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5254 status = MGMT_STATUS_INVALID_PARAMS;
5258 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5260 status = MGMT_STATUS_SUCCESS;
5263 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5264 &cp->addr, sizeof(cp->addr));
5266 hci_dev_unlock(hdev);
5271 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5274 struct mgmt_cp_set_device_id *cp = data;
5275 struct hci_request req;
5279 bt_dev_dbg(hdev, "sock %p", sk);
5281 source = __le16_to_cpu(cp->source);
5283 if (source > 0x0002)
5284 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5285 MGMT_STATUS_INVALID_PARAMS);
5289 hdev->devid_source = source;
5290 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5291 hdev->devid_product = __le16_to_cpu(cp->product);
5292 hdev->devid_version = __le16_to_cpu(cp->version);
5294 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5297 hci_req_init(&req, hdev);
5298 __hci_req_update_eir(&req);
5299 hci_req_run(&req, NULL);
5301 hci_dev_unlock(hdev);
5306 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5309 bt_dev_dbg(hdev, "status %u", status);
5312 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5315 struct cmd_lookup match = { NULL, hdev };
5316 struct hci_request req;
5318 struct adv_info *adv_instance;
5324 u8 mgmt_err = mgmt_status(status);
5326 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5327 cmd_status_rsp, &mgmt_err);
5331 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5332 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5334 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5336 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5339 new_settings(hdev, match.sk);
5344 /* Handle suspend notifier */
5345 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5346 hdev->suspend_tasks)) {
5347 bt_dev_dbg(hdev, "Paused advertising");
5348 wake_up(&hdev->suspend_wait_q);
5349 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5350 hdev->suspend_tasks)) {
5351 bt_dev_dbg(hdev, "Unpaused advertising");
5352 wake_up(&hdev->suspend_wait_q);
5355 /* If "Set Advertising" was just disabled and instance advertising was
5356 * set up earlier, then re-enable multi-instance advertising.
5358 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5359 list_empty(&hdev->adv_instances))
5362 instance = hdev->cur_adv_instance;
5364 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5365 struct adv_info, list);
5369 instance = adv_instance->instance;
5372 hci_req_init(&req, hdev);
5374 err = __hci_req_schedule_adv_instance(&req, instance, true);
5377 err = hci_req_run(&req, enable_advertising_instance);
5380 bt_dev_err(hdev, "failed to re-configure advertising");
5383 hci_dev_unlock(hdev);
5386 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5389 struct mgmt_mode *cp = data;
5390 struct mgmt_pending_cmd *cmd;
5391 struct hci_request req;
5395 bt_dev_dbg(hdev, "sock %p", sk);
5397 status = mgmt_le_support(hdev);
5399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5402 /* Enabling the experimental LL Privay support disables support for
5405 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5406 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5407 MGMT_STATUS_NOT_SUPPORTED);
5409 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5410 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5411 MGMT_STATUS_INVALID_PARAMS);
5413 if (hdev->advertising_paused)
5414 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5421 /* The following conditions are ones which mean that we should
5422 * not do any HCI communication but directly send a mgmt
5423 * response to user space (after toggling the flag if
5426 if (!hdev_is_powered(hdev) ||
5427 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5428 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5429 hci_conn_num(hdev, LE_LINK) > 0 ||
5430 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5431 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5435 hdev->cur_adv_instance = 0x00;
5436 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5437 if (cp->val == 0x02)
5438 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5440 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5442 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5443 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5446 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5451 err = new_settings(hdev, sk);
5456 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5457 pending_find(MGMT_OP_SET_LE, hdev)) {
5458 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5463 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5469 hci_req_init(&req, hdev);
5471 if (cp->val == 0x02)
5472 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5474 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5476 cancel_adv_timeout(hdev);
5479 /* Switch to instance "0" for the Set Advertising setting.
5480 * We cannot use update_[adv|scan_rsp]_data() here as the
5481 * HCI_ADVERTISING flag is not yet set.
5483 hdev->cur_adv_instance = 0x00;
5485 if (ext_adv_capable(hdev)) {
5486 __hci_req_start_ext_adv(&req, 0x00);
5488 __hci_req_update_adv_data(&req, 0x00);
5489 __hci_req_update_scan_rsp_data(&req, 0x00);
5490 __hci_req_enable_advertising(&req);
5493 __hci_req_disable_advertising(&req);
5496 err = hci_req_run(&req, set_advertising_complete);
5498 mgmt_pending_remove(cmd);
5501 hci_dev_unlock(hdev);
5505 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5506 void *data, u16 len)
5508 struct mgmt_cp_set_static_address *cp = data;
5511 bt_dev_dbg(hdev, "sock %p", sk);
5513 if (!lmp_le_capable(hdev))
5514 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5515 MGMT_STATUS_NOT_SUPPORTED);
5517 if (hdev_is_powered(hdev))
5518 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5519 MGMT_STATUS_REJECTED);
5521 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5522 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5523 return mgmt_cmd_status(sk, hdev->id,
5524 MGMT_OP_SET_STATIC_ADDRESS,
5525 MGMT_STATUS_INVALID_PARAMS);
5527 /* Two most significant bits shall be set */
5528 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5529 return mgmt_cmd_status(sk, hdev->id,
5530 MGMT_OP_SET_STATIC_ADDRESS,
5531 MGMT_STATUS_INVALID_PARAMS);
5536 bacpy(&hdev->static_addr, &cp->bdaddr);
5538 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5542 err = new_settings(hdev, sk);
5545 hci_dev_unlock(hdev);
/* MGMT Set Scan Parameters command handler.
 *
 * Validates and stores the LE scan interval and window (both in the
 * range 0x0004-0x4000, with window <= interval) into hdev. If a
 * passive background scan is currently running while discovery is
 * stopped, the scan is restarted so the new parameters take effect.
 */
5549 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5550 void *data, u16 len)
5552 struct mgmt_cp_set_scan_params *cp = data;
5553 __u16 interval, window;
5556 bt_dev_dbg(hdev, "sock %p", sk);
5558 if (!lmp_le_capable(hdev))
5559 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5560 MGMT_STATUS_NOT_SUPPORTED);
5562 interval = __le16_to_cpu(cp->interval);
/* Interval must be within the controller-defined 0x0004-0x4000 range */
5564 if (interval < 0x0004 || interval > 0x4000)
5565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5566 MGMT_STATUS_INVALID_PARAMS);
5568 window = __le16_to_cpu(cp->window);
5570 if (window < 0x0004 || window > 0x4000)
5571 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5572 MGMT_STATUS_INVALID_PARAMS);
/* The scan window may never exceed the scan interval */
5574 if (window > interval)
5575 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5576 MGMT_STATUS_INVALID_PARAMS);
5580 hdev->le_scan_interval = interval;
5581 hdev->le_scan_window = window;
5583 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5586 /* If background scan is running, restart it so new parameters are
5589 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5590 hdev->discovery.state == DISCOVERY_STOPPED) {
5591 struct hci_request req;
5593 hci_req_init(&req, hdev);
/* Disable then re-enable passive scan to apply the new values */
5595 hci_req_add_le_scan_disable(&req, false);
5596 hci_req_add_le_passive_scan(&req);
5598 hci_req_run(&req, NULL);
5601 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable.
 *
 * Looks up the pending mgmt command; on HCI failure reports the mapped
 * mgmt status to the requesting socket, otherwise toggles the
 * HCI_FAST_CONNECTABLE flag according to the original request value and
 * emits the settings response plus a New Settings event.
 */
5606 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5609 struct mgmt_pending_cmd *cmd;
5611 bt_dev_dbg(hdev, "status 0x%02x", status);
5615 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5620 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5621 mgmt_status(status));
/* Success path: apply the flag that was requested in cmd->param */
5623 struct mgmt_mode *cp = cmd->param;
5626 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5628 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5630 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5631 new_settings(hdev, cmd->sk);
5634 mgmt_pending_remove(cmd);
5637 hci_dev_unlock(hdev);
/* MGMT Set Fast Connectable command handler.
 *
 * Requires BR/EDR to be enabled and a controller of at least
 * Bluetooth 1.2; val must be 0x00 or 0x01. If the flag already has the
 * requested value only the settings response is sent. When powered off
 * the flag is simply toggled; when powered a fast-connectable page scan
 * write is queued and completed in fast_connectable_complete().
 */
5640 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5641 void *data, u16 len)
5643 struct mgmt_mode *cp = data;
5644 struct mgmt_pending_cmd *cmd;
5645 struct hci_request req;
5648 bt_dev_dbg(hdev, "sock %p", sk);
5650 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5651 hdev->hci_ver < BLUETOOTH_VER_1_2)
5652 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5653 MGMT_STATUS_NOT_SUPPORTED);
5655 if (cp->val != 0x00 && cp->val != 0x01)
5656 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5657 MGMT_STATUS_INVALID_PARAMS);
/* Only one Set Fast Connectable may be in flight at a time */
5661 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5662 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested state matches the current flag */
5667 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5668 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
/* Powered off: just flip the flag, no HCI traffic needed */
5673 if (!hdev_is_powered(hdev)) {
5674 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5675 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5677 new_settings(hdev, sk);
5681 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5688 hci_req_init(&req, hdev);
5690 __hci_req_write_fast_connectable(&req, cp->val);
5692 err = hci_req_run(&req, fast_connectable_complete);
5694 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5695 MGMT_STATUS_FAILED);
5696 mgmt_pending_remove(cmd);
5700 hci_dev_unlock(hdev);
/* HCI request completion callback for Set BR/EDR.
 *
 * On HCI failure the HCI_BREDR_ENABLED flag (set optimistically before
 * the request was issued) is rolled back and an error status returned;
 * on success the settings response and New Settings event are sent.
 */
5705 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5707 struct mgmt_pending_cmd *cmd;
5709 bt_dev_dbg(hdev, "status 0x%02x", status);
5713 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5718 u8 mgmt_err = mgmt_status(status);
5720 /* We need to restore the flag if related HCI commands
5723 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5725 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5727 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5728 new_settings(hdev, cmd->sk);
5731 mgmt_pending_remove(cmd);
5734 hci_dev_unlock(hdev);
/* MGMT Set BR/EDR command handler (dual-mode controllers only).
 *
 * Enables or disables BR/EDR on a controller that supports both
 * BR/EDR and LE, while LE is enabled. Disabling when powered is
 * rejected, as is re-enabling when a static address or Secure
 * Connections is configured (see the in-body comment). When powered
 * off the flags are adjusted directly; when powered an HCI request is
 * queued and finished in set_bredr_complete().
 */
5737 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5739 struct mgmt_mode *cp = data;
5740 struct mgmt_pending_cmd *cmd;
5741 struct hci_request req;
5744 bt_dev_dbg(hdev, "sock %p", sk);
5746 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5747 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5748 MGMT_STATUS_NOT_SUPPORTED);
5750 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5752 MGMT_STATUS_REJECTED);
5754 if (cp->val != 0x00 && cp->val != 0x01)
5755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5756 MGMT_STATUS_INVALID_PARAMS);
/* Requested state already active: just acknowledge */
5760 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5761 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5765 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR also clears all BR/EDR-only settings */
5767 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5768 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5769 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5770 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5771 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5774 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5776 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5780 err = new_settings(hdev, sk);
5784 /* Reject disabling when powered on */
5786 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5787 MGMT_STATUS_REJECTED);
5790 /* When configuring a dual-mode controller to operate
5791 * with LE only and using a static address, then switching
5792 * BR/EDR back on is not allowed.
5794 * Dual-mode controllers shall operate with the public
5795 * address as its identity address for BR/EDR and LE. So
5796 * reject the attempt to create an invalid configuration.
5798 * The same restrictions applies when secure connections
5799 * has been enabled. For BR/EDR this is a controller feature
5800 * while for LE it is a host stack feature. This means that
5801 * switching BR/EDR back on when secure connections has been
5802 * enabled is not a supported transaction.
5804 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5805 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5806 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5807 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5808 MGMT_STATUS_REJECTED);
5813 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5814 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5819 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5825 /* We need to flip the bit already here so that
5826 * hci_req_update_adv_data generates the correct flags.
5828 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5830 hci_req_init(&req, hdev);
5832 __hci_req_write_fast_connectable(&req, false);
5833 __hci_req_update_scan(&req);
5835 /* Since only the advertising data flags will change, there
5836 * is no need to update the scan response data.
5838 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5840 err = hci_req_run(&req, set_bredr_complete);
5842 mgmt_pending_remove(cmd);
5845 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Secure Connections.
 *
 * On failure reports the mapped mgmt status. On success updates the
 * HCI_SC_ENABLED/HCI_SC_ONLY flag pair to reflect the requested mode
 * (off / SC enabled / SC-only) and emits the settings response and
 * New Settings event. The branch structure between the flag updates
 * is elided in this excerpt.
 */
5849 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5851 struct mgmt_pending_cmd *cmd;
5852 struct mgmt_mode *cp;
5854 bt_dev_dbg(hdev, "status %u", status);
5858 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5863 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5864 mgmt_status(status));
/* val 0x00: SC fully disabled */
5872 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5873 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x01: SC enabled but not SC-only */
5876 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5877 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x02: SC-only mode */
5880 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5881 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5885 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5886 new_settings(hdev, cmd->sk);
5889 mgmt_pending_remove(cmd);
5891 hci_dev_unlock(hdev);
/* MGMT Set Secure Connections command handler.
 *
 * val: 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only. Requires either
 * controller SC support or LE enabled; when BR/EDR is enabled with an
 * SC-capable controller, SSP must be enabled first. If the controller
 * is unpowered, lacks SC, or BR/EDR is disabled, only the host flags
 * are toggled; otherwise a Write SC Support HCI command is issued and
 * completed in sc_enable_complete().
 */
5894 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5895 void *data, u16 len)
5897 struct mgmt_mode *cp = data;
5898 struct mgmt_pending_cmd *cmd;
5899 struct hci_request req;
5903 bt_dev_dbg(hdev, "sock %p", sk);
5905 if (!lmp_sc_capable(hdev) &&
5906 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5908 MGMT_STATUS_NOT_SUPPORTED);
5910 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5911 lmp_sc_capable(hdev) &&
5912 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5913 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5914 MGMT_STATUS_REJECTED);
5916 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5917 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5918 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI traffic possible or needed */
5922 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5923 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5927 changed = !hci_dev_test_and_set_flag(hdev,
5929 if (cp->val == 0x02)
5930 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5932 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5934 changed = hci_dev_test_and_clear_flag(hdev,
5936 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5939 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5944 err = new_settings(hdev, sk);
5949 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5950 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state (both enabled and SC-only match) */
5957 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5958 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5959 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5963 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5969 hci_req_init(&req, hdev);
5970 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5971 err = hci_req_run(&req, sc_enable_complete);
5973 mgmt_pending_remove(cmd);
5978 hci_dev_unlock(hdev);
/* MGMT Set Debug Keys command handler.
 *
 * val: 0x00 = discard debug keys, 0x01 = keep them, 0x02 = keep and
 * actively use debug keys. Updates HCI_KEEP_DEBUG_KEYS and
 * HCI_USE_DEBUG_KEYS accordingly, and, when powered with SSP enabled
 * and the "use" state changed, programs the controller's SSP debug
 * mode to match.
 */
5982 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5983 void *data, u16 len)
5985 struct mgmt_mode *cp = data;
5986 bool changed, use_changed;
5989 bt_dev_dbg(hdev, "sock %p", sk);
5991 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5993 MGMT_STATUS_INVALID_PARAMS);
5998 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6000 changed = hci_dev_test_and_clear_flag(hdev,
6001 HCI_KEEP_DEBUG_KEYS);
/* 0x02 additionally enables active use of debug keys */
6003 if (cp->val == 0x02)
6004 use_changed = !hci_dev_test_and_set_flag(hdev,
6005 HCI_USE_DEBUG_KEYS);
6007 use_changed = hci_dev_test_and_clear_flag(hdev,
6008 HCI_USE_DEBUG_KEYS);
/* Sync the controller's SSP debug mode with the new host state */
6010 if (hdev_is_powered(hdev) && use_changed &&
6011 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6012 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6013 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6014 sizeof(mode), &mode);
6017 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6022 err = new_settings(hdev, sk);
6025 hci_dev_unlock(hdev);
/* MGMT Set Privacy command handler.
 *
 * privacy: 0x00 = off, 0x01 = on, 0x02 = limited privacy. Only valid
 * for LE-capable, unpowered controllers. Enabling stores the supplied
 * IRK, marks the RPA expired so a fresh one is generated, and flags
 * advertising instances accordingly; disabling wipes the IRK and
 * clears the related flags. HCI_RPA_RESOLVING is always set because
 * any user space issuing this command is expected to handle IRKs.
 */
6029 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6032 struct mgmt_cp_set_privacy *cp = cp_data;
6036 bt_dev_dbg(hdev, "sock %p", sk);
6038 if (!lmp_le_capable(hdev))
6039 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6040 MGMT_STATUS_NOT_SUPPORTED);
6042 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6043 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6044 MGMT_STATUS_INVALID_PARAMS);
6046 if (hdev_is_powered(hdev))
6047 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6048 MGMT_STATUS_REJECTED);
6052 /* If user space supports this command it is also expected to
6053 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6055 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: store IRK, force RPA regeneration */
6058 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6059 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6060 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6061 hci_adv_instances_set_rpa_expired(hdev, true);
6062 if (cp->privacy == 0x02)
6063 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6065 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: wipe IRK and clear all privacy state */
6067 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6068 memset(hdev->irk, 0, sizeof(hdev->irk));
6069 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6070 hci_adv_instances_set_rpa_expired(hdev, false);
6071 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6074 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6079 err = new_settings(hdev, sk);
6082 hci_dev_unlock(hdev);
/* Validate the address of a single IRK entry from Load IRKs.
 *
 * Public LE addresses are accepted as-is; random LE addresses must be
 * static (two most significant bits set). Return/default handling for
 * other address types is elided in this excerpt.
 */
6086 static bool irk_is_valid(struct mgmt_irk_info *irk)
6088 switch (irk->addr.type) {
6089 case BDADDR_LE_PUBLIC:
6092 case BDADDR_LE_RANDOM:
6093 /* Two most significant bits shall be set */
6094 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load IRKs command handler.
 *
 * Validates the count against the maximum that fits in a u16 length,
 * checks the exact payload size with struct_size(), and verifies every
 * entry with irk_is_valid() before clearing the existing SMP IRK list
 * and loading the new entries (skipping any IRK on the blocked-key
 * list). Finally enables HCI_RPA_RESOLVING, since a caller using this
 * command is expected to handle resolvable addresses.
 */
6102 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6105 struct mgmt_cp_load_irks *cp = cp_data;
6106 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6107 sizeof(struct mgmt_irk_info));
6108 u16 irk_count, expected_len;
6111 bt_dev_dbg(hdev, "sock %p", sk);
6113 if (!lmp_le_capable(hdev))
6114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6115 MGMT_STATUS_NOT_SUPPORTED);
6117 irk_count = __le16_to_cpu(cp->irk_count);
6118 if (irk_count > max_irk_count) {
6119 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6121 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6122 MGMT_STATUS_INVALID_PARAMS);
/* Payload length must exactly match the declared entry count */
6125 expected_len = struct_size(cp, irks, irk_count);
6126 if (expected_len != len) {
6127 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6129 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6130 MGMT_STATUS_INVALID_PARAMS);
6133 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Reject the whole load if any single entry is invalid */
6135 for (i = 0; i < irk_count; i++) {
6136 struct mgmt_irk_info *key = &cp->irks[i];
6138 if (!irk_is_valid(key))
6139 return mgmt_cmd_status(sk, hdev->id,
6141 MGMT_STATUS_INVALID_PARAMS);
6146 hci_smp_irks_clear(hdev);
6148 for (i = 0; i < irk_count; i++) {
6149 struct mgmt_irk_info *irk = &cp->irks[i];
/* Silently skip IRKs that are on the blocked-key list */
6151 if (hci_is_blocked_key(hdev,
6152 HCI_BLOCKED_KEY_TYPE_IRK,
6154 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6159 hci_add_irk(hdev, &irk->addr.bdaddr,
6160 le_addr_type(irk->addr.type), irk->val,
6164 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6166 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6168 hci_dev_unlock(hdev);
/* MGMT Set Advertising Parameters handler (TIZEN_BT extension).
 *
 * Stores LE advertising min/max interval, filter policy and type into
 * hdev. Rejected while the standard HCI_ADVERTISING setting is active
 * (status value elided in this excerpt). Intervals must satisfy
 * min <= max, min >= 0x0020 and max <= 0x4000.
 */
6174 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
6175 void *data, u16 len)
6177 struct mgmt_cp_set_advertising_params *cp = data;
6182 BT_DBG("%s", hdev->name);
6184 if (!lmp_le_capable(hdev))
6185 return mgmt_cmd_status(sk, hdev->id,
6186 MGMT_OP_SET_ADVERTISING_PARAMS,
6187 MGMT_STATUS_NOT_SUPPORTED);
6189 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6190 return mgmt_cmd_status(sk, hdev->id,
6191 MGMT_OP_SET_ADVERTISING_PARAMS,
6194 min_interval = __le16_to_cpu(cp->interval_min);
6195 max_interval = __le16_to_cpu(cp->interval_max);
/* Interval sanity: ordered pair within the allowed LE range */
6197 if (min_interval > max_interval ||
6198 min_interval < 0x0020 || max_interval > 0x4000)
6199 return mgmt_cmd_status(sk, hdev->id,
6200 MGMT_OP_SET_ADVERTISING_PARAMS,
6201 MGMT_STATUS_INVALID_PARAMS);
6205 hdev->le_adv_min_interval = min_interval;
6206 hdev->le_adv_max_interval = max_interval;
6207 hdev->adv_filter_policy = cp->filter_policy;
6208 hdev->adv_type = cp->type;
6210 err = mgmt_cmd_complete(sk, hdev->id,
6211 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
6213 hci_dev_unlock(hdev);
/* Completion callback for Set Advertising Data (TIZEN_BT extension).
 *
 * Resolves the pending mgmt command and reports either the mapped HCI
 * error status or a successful command-complete to the requester.
 */
6218 static void set_advertising_data_complete(struct hci_dev *hdev,
6219 u8 status, u16 opcode)
6221 struct mgmt_cp_set_advertising_data *cp;
6222 struct mgmt_pending_cmd *cmd;
6224 BT_DBG("status 0x%02x", status);
6228 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
6235 mgmt_cmd_status(cmd->sk, hdev->id,
6236 MGMT_OP_SET_ADVERTISING_DATA,
6237 mgmt_status(status));
6239 mgmt_cmd_complete(cmd->sk, hdev->id,
6240 MGMT_OP_SET_ADVERTISING_DATA, 0,
6243 mgmt_pending_remove(cmd);
6246 hci_dev_unlock(hdev);
/* MGMT Set Advertising Data handler (TIZEN_BT extension).
 *
 * Copies up to HCI_MAX_AD_LENGTH bytes of raw advertising data into an
 * HCI LE Set Advertising Data command and queues it; the result is
 * delivered via set_advertising_data_complete(). Only one such command
 * may be pending at a time.
 */
6249 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6250 void *data, u16 len)
6252 struct mgmt_pending_cmd *cmd;
6253 struct hci_request req;
6254 struct mgmt_cp_set_advertising_data *cp = data;
6255 struct hci_cp_le_set_adv_data adv;
6258 BT_DBG("%s", hdev->name);
6260 if (!lmp_le_capable(hdev)) {
6261 return mgmt_cmd_status(sk, hdev->id,
6262 MGMT_OP_SET_ADVERTISING_DATA,
6263 MGMT_STATUS_NOT_SUPPORTED);
6268 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6269 err = mgmt_cmd_status(sk, hdev->id,
6270 MGMT_OP_SET_ADVERTISING_DATA,
/* Whole mgmt payload is the AD payload; bound it to the HCI limit */
6275 if (len > HCI_MAX_AD_LENGTH) {
6276 err = mgmt_cmd_status(sk, hdev->id,
6277 MGMT_OP_SET_ADVERTISING_DATA,
6278 MGMT_STATUS_INVALID_PARAMS);
6282 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6289 hci_req_init(&req, hdev);
6291 memset(&adv, 0, sizeof(adv));
6292 memcpy(adv.data, cp->data, len);
6295 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6297 err = hci_req_run(&req, set_advertising_data_complete);
6299 mgmt_pending_remove(cmd);
6302 hci_dev_unlock(hdev);
/* Completion callback for Set Scan Response Data (TIZEN_BT extension).
 *
 * Resolves the pending mgmt command and reports either the mapped HCI
 * error status or a successful command-complete to the requester.
 */
6307 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6310 struct mgmt_cp_set_scan_rsp_data *cp;
6311 struct mgmt_pending_cmd *cmd;
6313 BT_DBG("status 0x%02x", status);
6317 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6324 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6325 mgmt_status(status));
6327 mgmt_cmd_complete(cmd->sk, hdev->id,
6328 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6331 mgmt_pending_remove(cmd);
6334 hci_dev_unlock(hdev);
/* MGMT Set Scan Response Data handler (TIZEN_BT extension).
 *
 * Mirrors set_advertising_data() for scan response payloads: copies up
 * to HCI_MAX_AD_LENGTH bytes into an HCI LE Set Scan Response Data
 * command, queues it, and finishes in set_scan_rsp_data_complete().
 */
6337 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6340 struct mgmt_pending_cmd *cmd;
6341 struct hci_request req;
6342 struct mgmt_cp_set_scan_rsp_data *cp = data;
6343 struct hci_cp_le_set_scan_rsp_data rsp;
6346 BT_DBG("%s", hdev->name);
6348 if (!lmp_le_capable(hdev))
6349 return mgmt_cmd_status(sk, hdev->id,
6350 MGMT_OP_SET_SCAN_RSP_DATA,
6351 MGMT_STATUS_NOT_SUPPORTED);
6355 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6356 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6361 if (len > HCI_MAX_AD_LENGTH) {
6362 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6363 MGMT_STATUS_INVALID_PARAMS);
6367 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6373 hci_req_init(&req, hdev);
6375 memset(&rsp, 0, sizeof(rsp));
6376 memcpy(rsp.data, cp->data, len);
6379 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6381 err = hci_req_run(&req, set_scan_rsp_data_complete);
6383 mgmt_pending_remove(cmd);
6386 hci_dev_unlock(hdev);
6390 #endif /* TIZEN_BT */
/* Validate a single LTK entry from Load Long Term Keys.
 *
 * The initiator field must be 0x00 or 0x01. Public LE addresses are
 * accepted; random LE addresses must be static (two most significant
 * bits set). Return/default handling for other address types is
 * elided in this excerpt.
 */
6392 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6394 if (key->initiator != 0x00 && key->initiator != 0x01)
6397 switch (key->addr.type) {
6398 case BDADDR_LE_PUBLIC:
6401 case BDADDR_LE_RANDOM:
6402 /* Two most significant bits shall be set */
6403 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load Long Term Keys command handler.
 *
 * Validates count and exact payload length, verifies every entry with
 * ltk_is_valid(), then clears the SMP LTK list and loads the new
 * entries. Each entry's mgmt key type is mapped to an SMP LTK type and
 * authentication level; keys on the blocked-key list are skipped with
 * a warning rather than failing the load.
 */
6411 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6412 void *cp_data, u16 len)
6414 struct mgmt_cp_load_long_term_keys *cp = cp_data;
6415 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6416 sizeof(struct mgmt_ltk_info));
6417 u16 key_count, expected_len;
6420 bt_dev_dbg(hdev, "sock %p", sk);
6422 if (!lmp_le_capable(hdev))
6423 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6424 MGMT_STATUS_NOT_SUPPORTED);
6426 key_count = __le16_to_cpu(cp->key_count);
6427 if (key_count > max_key_count) {
6428 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6430 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6431 MGMT_STATUS_INVALID_PARAMS);
/* Payload length must exactly match the declared entry count */
6434 expected_len = struct_size(cp, keys, key_count);
6435 if (expected_len != len) {
6436 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6438 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6439 MGMT_STATUS_INVALID_PARAMS);
6442 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Reject the whole load if any single entry is invalid */
6444 for (i = 0; i < key_count; i++) {
6445 struct mgmt_ltk_info *key = &cp->keys[i];
6447 if (!ltk_is_valid(key))
6448 return mgmt_cmd_status(sk, hdev->id,
6449 MGMT_OP_LOAD_LONG_TERM_KEYS,
6450 MGMT_STATUS_INVALID_PARAMS);
6455 hci_smp_ltks_clear(hdev);
6457 for (i = 0; i < key_count; i++) {
6458 struct mgmt_ltk_info *key = &cp->keys[i];
6459 u8 type, authenticated;
/* Skip (don't fail on) keys present in the blocked-key list */
6461 if (hci_is_blocked_key(hdev,
6462 HCI_BLOCKED_KEY_TYPE_LTK,
6464 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map mgmt key type to SMP type + authentication level */
6469 switch (key->type) {
6470 case MGMT_LTK_UNAUTHENTICATED:
6471 authenticated = 0x00;
6472 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6474 case MGMT_LTK_AUTHENTICATED:
6475 authenticated = 0x01;
6476 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6478 case MGMT_LTK_P256_UNAUTH:
6479 authenticated = 0x00;
6480 type = SMP_LTK_P256;
6482 case MGMT_LTK_P256_AUTH:
6483 authenticated = 0x01;
6484 type = SMP_LTK_P256;
6486 case MGMT_LTK_P256_DEBUG:
6487 authenticated = 0x00;
6488 type = SMP_LTK_P256_DEBUG;
6494 hci_add_ltk(hdev, &key->addr.bdaddr,
6495 le_addr_type(key->addr.type), type, authenticated,
6496 key->val, key->enc_size, key->ediv, key->rand);
6499 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6502 hci_dev_unlock(hdev);
/* Build and send the Get Connection Info reply for a pending command.
 *
 * On success copies the cached RSSI / TX power / max TX power from the
 * connection; on any other status fills in the "invalid" sentinel
 * values. Drops the connection reference taken when the command was
 * queued (the matching hci_conn_put appears to be elided here —
 * TODO confirm against the full source).
 */
6507 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6509 struct hci_conn *conn = cmd->user_data;
6510 struct mgmt_rp_get_conn_info rp;
6513 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6515 if (status == MGMT_STATUS_SUCCESS) {
6516 rp.rssi = conn->rssi;
6517 rp.tx_power = conn->tx_power;
6518 rp.max_tx_power = conn->max_tx_power;
6520 rp.rssi = HCI_RSSI_INVALID;
6521 rp.tx_power = HCI_TX_POWER_INVALID;
6522 rp.max_tx_power = HCI_TX_POWER_INVALID;
6525 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6526 status, &rp, sizeof(rp));
6528 hci_conn_drop(conn);
/* HCI completion callback for the Get Connection Info refresh request.
 *
 * Recovers the connection handle from whichever command (Read RSSI or
 * Read TX Power) was sent last — both carry the handle first, so the
 * same struct cast is safe — then finds the matching pending mgmt
 * command and completes it. A TX-power failure after a successful RSSI
 * read is deliberately reported as success.
 */
6534 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
6537 struct hci_cp_read_rssi *cp;
6538 struct mgmt_pending_cmd *cmd;
6539 struct hci_conn *conn;
6543 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
6547 /* Commands sent in request are either Read RSSI or Read Transmit Power
6548 * Level so we check which one was last sent to retrieve connection
6549 * handle. Both commands have handle as first parameter so it's safe to
6550 * cast data on the same command struct.
6552 * First command sent is always Read RSSI and we fail only if it fails.
6553 * In other case we simply override error to indicate success as we
6554 * already remembered if TX power value is actually valid.
6556 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
6558 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
6559 status = MGMT_STATUS_SUCCESS;
6561 status = mgmt_status(hci_status);
6565 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
6569 handle = __le16_to_cpu(cp->handle);
6570 conn = hci_conn_hash_lookup_handle(hdev, handle);
6572 bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
6577 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
6581 cmd->cmd_complete(cmd, status);
6582 mgmt_pending_remove(cmd);
6585 hci_dev_unlock(hdev);
/* MGMT Get Connection Info command handler.
 *
 * Returns RSSI / TX power / max TX power for an active connection.
 * If the cached values are fresh (younger than a randomized age
 * between hdev->conn_info_min_age and conn_info_max_age) they are
 * returned directly; otherwise an HCI request is queued to refresh
 * them, a pending command is recorded, and the reply is sent from
 * conn_info_refresh_complete() / conn_info_cmd_complete().
 */
6588 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6591 struct mgmt_cp_get_conn_info *cp = data;
6592 struct mgmt_rp_get_conn_info rp;
6593 struct hci_conn *conn;
6594 unsigned long conn_info_age;
6597 bt_dev_dbg(hdev, "sock %p", sk);
6599 memset(&rp, 0, sizeof(rp));
6600 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6601 rp.addr.type = cp->addr.type;
6603 if (!bdaddr_type_is_valid(cp->addr.type))
6604 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6605 MGMT_STATUS_INVALID_PARAMS,
6610 if (!hdev_is_powered(hdev)) {
6611 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6612 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the connection on the right transport for the address type */
6617 if (cp->addr.type == BDADDR_BREDR)
6618 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6621 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6623 if (!conn || conn->state != BT_CONNECTED) {
6624 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6625 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one Get Conn Info per connection may be in flight */
6630 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
6631 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6632 MGMT_STATUS_BUSY, &rp, sizeof(rp));
6636 /* To avoid client trying to guess when to poll again for information we
6637 * calculate conn info age as random value between min/max set in hdev.
6639 conn_info_age = hdev->conn_info_min_age +
6640 prandom_u32_max(hdev->conn_info_max_age -
6641 hdev->conn_info_min_age);
6643 /* Query controller to refresh cached values if they are too old or were
6646 if (time_after(jiffies, conn->conn_info_timestamp +
6647 msecs_to_jiffies(conn_info_age)) ||
6648 !conn->conn_info_timestamp) {
6649 struct hci_request req;
6650 struct hci_cp_read_tx_power req_txp_cp;
6651 struct hci_cp_read_rssi req_rssi_cp;
6652 struct mgmt_pending_cmd *cmd;
6654 hci_req_init(&req, hdev);
6655 req_rssi_cp.handle = cpu_to_le16(conn->handle);
6656 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
6659 /* For LE links TX power does not change thus we don't need to
6660 * query for it once value is known.
6662 if (!bdaddr_type_is_le(cp->addr.type) ||
6663 conn->tx_power == HCI_TX_POWER_INVALID) {
6664 req_txp_cp.handle = cpu_to_le16(conn->handle);
6665 req_txp_cp.type = 0x00;
6666 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6667 sizeof(req_txp_cp), &req_txp_cp);
6670 /* Max TX power needs to be read only once per connection */
6671 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
6672 req_txp_cp.handle = cpu_to_le16(conn->handle);
6673 req_txp_cp.type = 0x01;
6674 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6675 sizeof(req_txp_cp), &req_txp_cp);
6678 err = hci_req_run(&req, conn_info_refresh_complete);
6682 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold + ref the connection for the async completion path */
6689 hci_conn_hold(conn);
6690 cmd->user_data = hci_conn_get(conn);
6691 cmd->cmd_complete = conn_info_cmd_complete;
6693 conn->conn_info_timestamp = jiffies;
6695 /* Cache is valid, just reply with values cached in hci_conn */
6696 rp.rssi = conn->rssi;
6697 rp.tx_power = conn->tx_power;
6698 rp.max_tx_power = conn->max_tx_power;
6700 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6701 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6705 hci_dev_unlock(hdev);
/* Build and send the Get Clock Info reply for a pending command.
 *
 * Fills the local clock from hdev and, for a connection-specific
 * query, the piconet clock and accuracy from the connection. The
 * success/conn guards between these assignments are elided in this
 * excerpt. Drops the connection reference taken at queue time.
 */
6709 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6711 struct hci_conn *conn = cmd->user_data;
6712 struct mgmt_rp_get_clock_info rp;
6713 struct hci_dev *hdev;
6716 memset(&rp, 0, sizeof(rp));
6717 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6722 hdev = hci_dev_get(cmd->index);
6724 rp.local_clock = cpu_to_le32(hdev->clock);
6729 rp.piconet_clock = cpu_to_le32(conn->clock);
6730 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6734 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6738 hci_conn_drop(conn);
/* HCI completion callback for the Read Clock request.
 *
 * Recovers the connection (when the last Read Clock targeted the
 * piconet clock, i.e. hci_cp->which != 0), finds the matching pending
 * Get Clock Info command and completes it with the mapped status.
 */
6745 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6747 struct hci_cp_read_clock *hci_cp;
6748 struct mgmt_pending_cmd *cmd;
6749 struct hci_conn *conn;
6751 bt_dev_dbg(hdev, "status %u", status);
6755 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means a piconet-clock read for a specific handle */
6759 if (hci_cp->which) {
6760 u16 handle = __le16_to_cpu(hci_cp->handle);
6761 conn = hci_conn_hash_lookup_handle(hdev, handle);
6766 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
6770 cmd->cmd_complete(cmd, mgmt_status(status));
6771 mgmt_pending_remove(cmd);
6774 hci_dev_unlock(hdev);
/* MGMT Get Clock Information command handler (BR/EDR only).
 *
 * Always reads the local clock; when a non-ANY peer address is given
 * the peer must have an active ACL connection, and a second Read Clock
 * for the piconet clock of that connection is queued. Completion is
 * handled by get_clock_info_complete() / clock_info_cmd_complete().
 */
6777 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6780 struct mgmt_cp_get_clock_info *cp = data;
6781 struct mgmt_rp_get_clock_info rp;
6782 struct hci_cp_read_clock hci_cp;
6783 struct mgmt_pending_cmd *cmd;
6784 struct hci_request req;
6785 struct hci_conn *conn;
6788 bt_dev_dbg(hdev, "sock %p", sk);
6790 memset(&rp, 0, sizeof(rp));
6791 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6792 rp.addr.type = cp->addr.type;
6794 if (cp->addr.type != BDADDR_BREDR)
6795 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6796 MGMT_STATUS_INVALID_PARAMS,
6801 if (!hdev_is_powered(hdev)) {
6802 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6803 MGMT_STATUS_NOT_POWERED, &rp,
/* Non-ANY address: the peer must be connected over ACL */
6808 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6809 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6811 if (!conn || conn->state != BT_CONNECTED) {
6812 err = mgmt_cmd_complete(sk, hdev->id,
6813 MGMT_OP_GET_CLOCK_INFO,
6814 MGMT_STATUS_NOT_CONNECTED,
6822 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6828 cmd->cmd_complete = clock_info_cmd_complete;
6830 hci_req_init(&req, hdev);
/* First read: local clock (hci_cp zeroed => which == 0) */
6832 memset(&hci_cp, 0, sizeof(hci_cp));
6833 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold + ref the connection for the async completion path */
6836 hci_conn_hold(conn);
6837 cmd->user_data = hci_conn_get(conn);
6839 hci_cp.handle = cpu_to_le16(conn->handle);
6840 hci_cp.which = 0x01; /* Piconet clock */
6841 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
6844 err = hci_req_run(&req, get_clock_info_complete);
6846 mgmt_pending_remove(cmd);
6849 hci_dev_unlock(hdev);
/* Check whether an LE connection to the given address/type exists and
 * is in BT_CONNECTED state. Return statements are elided in this
 * excerpt; the checks shown gate the false paths.
 */
6853 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6855 struct hci_conn *conn;
6857 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6861 if (conn->dst_type != type)
6864 if (conn->state != BT_CONNECTED)
6870 /* This function requires the caller holds hdev->lock */
/* Set (creating if needed) the auto-connect policy for an LE peer.
 *
 * The params entry is first removed from any action list, then
 * re-queued: DISABLED/LINK_LOSS keep a pending explicit connect alive;
 * REPORT goes to pend_le_reports (or pend_le_conns while an explicit
 * connect is in progress); DIRECT/ALWAYS queue a connect unless the
 * device is already connected. No-op if the policy is unchanged.
 */
6871 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6872 u8 addr_type, u8 auto_connect)
6874 struct hci_conn_params *params;
6876 params = hci_conn_params_add(hdev, addr, addr_type);
6880 if (params->auto_connect == auto_connect)
6883 list_del_init(&params->action);
6885 switch (auto_connect) {
6886 case HCI_AUTO_CONN_DISABLED:
6887 case HCI_AUTO_CONN_LINK_LOSS:
6888 /* If auto connect is being disabled when we're trying to
6889 * connect to device, keep connecting.
6891 if (params->explicit_connect)
6892 list_add(&params->action, &hdev->pend_le_conns)
6894 case HCI_AUTO_CONN_REPORT:
6895 if (params->explicit_connect)
6896 list_add(&params->action, &hdev->pend_le_conns);
6898 list_add(&params->action, &hdev->pend_le_reports);
6900 case HCI_AUTO_CONN_DIRECT:
6901 case HCI_AUTO_CONN_ALWAYS:
6902 if (!is_connected(hdev, addr, addr_type))
6903 list_add(&params->action, &hdev->pend_le_conns);
6907 params->auto_connect = auto_connect;
6909 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
6910 addr, addr_type, auto_connect);
/* Emit a Device Added mgmt event (address, type, action) to all mgmt
 * sockets except the originating one (passed as the skip socket).
 */
6915 static void device_added(struct sock *sk, struct hci_dev *hdev,
6916 bdaddr_t *bdaddr, u8 type, u8 action)
6918 struct mgmt_ev_device_added ev;
6920 bacpy(&ev.addr.bdaddr, bdaddr);
6921 ev.addr.type = type;
6924 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT Add Device command handler.
 *
 * action: 0x00 = background scan & report, 0x01 = allow incoming
 * (BR/EDR) / direct connect (LE), 0x02 = auto-connect (LE). BR/EDR
 * devices (incoming-only) go on the accept list and trigger a page
 * scan update; LE devices get conn_params with the mapped auto-connect
 * policy and a background scan update. Emits Device Added and Device
 * Flags Changed events on success.
 */
6927 static int add_device(struct sock *sk, struct hci_dev *hdev,
6928 void *data, u16 len)
6930 struct mgmt_cp_add_device *cp = data;
6931 u8 auto_conn, addr_type;
6932 struct hci_conn_params *params;
6934 u32 current_flags = 0;
6936 bt_dev_dbg(hdev, "sock %p", sk);
6938 if (!bdaddr_type_is_valid(cp->addr.type) ||
6939 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6940 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6941 MGMT_STATUS_INVALID_PARAMS,
6942 &cp->addr, sizeof(cp->addr));
6944 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6945 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6946 MGMT_STATUS_INVALID_PARAMS,
6947 &cp->addr, sizeof(cp->addr));
6951 if (cp->addr.type == BDADDR_BREDR) {
6952 /* Only incoming connections action is supported for now */
6953 if (cp->action != 0x01) {
6954 err = mgmt_cmd_complete(sk, hdev->id,
6956 MGMT_STATUS_INVALID_PARAMS,
6957 &cp->addr, sizeof(cp->addr));
6961 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
6967 hci_req_update_scan(hdev);
/* LE path: map the mgmt action onto an auto-connect policy */
6972 addr_type = le_addr_type(cp->addr.type);
6974 if (cp->action == 0x02)
6975 auto_conn = HCI_AUTO_CONN_ALWAYS;
6976 else if (cp->action == 0x01)
6977 auto_conn = HCI_AUTO_CONN_DIRECT;
6979 auto_conn = HCI_AUTO_CONN_REPORT;
6981 /* Kernel internally uses conn_params with resolvable private
6982 * address, but Add Device allows only identity addresses.
6983 * Make sure it is enforced before calling
6984 * hci_conn_params_lookup.
6986 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6987 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6988 MGMT_STATUS_INVALID_PARAMS,
6989 &cp->addr, sizeof(cp->addr));
6993 /* If the connection parameters don't exist for this device,
6994 * they will be created and configured with defaults.
6996 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
6998 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6999 MGMT_STATUS_FAILED, &cp->addr,
7003 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7006 current_flags = params->current_flags;
7009 hci_update_background_scan(hdev);
7012 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7013 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7014 SUPPORTED_DEVICE_FLAGS(), current_flags);
7016 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7017 MGMT_STATUS_SUCCESS, &cp->addr,
7021 hci_dev_unlock(hdev);
7025 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7026 bdaddr_t *bdaddr, u8 type)
7028 struct mgmt_ev_device_removed ev;
7030 bacpy(&ev.addr.bdaddr, bdaddr);
7031 ev.addr.type = type;
7033 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7036 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7037 void *data, u16 len)
7039 struct mgmt_cp_remove_device *cp = data;
7042 bt_dev_dbg(hdev, "sock %p", sk);
7046 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7047 struct hci_conn_params *params;
7050 if (!bdaddr_type_is_valid(cp->addr.type)) {
7051 err = mgmt_cmd_complete(sk, hdev->id,
7052 MGMT_OP_REMOVE_DEVICE,
7053 MGMT_STATUS_INVALID_PARAMS,
7054 &cp->addr, sizeof(cp->addr));
7058 if (cp->addr.type == BDADDR_BREDR) {
7059 err = hci_bdaddr_list_del(&hdev->accept_list,
7063 err = mgmt_cmd_complete(sk, hdev->id,
7064 MGMT_OP_REMOVE_DEVICE,
7065 MGMT_STATUS_INVALID_PARAMS,
7071 hci_req_update_scan(hdev);
7073 device_removed(sk, hdev, &cp->addr.bdaddr,
7078 addr_type = le_addr_type(cp->addr.type);
7080 /* Kernel internally uses conn_params with resolvable private
7081 * address, but Remove Device allows only identity addresses.
7082 * Make sure it is enforced before calling
7083 * hci_conn_params_lookup.
7085 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7086 err = mgmt_cmd_complete(sk, hdev->id,
7087 MGMT_OP_REMOVE_DEVICE,
7088 MGMT_STATUS_INVALID_PARAMS,
7089 &cp->addr, sizeof(cp->addr));
7093 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7096 err = mgmt_cmd_complete(sk, hdev->id,
7097 MGMT_OP_REMOVE_DEVICE,
7098 MGMT_STATUS_INVALID_PARAMS,
7099 &cp->addr, sizeof(cp->addr));
7103 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7104 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7105 err = mgmt_cmd_complete(sk, hdev->id,
7106 MGMT_OP_REMOVE_DEVICE,
7107 MGMT_STATUS_INVALID_PARAMS,
7108 &cp->addr, sizeof(cp->addr));
7112 list_del(¶ms->action);
7113 list_del(¶ms->list);
7115 hci_update_background_scan(hdev);
7117 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7119 struct hci_conn_params *p, *tmp;
7120 struct bdaddr_list *b, *btmp;
7122 if (cp->addr.type) {
7123 err = mgmt_cmd_complete(sk, hdev->id,
7124 MGMT_OP_REMOVE_DEVICE,
7125 MGMT_STATUS_INVALID_PARAMS,
7126 &cp->addr, sizeof(cp->addr));
7130 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7131 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7136 hci_req_update_scan(hdev);
7138 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7139 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7141 device_removed(sk, hdev, &p->addr, p->addr_type);
7142 if (p->explicit_connect) {
7143 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7146 list_del(&p->action);
7151 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7153 hci_update_background_scan(hdev);
7157 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7158 MGMT_STATUS_SUCCESS, &cp->addr,
7161 hci_dev_unlock(hdev);
7165 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7168 struct mgmt_cp_load_conn_param *cp = data;
7169 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7170 sizeof(struct mgmt_conn_param));
7171 u16 param_count, expected_len;
7174 if (!lmp_le_capable(hdev))
7175 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7176 MGMT_STATUS_NOT_SUPPORTED);
7178 param_count = __le16_to_cpu(cp->param_count);
7179 if (param_count > max_param_count) {
7180 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7182 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7183 MGMT_STATUS_INVALID_PARAMS);
7186 expected_len = struct_size(cp, params, param_count);
7187 if (expected_len != len) {
7188 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7191 MGMT_STATUS_INVALID_PARAMS);
7194 bt_dev_dbg(hdev, "param_count %u", param_count);
7198 hci_conn_params_clear_disabled(hdev);
7200 for (i = 0; i < param_count; i++) {
7201 struct mgmt_conn_param *param = &cp->params[i];
7202 struct hci_conn_params *hci_param;
7203 u16 min, max, latency, timeout;
7206 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7209 if (param->addr.type == BDADDR_LE_PUBLIC) {
7210 addr_type = ADDR_LE_DEV_PUBLIC;
7211 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7212 addr_type = ADDR_LE_DEV_RANDOM;
7214 bt_dev_err(hdev, "ignoring invalid connection parameters");
7218 min = le16_to_cpu(param->min_interval);
7219 max = le16_to_cpu(param->max_interval);
7220 latency = le16_to_cpu(param->latency);
7221 timeout = le16_to_cpu(param->timeout);
7223 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7224 min, max, latency, timeout);
7226 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7227 bt_dev_err(hdev, "ignoring invalid connection parameters");
7231 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
7234 bt_dev_err(hdev, "failed to add connection parameters");
7238 hci_param->conn_min_interval = min;
7239 hci_param->conn_max_interval = max;
7240 hci_param->conn_latency = latency;
7241 hci_param->supervision_timeout = timeout;
7244 hci_dev_unlock(hdev);
7246 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for the MGMT Set External Configuration command.
 *
 * Toggles HCI_EXT_CONFIGURED on controllers that declare the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk; rejected while powered.  When the
 * configured/unconfigured state flips, the controller index is
 * re-announced to userspace.
 */
7250 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7251 void *data, u16 len)
7253 struct mgmt_cp_set_external_config *cp = data;
7257 bt_dev_dbg(hdev, "sock %p", sk);
7259 if (hdev_is_powered(hdev))
7260 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7261 MGMT_STATUS_REJECTED);
/* Only 0x00 (disable) and 0x01 (enable) are valid config values. */
7263 if (cp->config != 0x00 && cp->config != 0x01)
7264 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7265 MGMT_STATUS_INVALID_PARAMS);
7267 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7268 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7269 MGMT_STATUS_NOT_SUPPORTED);
/* Record whether the flag actually changed so events are only sent
 * on a real transition.
 */
7274 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7276 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7278 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7285 err = new_options(hdev, sk);
/* If the UNCONFIGURED flag no longer matches reality, remove and
 * re-add the index so userspace sees it under the right list.
 */
7287 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7288 mgmt_index_removed(hdev);
7290 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7291 hci_dev_set_flag(hdev, HCI_CONFIG);
7292 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
/* Power on asynchronously to finish configuration. */
7294 queue_work(hdev->req_workqueue, &hdev->power_on);
7296 set_bit(HCI_RAW, &hdev->flags);
7297 mgmt_index_added(hdev);
7302 hci_dev_unlock(hdev);
/* Handler for the MGMT Set Public Address command.
 *
 * Stores a public address for controllers that come up without one;
 * only valid while powered off and when the driver provides a
 * set_bdaddr callback.  If this completes configuration, the index is
 * re-registered and the device is powered on to apply the address.
 */
7306 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7307 void *data, u16 len)
7309 struct mgmt_cp_set_public_address *cp = data;
7313 bt_dev_dbg(hdev, "sock %p", sk);
7315 if (hdev_is_powered(hdev))
7316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7317 MGMT_STATUS_REJECTED);
/* The all-zero address is not a valid public address. */
7319 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7320 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7321 MGMT_STATUS_INVALID_PARAMS);
/* Without a driver hook there is no way to program the address. */
7323 if (!hdev->set_bdaddr)
7324 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7325 MGMT_STATUS_NOT_SUPPORTED);
7329 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7330 bacpy(&hdev->public_addr, &cp->bdaddr);
7332 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7339 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7340 err = new_options(hdev, sk);
/* Becoming configured: move the index from the unconfigured to the
 * configured list and kick off power-on.
 */
7342 if (is_configured(hdev)) {
7343 mgmt_index_removed(hdev);
7345 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7347 hci_dev_set_flag(hdev, HCI_CONFIG);
7348 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7350 queue_work(hdev->req_workqueue, &hdev->power_on);
7354 hci_dev_unlock(hdev);
/* HCI request callback for Read Local OOB (Extended) Data.
 *
 * Builds the MGMT Read Local OOB Extended Data response from the HCI
 * reply: class of device plus the P-192 and (when secure connections
 * data is present) P-256 hash/randomizer values, encoded as EIR fields.
 * On success the result is also broadcast as a Local OOB Data Updated
 * event to sockets that opted in.
 */
7358 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
7359 u16 opcode, struct sk_buff *skb)
7361 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7362 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7363 u8 *h192, *r192, *h256, *r256;
7364 struct mgmt_pending_cmd *cmd;
7368 bt_dev_dbg(hdev, "status %u", status);
7370 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
7374 mgmt_cp = cmd->param;
7377 status = mgmt_status(status);
/* Legacy reply: only P-192 hash/randomizer available. */
7384 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
7385 struct hci_rp_read_local_oob_data *rp;
7387 if (skb->len != sizeof(*rp)) {
7388 status = MGMT_STATUS_FAILED;
7391 status = MGMT_STATUS_SUCCESS;
7392 rp = (void *)skb->data;
/* 5 bytes class-of-dev EIR + two 18-byte hash/rand EIR fields. */
7394 eir_len = 5 + 18 + 18;
/* Extended reply: carries both P-192 and P-256 values. */
7401 struct hci_rp_read_local_oob_ext_data *rp;
7403 if (skb->len != sizeof(*rp)) {
7404 status = MGMT_STATUS_FAILED;
7407 status = MGMT_STATUS_SUCCESS;
7408 rp = (void *)skb->data;
/* SC-only mode exposes just the P-256 pair. */
7410 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7411 eir_len = 5 + 18 + 18;
7415 eir_len = 5 + 18 + 18 + 18 + 18;
7425 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7432 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7433 hdev->dev_class, 3);
7436 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7437 EIR_SSP_HASH_C192, h192, 16);
7438 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7439 EIR_SSP_RAND_R192, r192, 16);
7443 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7444 EIR_SSP_HASH_C256, h256, 16);
7445 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7446 EIR_SSP_RAND_R256, r256, 16);
7450 mgmt_rp->type = mgmt_cp->type;
7451 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7453 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7454 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7455 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7456 if (err < 0 || status)
/* Requester implicitly subscribes to future OOB data updates. */
7459 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7461 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7462 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7463 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7466 mgmt_pending_remove(cmd);
/* Queue an HCI Read Local OOB (Extended) Data request for the BR/EDR
 * part of Read Local OOB Extended Data.  The pending mgmt command is
 * completed from read_local_oob_ext_data_complete().
 */
7469 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7470 struct mgmt_cp_read_local_oob_ext_data *cp)
7472 struct mgmt_pending_cmd *cmd;
7473 struct hci_request req;
7476 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7481 hci_req_init(&req, hdev);
/* Prefer the extended variant when BR/EDR secure connections are
 * available, so P-256 values are returned too.
 */
7483 if (bredr_sc_enabled(hdev))
7484 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7486 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7488 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
/* On failure to submit, drop the pending command again. */
7490 mgmt_pending_remove(cmd);
/* Handler for the MGMT Read Local OOB Extended Data command.
 *
 * For BR/EDR (with SSP enabled) the data is fetched asynchronously via
 * read_local_ssp_oob_req().  For LE the EIR payload (address, role,
 * optional SC hash/randomizer, flags) is assembled inline.  On success
 * the data is additionally broadcast as a Local OOB Data Updated event.
 */
7497 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7498 void *data, u16 data_len)
7500 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7501 struct mgmt_rp_read_local_oob_ext_data *rp;
7504 u8 status, flags, role, addr[7], hash[16], rand[16];
7507 bt_dev_dbg(hdev, "sock %p", sk);
/* cp->type is a transport bitmask; only the exact BR/EDR bit or the
 * combined LE bits are accepted.
 */
7509 if (hdev_is_powered(hdev)) {
7511 case BIT(BDADDR_BREDR):
7512 status = mgmt_bredr_support(hdev);
7518 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7519 status = mgmt_le_support(hdev);
/* Worst-case LE payload: bdaddr + role + hash + rand + flags EIRs. */
7523 eir_len = 9 + 3 + 18 + 18 + 3;
7526 status = MGMT_STATUS_INVALID_PARAMS;
7531 status = MGMT_STATUS_NOT_POWERED;
7535 rp_len = sizeof(*rp) + eir_len;
7536 rp = kmalloc(rp_len, GFP_ATOMIC);
7547 case BIT(BDADDR_BREDR):
7548 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
/* Async path: reply is sent from the HCI completion callback. */
7549 err = read_local_ssp_oob_req(hdev, sk, cp);
7550 hci_dev_unlock(hdev);
7554 status = MGMT_STATUS_FAILED;
/* Without SSP only the class of device can be reported. */
7557 eir_len = eir_append_data(rp->eir, eir_len,
7559 hdev->dev_class, 3);
7562 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7563 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7564 smp_generate_oob(hdev, hash, rand) < 0) {
7565 hci_dev_unlock(hdev);
7566 status = MGMT_STATUS_FAILED;
7570 /* This should return the active RPA, but since the RPA
7571 * is only programmed on demand, it is really hard to fill
7572 * this in at the moment. For now disallow retrieving
7573 * local out-of-band data when privacy is in use.
7575 * Returning the identity address will not help here since
7576 * pairing happens before the identity resolving key is
7577 * known and thus the connection establishment happens
7578 * based on the RPA and not the identity address.
7580 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7581 hci_dev_unlock(hdev);
7582 status = MGMT_STATUS_REJECTED;
/* Pick the address actually used on air: static when forced, when
 * no public address exists, or in LE-only mode with a static
 * address configured; otherwise the public address.
 */
7586 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7587 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7588 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7589 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7590 memcpy(addr, &hdev->static_addr, 6);
7593 memcpy(addr, &hdev->bdaddr, 6);
/* addr[6] carries the address-type byte of the EIR_LE_BDADDR field. */
7597 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7598 addr, sizeof(addr));
7600 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7605 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7606 &role, sizeof(role));
7608 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7609 eir_len = eir_append_data(rp->eir, eir_len,
7611 hash, sizeof(hash));
7613 eir_len = eir_append_data(rp->eir, eir_len,
7615 rand, sizeof(rand));
7618 flags = mgmt_get_adv_discov_flags(hdev);
7620 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7621 flags |= LE_AD_NO_BREDR;
7623 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7624 &flags, sizeof(flags));
7628 hci_dev_unlock(hdev);
/* Requester implicitly subscribes to OOB data update events. */
7630 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7632 status = MGMT_STATUS_SUCCESS;
7635 rp->type = cp->type;
7636 rp->eir_len = cpu_to_le16(eir_len);
7638 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7639 status, rp, sizeof(*rp) + eir_len);
7640 if (err < 0 || status)
7643 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7644 rp, sizeof(*rp) + eir_len,
7645 HCI_MGMT_OOB_DATA_EVENTS, sk);
7653 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7657 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7658 flags |= MGMT_ADV_FLAG_DISCOV;
7659 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7660 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7661 flags |= MGMT_ADV_FLAG_APPEARANCE;
7662 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7663 flags |= MGMT_ADV_PARAM_DURATION;
7664 flags |= MGMT_ADV_PARAM_TIMEOUT;
7665 flags |= MGMT_ADV_PARAM_INTERVALS;
7666 flags |= MGMT_ADV_PARAM_TX_POWER;
7667 flags |= MGMT_ADV_PARAM_SCAN_RSP;
7669 /* In extended adv TX_POWER returned from Set Adv Param
7670 * will be always valid.
7672 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7673 ext_adv_capable(hdev))
7674 flags |= MGMT_ADV_FLAG_TX_POWER;
7676 if (ext_adv_capable(hdev)) {
7677 flags |= MGMT_ADV_FLAG_SEC_1M;
7678 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7679 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7681 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7682 flags |= MGMT_ADV_FLAG_SEC_2M;
7684 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7685 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* Handler for the MGMT Read Advertising Features command.
 *
 * Replies with the supported advertising flags, data-length limits,
 * maximum number of instances, and the list of currently registered
 * instance identifiers.
 */
7691 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7692 void *data, u16 data_len)
7694 struct mgmt_rp_read_adv_features *rp;
7697 struct adv_info *adv_instance;
7698 u32 supported_flags;
7701 bt_dev_dbg(hdev, "sock %p", sk);
7703 if (!lmp_le_capable(hdev))
7704 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7705 MGMT_STATUS_REJECTED);
/* Enabling the experimental LL Privacy support disables support for
 * advertising.
 */
7710 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7711 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7712 MGMT_STATUS_NOT_SUPPORTED);
/* One trailing byte per registered instance for its identifier. */
7716 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7717 rp = kmalloc(rp_len, GFP_ATOMIC);
7719 hci_dev_unlock(hdev);
7723 supported_flags = get_supported_adv_flags(hdev);
7725 rp->supported_flags = cpu_to_le32(supported_flags);
7726 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7727 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7728 rp->max_instances = hdev->le_num_of_adv_sets;
7729 rp->num_instances = hdev->adv_instance_cnt;
/* Append each registered instance id after the fixed part. */
7731 instance = rp->instance;
7732 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7733 *instance = adv_instance->instance;
7737 hci_dev_unlock(hdev);
7739 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7740 MGMT_STATUS_SUCCESS, rp, rp_len);
7747 static u8 calculate_name_len(struct hci_dev *hdev)
7749 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7751 return append_local_name(hdev, buf, 0);
/* How many bytes of the HCI_MAX_AD_LENGTH budget remain available for
 * user-supplied TLV data, after reserving room for the fields the
 * kernel manages itself under the given adv_flags.
 * NOTE(review): the adv-data vs scan-rsp branching between these
 * reductions is not visible in this extract — confirm against the
 * is_adv_data parameter before relying on exact grouping.
 */
7754 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7757 u8 max_len = HCI_MAX_AD_LENGTH;
/* Kernel-managed Flags field consumes 3 bytes. */
7760 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7761 MGMT_ADV_FLAG_LIMITED_DISCOV |
7762 MGMT_ADV_FLAG_MANAGED_FLAGS))
/* TX power field consumes 3 bytes. */
7765 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
/* Local name consumes its rendered length (see calculate_name_len). */
7768 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7769 max_len -= calculate_name_len(hdev);
/* Appearance field reservation. */
7771 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7778 static bool flags_managed(u32 adv_flags)
7780 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7781 MGMT_ADV_FLAG_LIMITED_DISCOV |
7782 MGMT_ADV_FLAG_MANAGED_FLAGS);
7785 static bool tx_power_managed(u32 adv_flags)
7787 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7790 static bool name_managed(u32 adv_flags)
7792 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7795 static bool appearance_managed(u32 adv_flags)
7797 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising/scan-response TLV data.
 *
 * Rejects payloads that exceed the remaining length budget, contain a
 * field the kernel manages itself under adv_flags (Flags, TX power,
 * name, appearance), or whose per-field length runs past the buffer.
 */
7800 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7801 u8 len, bool is_adv_data)
7806 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7811 /* Make sure that the data is correctly formatted. */
7812 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* data[i] is the field length, data[i + 1] the field type. */
7818 if (data[i + 1] == EIR_FLAGS &&
7819 (!is_adv_data || flags_managed(adv_flags)))
/* Flags are only legal in adv data, and never when kernel-managed. */
7822 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7825 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7828 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7831 if (data[i + 1] == EIR_APPEARANCE &&
7832 appearance_managed(adv_flags))
7835 /* If the current field length would exceed the total data
7836 * length, then it's invalid.
7838 if (i + cur_len >= len)
7845 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7847 u32 supported_flags, phy_flags;
7849 /* The current implementation only supports a subset of the specified
7850 * flags. Also need to check mutual exclusiveness of sec flags.
7852 supported_flags = get_supported_adv_flags(hdev);
7853 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7854 if (adv_flags & ~supported_flags ||
7855 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7861 static bool adv_busy(struct hci_dev *hdev)
7863 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7864 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7865 pending_find(MGMT_OP_SET_LE, hdev) ||
7866 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7867 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
/* HCI request callback shared by Add Advertising and Add Extended
 * Advertising Data.
 *
 * On failure every still-pending (not yet confirmed) instance is torn
 * down and userspace is told it was removed; the pending mgmt command,
 * if any, is completed with the mapped status.
 */
7870 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
7873 struct mgmt_pending_cmd *cmd;
7874 struct mgmt_cp_add_advertising *cp;
7875 struct mgmt_rp_add_advertising rp;
7876 struct adv_info *adv_instance, *n;
7879 bt_dev_dbg(hdev, "status %u", status);
/* Either of the two opcodes may own this completion. */
7883 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
7885 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
7887 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
7888 if (!adv_instance->pending)
/* Success: the instance is now confirmed, keep it. */
7892 adv_instance->pending = false;
/* Failure: remove the half-registered instance. */
7896 instance = adv_instance->instance;
7898 if (hdev->cur_adv_instance == instance)
7899 cancel_adv_timeout(hdev);
7901 hci_remove_adv_instance(hdev, instance);
7902 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
7909 rp.instance = cp->instance;
7912 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7913 mgmt_status(status));
7915 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7916 mgmt_status(status), &rp, sizeof(rp));
7918 mgmt_pending_remove(cmd);
7921 hci_dev_unlock(hdev);
/* Handler for the MGMT Add Advertising command.
 *
 * Validates flags and TLV payloads, registers (or replaces) the
 * advertising instance, and — when the controller is powered and not
 * in legacy HCI_ADVERTISING mode — schedules the instance via an HCI
 * request completed in add_advertising_complete().
 */
7924 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7925 void *data, u16 data_len)
7927 struct mgmt_cp_add_advertising *cp = data;
7928 struct mgmt_rp_add_advertising rp;
7931 u16 timeout, duration;
7932 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7933 u8 schedule_instance = 0;
7934 struct adv_info *next_instance;
7936 struct mgmt_pending_cmd *cmd;
7937 struct hci_request req;
7939 bt_dev_dbg(hdev, "sock %p", sk);
7941 status = mgmt_le_support(hdev);
7943 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Enabling the experimental LL Privacy support disables support for
 * advertising.
 */
7949 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7950 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7951 MGMT_STATUS_NOT_SUPPORTED);
/* Instance ids are 1-based and bounded by the controller's adv sets. */
7953 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7954 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7955 MGMT_STATUS_INVALID_PARAMS);
/* The payload must exactly cover both declared TLV blobs. */
7957 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7958 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7959 MGMT_STATUS_INVALID_PARAMS);
7961 flags = __le32_to_cpu(cp->flags);
7962 timeout = __le16_to_cpu(cp->timeout);
7963 duration = __le16_to_cpu(cp->duration);
7965 if (!requested_adv_flags_are_valid(hdev, flags))
7966 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7967 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be honoured while powered off. */
7971 if (timeout && !hdev_is_powered(hdev)) {
7972 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7973 MGMT_STATUS_REJECTED);
7977 if (adv_busy(hdev)) {
7978 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Both adv data and scan response share the cp->data buffer. */
7983 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7984 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7985 cp->scan_rsp_len, false)) {
7986 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7987 MGMT_STATUS_INVALID_PARAMS);
7991 err = hci_add_adv_instance(hdev, cp->instance, flags,
7992 cp->adv_data_len, cp->data,
7994 cp->data + cp->adv_data_len,
7996 HCI_ADV_TX_POWER_NO_PREFERENCE,
7997 hdev->le_adv_min_interval,
7998 hdev->le_adv_max_interval);
8000 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8001 MGMT_STATUS_FAILED);
8005 /* Only trigger an advertising added event if a new instance was
8008 if (hdev->adv_instance_cnt > prev_instance_cnt)
8009 mgmt_advertising_added(sk, hdev, cp->instance);
8011 if (hdev->cur_adv_instance == cp->instance) {
8012 /* If the currently advertised instance is being changed then
8013 * cancel the current advertising and schedule the next
8014 * instance. If there is only one instance then the overridden
8015 * advertising data will be visible right away.
8017 cancel_adv_timeout(hdev);
8019 next_instance = hci_get_next_instance(hdev, cp->instance);
8021 schedule_instance = next_instance->instance;
8022 } else if (!hdev->adv_instance_timeout) {
8023 /* Immediately advertise the new instance if no other
8024 * instance is currently being advertised.
8026 schedule_instance = cp->instance;
8029 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8030 * there is no instance to be advertised then we have no HCI
8031 * communication to make. Simply return.
8033 if (!hdev_is_powered(hdev) ||
8034 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8035 !schedule_instance) {
8036 rp.instance = cp->instance;
8037 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8038 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8042 /* We're good to go, update advertising data, parameters, and start
8045 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8052 hci_req_init(&req, hdev);
8054 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
8057 err = hci_req_run(&req, add_advertising_complete);
8060 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8061 MGMT_STATUS_FAILED);
8062 mgmt_pending_remove(cmd);
8066 hci_dev_unlock(hdev);
/* HCI request callback for Add Extended Advertising Parameters.
 *
 * On success replies with the instance's TX power and the remaining
 * adv/scan-rsp data space for its flags; on failure tears the instance
 * back down (announcing removal if it had been active before).
 */
8071 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
8074 struct mgmt_pending_cmd *cmd;
8075 struct mgmt_cp_add_ext_adv_params *cp;
8076 struct mgmt_rp_add_ext_adv_params rp;
8077 struct adv_info *adv_instance;
8080 BT_DBG("%s", hdev->name);
8084 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
8089 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8093 rp.instance = cp->instance;
8094 rp.tx_power = adv_instance->tx_power;
8096 /* While we're at it, inform userspace of the available space for this
8097 * advertisement, given the flags that will be used.
8099 flags = __le32_to_cpu(cp->flags);
8100 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8101 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8104 /* If this advertisement was previously advertising and we
8105 * failed to update it, we signal that it has been removed and
8106 * delete its structure
8108 if (!adv_instance->pending)
8109 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8111 hci_remove_adv_instance(hdev, cp->instance);
8113 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8114 mgmt_status(status));
8117 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8118 mgmt_status(status), &rp, sizeof(rp));
8123 mgmt_pending_remove(cmd);
8125 hci_dev_unlock(hdev);
8128 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8129 void *data, u16 data_len)
8131 struct mgmt_cp_add_ext_adv_params *cp = data;
8132 struct mgmt_rp_add_ext_adv_params rp;
8133 struct mgmt_pending_cmd *cmd = NULL;
8134 struct adv_info *adv_instance;
8135 struct hci_request req;
8136 u32 flags, min_interval, max_interval;
8137 u16 timeout, duration;
8142 BT_DBG("%s", hdev->name);
8144 status = mgmt_le_support(hdev);
8146 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8149 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8150 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8151 MGMT_STATUS_INVALID_PARAMS);
8153 /* The purpose of breaking add_advertising into two separate MGMT calls
8154 * for params and data is to allow more parameters to be added to this
8155 * structure in the future. For this reason, we verify that we have the
8156 * bare minimum structure we know of when the interface was defined. Any
8157 * extra parameters we don't know about will be ignored in this request.
8159 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8160 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8161 MGMT_STATUS_INVALID_PARAMS);
8163 flags = __le32_to_cpu(cp->flags);
8165 if (!requested_adv_flags_are_valid(hdev, flags))
8166 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8167 MGMT_STATUS_INVALID_PARAMS);
8171 /* In new interface, we require that we are powered to register */
8172 if (!hdev_is_powered(hdev)) {
8173 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8174 MGMT_STATUS_REJECTED);
8178 if (adv_busy(hdev)) {
8179 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8184 /* Parse defined parameters from request, use defaults otherwise */
8185 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8186 __le16_to_cpu(cp->timeout) : 0;
8188 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8189 __le16_to_cpu(cp->duration) :
8190 hdev->def_multi_adv_rotation_duration;
8192 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8193 __le32_to_cpu(cp->min_interval) :
8194 hdev->le_adv_min_interval;
8196 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8197 __le32_to_cpu(cp->max_interval) :
8198 hdev->le_adv_max_interval;
8200 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8202 HCI_ADV_TX_POWER_NO_PREFERENCE;
8204 /* Create advertising instance with no advertising or response data */
8205 err = hci_add_adv_instance(hdev, cp->instance, flags,
8206 0, NULL, 0, NULL, timeout, duration,
8207 tx_power, min_interval, max_interval);
8210 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8211 MGMT_STATUS_FAILED);
8215 /* Submit request for advertising params if ext adv available */
8216 if (ext_adv_capable(hdev)) {
8217 hci_req_init(&req, hdev);
8218 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8220 /* Updating parameters of an active instance will return a
8221 * Command Disallowed error, so we must first disable the
8222 * instance if it is active.
8224 if (!adv_instance->pending)
8225 __hci_req_disable_ext_adv_instance(&req, cp->instance);
8227 __hci_req_setup_ext_adv_instance(&req, cp->instance);
8229 err = hci_req_run(&req, add_ext_adv_params_complete);
8232 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
8233 hdev, data, data_len);
8236 hci_remove_adv_instance(hdev, cp->instance);
8241 rp.instance = cp->instance;
8242 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8243 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8244 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8245 err = mgmt_cmd_complete(sk, hdev->id,
8246 MGMT_OP_ADD_EXT_ADV_PARAMS,
8247 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8251 hci_dev_unlock(hdev);
/* Handler for the MGMT Add Extended Advertising Data command.
 *
 * Second half of the two-step extended advertising registration:
 * validates and installs the adv/scan-rsp TLV data on an instance
 * previously created by Add Extended Advertising Parameters, then
 * enables or schedules it.  On any failure for a still-pending
 * instance, the instance created in step one is removed again.
 */
8256 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8259 struct mgmt_cp_add_ext_adv_data *cp = data;
8260 struct mgmt_rp_add_ext_adv_data rp;
8261 u8 schedule_instance = 0;
8262 struct adv_info *next_instance;
8263 struct adv_info *adv_instance;
8265 struct mgmt_pending_cmd *cmd;
8266 struct hci_request req;
8268 BT_DBG("%s", hdev->name);
/* The instance must have been created by the params step. */
8272 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8274 if (!adv_instance) {
8275 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8276 MGMT_STATUS_INVALID_PARAMS);
8280 /* In new interface, we require that we are powered to register */
8281 if (!hdev_is_powered(hdev)) {
8282 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8283 MGMT_STATUS_REJECTED);
8284 goto clear_new_instance;
8287 if (adv_busy(hdev)) {
8288 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8290 goto clear_new_instance;
8293 /* Validate new data */
8294 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8295 cp->adv_data_len, true) ||
8296 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8297 cp->adv_data_len, cp->scan_rsp_len, false)) {
8298 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8299 MGMT_STATUS_INVALID_PARAMS);
8300 goto clear_new_instance;
8303 /* Set the data in the advertising instance */
8304 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8305 cp->data, cp->scan_rsp_len,
8306 cp->data + cp->adv_data_len);
8308 /* We're good to go, update advertising data, parameters, and start
8312 hci_req_init(&req, hdev);
8314 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
8316 if (ext_adv_capable(hdev)) {
8317 __hci_req_update_adv_data(&req, cp->instance);
8318 __hci_req_update_scan_rsp_data(&req, cp->instance);
8319 __hci_req_enable_ext_advertising(&req, cp->instance);
8322 /* If using software rotation, determine next instance to use */
8324 if (hdev->cur_adv_instance == cp->instance) {
8325 /* If the currently advertised instance is being changed
8326 * then cancel the current advertising and schedule the
8327 * next instance. If there is only one instance then the
8328 * overridden advertising data will be visible right
8331 cancel_adv_timeout(hdev);
8333 next_instance = hci_get_next_instance(hdev,
8336 schedule_instance = next_instance->instance;
8337 } else if (!hdev->adv_instance_timeout) {
8338 /* Immediately advertise the new instance if no other
8339 * instance is currently being advertised.
8341 schedule_instance = cp->instance;
8344 /* If the HCI_ADVERTISING flag is set or there is no instance to
8345 * be advertised then we have no HCI communication to make.
8348 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8349 !schedule_instance) {
/* Data stored, nothing to send: confirm the instance now. */
8350 if (adv_instance->pending) {
8351 mgmt_advertising_added(sk, hdev, cp->instance);
8352 adv_instance->pending = false;
8354 rp.instance = cp->instance;
8355 err = mgmt_cmd_complete(sk, hdev->id,
8356 MGMT_OP_ADD_EXT_ADV_DATA,
8357 MGMT_STATUS_SUCCESS, &rp,
8362 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
8366 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
8370 goto clear_new_instance;
8374 err = hci_req_run(&req, add_advertising_complete);
8377 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8378 MGMT_STATUS_FAILED);
8379 mgmt_pending_remove(cmd);
8380 goto clear_new_instance;
8383 /* We were successful in updating data, so trigger advertising_added
8384 * event if this is an instance that wasn't previously advertising. If
8385 * a failure occurs in the requests we initiated, we will remove the
8386 * instance again in add_advertising_complete
8388 if (adv_instance->pending)
8389 mgmt_advertising_added(sk, hdev, cp->instance);
/* clear_new_instance: undo the params-step registration on error. */
8394 hci_remove_adv_instance(hdev, cp->instance);
8397 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_REMOVE_ADVERTISING.
 * By this point the advertising instance has already been removed, so the
 * pending mgmt command is always answered with MGMT_STATUS_SUCCESS (see
 * the in-code comment below) and the pending entry is dropped.
 * NOTE(review): embedded line numbers jump (8402 -> 8405, 8417 -> 8422),
 * so braces / the early-return path for a missing pending cmd are not
 * visible in this extraction.
 */
8402 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
8405 struct mgmt_pending_cmd *cmd;
8406 struct mgmt_cp_remove_advertising *cp;
8407 struct mgmt_rp_remove_advertising rp;
8409 bt_dev_dbg(hdev, "status %u", status);
8413 /* A failure status here only means that we failed to disable
8414 * advertising. Otherwise, the advertising instance has been removed,
8415 * so report success.
8417 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
8422 rp.instance = cp->instance;
8424 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
8426 mgmt_pending_remove(cmd);
8429 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING: validates the requested
 * instance, builds an HCI request to disable/remove it (extended or
 * legacy path), and either completes immediately (nothing to send, not
 * powered, or HCI_ADVERTISING set) or queues a pending command completed
 * by remove_advertising_complete().
 * NOTE(review): extraction dropped lines throughout (e.g. 8448 -> 8452,
 * 8497 -> 8501), so goto labels and some error paths are missing here.
 */
8432 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8433 void *data, u16 data_len)
8435 struct mgmt_cp_remove_advertising *cp = data;
8436 struct mgmt_rp_remove_advertising rp;
8437 struct mgmt_pending_cmd *cmd;
8438 struct hci_request req;
8441 bt_dev_dbg(hdev, "sock %p", sk);
8443 /* Enabling the experimental LL Privacy support disables support for
8446 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
/* NOTE(review): status is sent with opcode MGMT_OP_SET_ADVERTISING even
 * though this is the REMOVE_ADVERTISING handler — looks like a copy/paste
 * from set_advertising; confirm intended before relying on it.
 */
8447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
8448 MGMT_STATUS_NOT_SUPPORTED);
/* Instance 0 means "all instances"; a nonzero instance must exist. */
8452 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8453 err = mgmt_cmd_status(sk, hdev->id,
8454 MGMT_OP_REMOVE_ADVERTISING,
8455 MGMT_STATUS_INVALID_PARAMS);
/* Reject while another advertising/LE state change is in flight. */
8459 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8460 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8461 pending_find(MGMT_OP_SET_LE, hdev)) {
8462 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8467 if (list_empty(&hdev->adv_instances)) {
8468 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8469 MGMT_STATUS_INVALID_PARAMS);
8473 hci_req_init(&req, hdev);
8475 /* If we use extended advertising, instance is disabled and removed */
8476 if (ext_adv_capable(hdev)) {
8477 __hci_req_disable_ext_adv_instance(&req, cp->instance);
8478 __hci_req_remove_ext_adv_instance(&req, cp->instance);
8481 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8483 if (list_empty(&hdev->adv_instances))
8484 __hci_req_disable_advertising(&req);
8486 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
8487 * flag is set or the device isn't powered then we have no HCI
8488 * communication to make. Simply return.
8490 if (skb_queue_empty(&req.cmd_q) ||
8491 !hdev_is_powered(hdev) ||
8492 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8493 hci_req_purge(&req);
8494 rp.instance = cp->instance;
8495 err = mgmt_cmd_complete(sk, hdev->id,
8496 MGMT_OP_REMOVE_ADVERTISING,
8497 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8501 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8508 err = hci_req_run(&req, remove_advertising_complete);
8510 mgmt_pending_remove(cmd);
8513 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: for a valid instance and flag
 * set, reports the maximum advertising-data and scan-response lengths
 * (via tlv_data_max_len()) without touching any state.
 * NOTE(review): extraction dropped some lines (8534 -> 8536), but the
 * visible flow is a straight validate-then-reply.
 */
8518 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8519 void *data, u16 data_len)
8521 struct mgmt_cp_get_adv_size_info *cp = data;
8522 struct mgmt_rp_get_adv_size_info rp;
8523 u32 flags, supported_flags;
8526 bt_dev_dbg(hdev, "sock %p", sk);
8528 if (!lmp_le_capable(hdev))
8529 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8530 MGMT_STATUS_REJECTED);
/* Instance must be in 1..le_num_of_adv_sets (0 is not a valid query). */
8532 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8533 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8534 MGMT_STATUS_INVALID_PARAMS);
8536 flags = __le32_to_cpu(cp->flags);
8538 /* The current implementation only supports a subset of the specified
8541 supported_flags = get_supported_adv_flags(hdev);
8542 if (flags & ~supported_flags)
8543 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8544 MGMT_STATUS_INVALID_PARAMS);
8546 rp.instance = cp->instance;
8547 rp.flags = cp->flags;
8548 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8549 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8551 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8552 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for mgmt commands, indexed by opcode (entry 0 unused).
 * Each entry pairs a handler with its fixed/minimum parameter size plus
 * optional HCI_MGMT_* flags (UNTRUSTED = allowed for untrusted sockets,
 * UNCONFIGURED = allowed on unconfigured controllers, HDEV_OPTIONAL =
 * may be sent without a controller index).
 * NOTE(review): extraction dropped lines (e.g. 8583 -> 8585, the closing
 * "};"), so several flag continuation lines are missing from this view.
 */
8557 static const struct hci_mgmt_handler mgmt_handlers[] = {
8558 { NULL }, /* 0x0000 (no command) */
8559 { read_version, MGMT_READ_VERSION_SIZE,
8561 HCI_MGMT_UNTRUSTED },
8562 { read_commands, MGMT_READ_COMMANDS_SIZE,
8564 HCI_MGMT_UNTRUSTED },
8565 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8567 HCI_MGMT_UNTRUSTED },
8568 { read_controller_info, MGMT_READ_INFO_SIZE,
8569 HCI_MGMT_UNTRUSTED },
8570 { set_powered, MGMT_SETTING_SIZE },
8571 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8572 { set_connectable, MGMT_SETTING_SIZE },
8573 { set_fast_connectable, MGMT_SETTING_SIZE },
8574 { set_bondable, MGMT_SETTING_SIZE },
8575 { set_link_security, MGMT_SETTING_SIZE },
8576 { set_ssp, MGMT_SETTING_SIZE },
8577 { set_hs, MGMT_SETTING_SIZE },
8578 { set_le, MGMT_SETTING_SIZE },
8579 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8580 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8581 { add_uuid, MGMT_ADD_UUID_SIZE },
8582 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8583 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8585 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8587 { disconnect, MGMT_DISCONNECT_SIZE },
8588 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8589 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8590 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8591 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8592 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8593 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8594 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8595 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8596 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8597 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8598 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8599 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8600 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8602 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8603 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8604 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8605 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8606 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8607 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8608 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8609 { set_advertising, MGMT_SETTING_SIZE },
8610 { set_bredr, MGMT_SETTING_SIZE },
8611 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8612 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8613 { set_secure_conn, MGMT_SETTING_SIZE },
8614 { set_debug_keys, MGMT_SETTING_SIZE },
8615 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8616 { load_irks, MGMT_LOAD_IRKS_SIZE,
8618 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8619 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8620 { add_device, MGMT_ADD_DEVICE_SIZE },
8621 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8622 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8624 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8626 HCI_MGMT_UNTRUSTED },
8627 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8628 HCI_MGMT_UNCONFIGURED |
8629 HCI_MGMT_UNTRUSTED },
8630 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8631 HCI_MGMT_UNCONFIGURED },
8632 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8633 HCI_MGMT_UNCONFIGURED },
8634 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8636 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8637 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8639 HCI_MGMT_UNTRUSTED },
8640 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8641 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8643 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8644 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8645 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8646 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8647 HCI_MGMT_UNTRUSTED },
8648 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8649 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8650 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8651 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8653 { set_wideband_speech, MGMT_SETTING_SIZE },
8654 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
8655 HCI_MGMT_UNTRUSTED },
8656 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
8657 HCI_MGMT_UNTRUSTED |
8658 HCI_MGMT_HDEV_OPTIONAL },
8659 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
8661 HCI_MGMT_HDEV_OPTIONAL },
8662 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8663 HCI_MGMT_UNTRUSTED },
8664 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8666 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8667 HCI_MGMT_UNTRUSTED },
8668 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8670 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
8671 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
8672 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8673 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8675 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
8676 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8678 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
8680 { add_adv_patterns_monitor_rssi,
8681 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
/* Tizen-specific mgmt command dispatch table (opcodes from
 * mgmt_tizen.h); same entry layout as mgmt_handlers above.
 * NOTE(review): trailing entries and the closing "};" were dropped by
 * the extraction (8691 -> 8696).
 */
8686 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
8687 { NULL }, /* 0x0000 (no command) */
8688 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
8689 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
8691 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
/* Announce a newly registered controller to mgmt listeners: legacy
 * INDEX_ADDED / UNCONF_INDEX_ADDED (depending on HCI_UNCONFIGURED) plus
 * the extended EXT_INDEX_ADDED event. Raw-only devices are skipped.
 * NOTE(review): extraction dropped lines 8712-8723 (the non-primary
 * dev_type case and the ev field setup), so ev is populated off-view.
 */
8696 void mgmt_index_added(struct hci_dev *hdev)
8698 struct mgmt_ev_ext_index ev;
8700 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8703 switch (hdev->dev_type) {
8705 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8706 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8707 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8710 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8711 HCI_MGMT_INDEX_EVENTS);
8724 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8725 HCI_MGMT_EXT_INDEX_EVENTS);
/* Mirror of mgmt_index_added() for controller unregistration: fail all
 * pending mgmt commands with MGMT_STATUS_INVALID_INDEX, then emit the
 * matching (UNCONF_)INDEX_REMOVED and EXT_INDEX_REMOVED events.
 * NOTE(review): lines 8747-8758 (other dev_type case, ev setup) were
 * dropped by the extraction.
 */
8728 void mgmt_index_removed(struct hci_dev *hdev)
8730 struct mgmt_ev_ext_index ev;
8731 u8 status = MGMT_STATUS_INVALID_INDEX;
8733 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8736 switch (hdev->dev_type) {
/* Opcode 0 == match every pending command; all are completed with
 * INVALID_INDEX since the controller is going away.
 */
8738 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8740 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8741 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8742 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8745 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8746 HCI_MGMT_INDEX_EVENTS);
8759 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8760 HCI_MGMT_EXT_INDEX_EVENTS);
8763 /* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection parameter entry back onto its action list
 * (pend_le_conns for DIRECT/ALWAYS, pend_le_reports for REPORT) after a
 * power cycle; needed because AUTO_OFF may not have really powered off.
 */
8764 static void restart_le_actions(struct hci_dev *hdev)
8766 struct hci_conn_params *p;
8768 list_for_each_entry(p, &hdev->le_conn_params, list) {
8769 /* Needed for AUTO_OFF case where might not "really"
8770 * have been powered off.
/* Detach first so list_add() below cannot double-link the node. */
8772 list_del_init(&p->action);
8774 switch (p->auto_connect) {
8775 case HCI_AUTO_CONN_DIRECT:
8776 case HCI_AUTO_CONN_ALWAYS:
8777 list_add(&p->action, &hdev->pend_le_conns);
8779 case HCI_AUTO_CONN_REPORT:
8780 list_add(&p->action, &hdev->pend_le_reports);
/* Called when powering the controller on finishes: restores LE
 * auto-connect actions, kicks background scanning, completes pending
 * MGMT_OP_SET_POWERED commands and broadcasts the new settings.
 * NOTE(review): lines 8793-8796 (hci_dev_lock and the err check guarding
 * the restart path) were dropped by the extraction.
 */
8790 struct cmd_lookup match = { NULL, hdev };
8792 bt_dev_dbg(hdev, "err %d", err);
8797 restart_le_actions(hdev);
8798 hci_update_background_scan(hdev);
8801 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8803 new_settings(hdev, match.sk);
8808 hci_dev_unlock(hdev);
/* Power-off teardown: answers pending SET_POWERED commands, fails every
 * other pending command with INVALID_INDEX (if unregistering) or
 * NOT_POWERED, clears a non-zero class of device towards listeners, and
 * broadcasts the new settings.
 */
8811 void __mgmt_power_off(struct hci_dev *hdev)
8813 struct cmd_lookup match = { NULL, hdev };
8814 u8 status, zero_cod[] = { 0, 0, 0 };
8816 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8818 /* If the power off is because of hdev unregistration let
8819 * use the appropriate INVALID_INDEX status. Otherwise use
8820 * NOT_POWERED. We cover both scenarios here since later in
8821 * mgmt_index_removed() any hci_conn callbacks will have already
8822 * been triggered, potentially causing misleading DISCONNECTED
8825 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8826 status = MGMT_STATUS_INVALID_INDEX;
8828 status = MGMT_STATUS_NOT_POWERED;
/* Opcode 0 matches all remaining pending commands. */
8830 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8832 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8833 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8834 zero_cod, sizeof(zero_cod),
8835 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8836 ext_info_changed(hdev, NULL);
8839 new_settings(hdev, match.sk);
/* Fail a pending MGMT_OP_SET_POWERED command: RFKILLED when the cause
 * was rfkill (-ERFKILL), generic FAILED otherwise.
 */
8845 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8847 struct mgmt_pending_cmd *cmd;
8850 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8854 if (err == -ERFKILL)
8855 status = MGMT_STATUS_RFKILLED;
8857 status = MGMT_STATUS_FAILED;
8859 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8861 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key.
 * store_hint tells userspace whether to persist the key.
 */
8864 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8867 struct mgmt_ev_new_link_key ev;
8869 memset(&ev, 0, sizeof(ev));
8871 ev.store_hint = persistent;
8872 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8873 ev.key.addr.type = BDADDR_BREDR;
8874 ev.key.type = key->type;
8875 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8876 ev.key.pin_len = key->pin_len;
8878 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK type + authentication flag onto the MGMT_LTK_* key
 * type reported to userspace; unknown types fall back to
 * MGMT_LTK_UNAUTHENTICATED.
 * NOTE(review): extraction dropped lines 8884 and 8889 — presumably the
 * SMP_LTK and SMP_LTK_P256 case labels; confirm against the full file.
 */
8881 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8883 switch (ltk->type) {
8885 case SMP_LTK_RESPONDER:
8886 if (ltk->authenticated)
8887 return MGMT_LTK_AUTHENTICATED;
8888 return MGMT_LTK_UNAUTHENTICATED;
8890 if (ltk->authenticated)
8891 return MGMT_LTK_P256_AUTH;
8892 return MGMT_LTK_P256_UNAUTH;
8893 case SMP_LTK_P256_DEBUG:
8894 return MGMT_LTK_P256_DEBUG;
8897 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long term key. The store
 * hint is forced to 0 for non-identity random addresses (RPA/NRPA:
 * top two bits of the MSB != 0b11) since those keys cannot be reused.
 */
8900 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8902 struct mgmt_ev_new_long_term_key ev;
8904 memset(&ev, 0, sizeof(ev));
8906 /* Devices using resolvable or non-resolvable random addresses
8907 * without providing an identity resolving key don't require
8908 * to store long term keys. Their addresses will change the
8911 * Only when a remote device provides an identity address
8912 * make sure the long term key is stored. If the remote
8913 * identity is known, the long term keys are internally
8914 * mapped to the identity address. So allow static random
8915 * and public addresses here.
8917 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8918 (key->bdaddr.b[5] & 0xc0) != 0xc0
8919 ev.store_hint = 0x00;
8921 ev.store_hint = persistent;
8923 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8924 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8925 ev.key.type = mgmt_ltk_type(key);
8926 ev.key.enc_size = key->enc_size;
8927 ev.key.ediv = key->ediv;
8928 ev.key.rand = key->rand;
8930 if (key->type == SMP_LTK)
8931 ev.key.initiator = 1;
8933 /* Make sure we copy only the significant bytes based on the
8934 * encryption key size, and set the rest of the value to zeroes.
8936 memcpy(ev.key.val, key->val, key->enc_size);
8937 memset(ev.key.val + key->enc_size, 0,
8938 sizeof(ev.key.val) - key->enc_size);
8940 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a new identity resolving key, including the
 * RPA it was resolved from and the identity address it belongs to.
 */
8943 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8945 struct mgmt_ev_new_irk ev;
8947 memset(&ev, 0, sizeof(ev));
8949 ev.store_hint = persistent;
8951 bacpy(&ev.rpa, &irk->rpa);
8952 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8953 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8954 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8956 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key. Like
 * mgmt_new_ltk(), the store hint is zeroed for non-identity random
 * addresses (address MSB top bits != 0b11).
 */
8959 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8962 struct mgmt_ev_new_csrk ev;
8964 memset(&ev, 0, sizeof(ev));
8966 /* Devices using resolvable or non-resolvable random addresses
8967 * without providing an identity resolving key don't require
8968 * to store signature resolving keys. Their addresses will change
8969 * the next time around.
8971 * Only when a remote device provides an identity address
8972 * make sure the signature resolving key is stored. So allow
8973 * static random and public addresses here.
8975 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8976 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
8977 ev.store_hint = 0x00;
8979 ev.store_hint = persistent;
8981 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8982 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
8983 ev.key.type = csrk->type;
8984 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8986 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with updated LE connection parameters for
 * a peer; silently ignored for non-identity (resolvable/non-resolvable
 * random) addresses.
 */
8989 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8990 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8991 u16 max_interval, u16 latency, u16 timeout)
8993 struct mgmt_ev_new_conn_param ev;
8995 if (!hci_is_identity_address(bdaddr, bdaddr_type))
8998 memset(&ev, 0, sizeof(ev));
8999 bacpy(&ev.addr.bdaddr, bdaddr);
9000 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9001 ev.store_hint = store_hint;
9002 ev.min_interval = cpu_to_le16(min_interval);
9003 ev.max_interval = cpu_to_le16(max_interval);
9004 ev.latency = cpu_to_le16(latency);
9005 ev.timeout = cpu_to_le16(timeout);
9007 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED. The EIR portion carries either the
 * peer's LE advertising data (if any was cached on the connection) or,
 * for BR/EDR, the remote name plus a non-zero class of device.
 * NOTE(review): extraction dropped lines (e.g. 9015-9017 with the buf
 * declaration sizing, 9034-9035 with the else branch), so the exact
 * buffer bound and branch structure are not visible here.
 */
9010 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9011 u8 *name, u8 name_len)
9014 struct mgmt_ev_device_connected *ev = (void *) buf;
9018 bacpy(&ev->addr.bdaddr, &conn->dst);
9019 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9022 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9024 ev->flags = __cpu_to_le32(flags);
9026 /* We must ensure that the EIR Data fields are ordered and
9027 * unique. Keep it simple for now and avoid the problem by not
9028 * adding any BR/EDR data to the LE adv.
9030 if (conn->le_adv_data_len > 0) {
9031 memcpy(&ev->eir[eir_len],
9032 conn->le_adv_data, conn->le_adv_data_len);
9033 eir_len = conn->le_adv_data_len;
9036 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
9039 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
9040 eir_len = eir_append_data(ev->eir, eir_len,
9042 conn->dev_class, 3);
9045 ev->eir_len = cpu_to_le16(eir_len);
9047 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
9048 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending MGMT_OP_DISCONNECT
 * command and hand its socket back to the caller via *data so the
 * DEVICE_DISCONNECTED event can be suppressed for that socket.
 * NOTE(review): lines 9056-9059 (the *sk assignment/sock_hold) were
 * dropped by the extraction.
 */
9051 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9053 struct sock **sk = data;
9055 cmd->cmd_complete(cmd, 0);
9060 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: emit DEVICE_UNPAIRED for the address
 * in the pending MGMT_OP_UNPAIR_DEVICE command, then complete and drop
 * the command.
 */
9063 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9065 struct hci_dev *hdev = data;
9066 struct mgmt_cp_unpair_device *cp = cmd->param;
9068 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9070 cmd->cmd_complete(cmd, 0);
9071 mgmt_pending_remove(cmd);
/* Returns whether a power-off is currently in progress, judged from a
 * pending MGMT_OP_SET_POWERED command.
 * NOTE(review): the body after pending_find() (lines 9080-9089: the NULL
 * check, cp->val inspection, and return statements) was dropped by the
 * extraction — only the lookup is visible here.
 */
9074 bool mgmt_powering_down(struct hci_dev *hdev)
9076 struct mgmt_pending_cmd *cmd;
9077 struct mgmt_mode *cp;
9079 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED (skipping the socket that issued the
 * matching MGMT_OP_DISCONNECT, collected via disconnect_rsp) and, if
 * this was the last connection during a power-down, expedite the queued
 * power_off work.
 */
9090 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9091 u8 link_type, u8 addr_type, u8 reason,
9092 bool mgmt_connected)
9094 struct mgmt_ev_device_disconnected ev;
9095 struct sock *sk = NULL;
9097 /* The connection is still in hci_conn_hash so test for 1
9098 * instead of 0 to know if this is the last one.
9100 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9101 cancel_delayed_work(&hdev->power_off);
9102 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9105 if (!mgmt_connected)
9108 if (link_type != ACL_LINK && link_type != LE_LINK)
9111 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9113 bacpy(&ev.addr.bdaddr, bdaddr);
9114 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9117 /* Report disconnects due to suspend */
9118 if (hdev->suspended)
9119 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9121 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9126 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnect failed: still flush pending UNPAIR_DEVICE
 * commands, then complete the matching pending MGMT_OP_DISCONNECT (same
 * address and address type) with the translated HCI status.
 */
9130 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9131 u8 link_type, u8 addr_type, u8 status)
9133 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9134 struct mgmt_cp_disconnect *cp;
9135 struct mgmt_pending_cmd *cmd;
9137 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9140 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9146 if (bacmp(bdaddr, &cp->addr.bdaddr))
9149 if (cp->addr.type != bdaddr_type)
9152 cmd->cmd_complete(cmd, mgmt_status(status));
9153 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED with the translated HCI status; like
 * mgmt_device_disconnected(), expedites power_off work if this was the
 * last connection during a power-down.
 */
9156 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9157 u8 addr_type, u8 status)
9159 struct mgmt_ev_connect_failed ev;
9161 /* The connection is still in hci_conn_hash so test for 1
9162 * instead of 0 to know if this is the last one.
9164 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9165 cancel_delayed_work(&hdev->power_off);
9166 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9169 bacpy(&ev.addr.bdaddr, bdaddr);
9170 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9171 ev.status = mgmt_status(status);
9173 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST asking userspace for a PIN for a BR/EDR
 * peer; the 'secure' flag assignment (line 9182/9183) was dropped by
 * the extraction.
 */
9176 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9178 struct mgmt_ev_pin_code_request ev;
9180 bacpy(&ev.addr.bdaddr, bdaddr);
9181 ev.addr.type = BDADDR_BREDR;
9184 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending MGMT_OP_PIN_CODE_REPLY with the translated HCI
 * status once the controller has processed the reply.
 */
9187 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9190 struct mgmt_pending_cmd *cmd;
9192 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9196 cmd->cmd_complete(cmd, mgmt_status(status));
9197 mgmt_pending_remove(cmd);
/* Same as mgmt_pin_code_reply_complete() but for the negative reply
 * opcode MGMT_OP_PIN_CODE_NEG_REPLY.
 */
9200 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9203 struct mgmt_pending_cmd *cmd;
9205 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9209 cmd->cmd_complete(cmd, mgmt_status(status));
9210 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking userspace to confirm a
 * numeric-comparison value during pairing; confirm_hint tells userspace
 * whether a simple yes/no is enough.
 */
9213 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9214 u8 link_type, u8 addr_type, u32 value,
9217 struct mgmt_ev_user_confirm_request ev;
9219 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9221 bacpy(&ev.addr.bdaddr, bdaddr);
9222 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9223 ev.confirm_hint = confirm_hint;
9224 ev.value = cpu_to_le32(value);
9226 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking userspace to enter a passkey
 * during pairing.
 */
9230 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9231 u8 link_type, u8 addr_type)
9233 struct mgmt_ev_user_passkey_request ev;
9235 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9237 bacpy(&ev.addr.bdaddr, bdaddr);
9238 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9240 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * opcodes: find the pending command for 'opcode', complete it with the
 * translated HCI status, and drop it.
 */
9244 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9245 u8 link_type, u8 addr_type, u8 status,
9248 struct mgmt_pending_cmd *cmd;
9250 cmd = pending_find(opcode, hdev);
9254 cmd->cmd_complete(cmd, mgmt_status(status));
9255 mgmt_pending_remove(cmd);
/* Thin wrapper: complete MGMT_OP_USER_CONFIRM_REPLY. */
9260 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9261 u8 link_type, u8 addr_type, u8 status)
9263 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9264 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete MGMT_OP_USER_CONFIRM_NEG_REPLY. */
9267 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9268 u8 link_type, u8 addr_type, u8 status)
9270 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9272 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete MGMT_OP_USER_PASSKEY_REPLY. */
9275 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9276 u8 link_type, u8 addr_type, u8 status)
9278 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9279 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete MGMT_OP_USER_PASSKEY_NEG_REPLY. */
9282 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9283 u8 link_type, u8 addr_type, u8 status)
9285 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9287 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey the
 * remote side must enter; 'entered' counts digits typed so far.
 */
9290 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9291 u8 link_type, u8 addr_type, u32 passkey,
9294 struct mgmt_ev_passkey_notify ev;
9296 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9298 bacpy(&ev.addr.bdaddr, bdaddr);
9299 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9300 ev.passkey = __cpu_to_le32(passkey);
9301 ev.entered = entered;
9303 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Authentication failed on a connection: emit MGMT_EV_AUTH_FAILED
 * (skipping the pairing command's own socket if one is pending) and
 * complete that pending pairing command with the translated status.
 * NOTE(review): lines 9320-9321 (the "if (cmd)" guard before
 * cmd_complete) were dropped by the extraction.
 */
9306 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9308 struct mgmt_ev_auth_failed ev;
9309 struct mgmt_pending_cmd *cmd;
9310 u8 status = mgmt_status(hci_status);
9312 bacpy(&ev.addr.bdaddr, &conn->dst);
9313 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9316 cmd = find_pairing(conn);
9318 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9319 cmd ? cmd->sk : NULL);
9322 cmd->cmd_complete(cmd, status);
9323 mgmt_pending_remove(cmd);
/* Completion of HCI_OP_WRITE_AUTH_ENABLE: on error, fail pending
 * SET_LINK_SECURITY commands; on success, sync HCI_LINK_SECURITY with
 * the HCI_AUTH flag, complete pending commands and broadcast settings
 * if anything changed.
 */
9327 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9329 struct cmd_lookup match = { NULL, hdev };
9333 u8 mgmt_err = mgmt_status(status);
9334 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9335 cmd_status_rsp, &mgmt_err);
9339 if (test_bit(HCI_AUTH, &hdev->flags))
9340 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9342 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9344 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9348 new_settings(hdev, match.sk);
/* Queue an HCI Write Extended Inquiry Response command that zeroes the
 * EIR, and clear the cached copy in hdev->eir; no-op if the controller
 * lacks extended-inquiry support.
 */
9354 static void clear_eir(struct hci_request *req)
9356 struct hci_dev *hdev = req->hdev;
9357 struct hci_cp_write_eir cp;
9359 if (!lmp_ext_inq_capable(hdev))
9362 memset(hdev->eir, 0, sizeof(hdev->eir));
9364 memset(&cp, 0, sizeof(cp));
9366 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of the SSP mode write: on error, roll back HCI_SSP_ENABLED
 * (and HS) and fail pending SET_SSP commands; on success, sync the
 * flags, complete pending commands, broadcast settings if changed, and
 * follow up with debug-key mode + EIR update (or clear_eir) via a new
 * HCI request.
 * NOTE(review): several branch/guard lines were dropped by the
 * extraction (e.g. 9385-9389, 9415-9418), so the exact if/else nesting
 * is not fully visible.
 */
9369 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
9371 struct cmd_lookup match = { NULL, hdev };
9372 struct hci_request req;
9373 bool changed = false;
9376 u8 mgmt_err = mgmt_status(status);
9378 if (enable && hci_dev_test_and_clear_flag(hdev,
9380 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9381 new_settings(hdev, NULL);
9384 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
9390 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
9392 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
9394 changed = hci_dev_test_and_clear_flag(hdev,
9397 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9400 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
9403 new_settings(hdev, match.sk);
9408 hci_req_init(&req, hdev);
9410 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9411 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
9412 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
9413 sizeof(enable), &enable);
9414 __hci_req_update_eir(&req);
9419 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture (and hold a reference to)
 * the first pending command's socket in the cmd_lookup so later events
 * can be attributed to it.
 */
9422 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9424 struct cmd_lookup *match = data;
9426 if (match->sk == NULL) {
9427 match->sk = cmd->sk;
9428 sock_hold(match->sk);
/* Completion of a class-of-device write: find the first socket among
 * pending SET_DEV_CLASS / ADD_UUID / REMOVE_UUID commands and broadcast
 * CLASS_OF_DEV_CHANGED (3-byte CoD) plus an ext-info update.
 * NOTE(review): the success-status guard and trailing sock_put (lines
 * 9440-9450 gap) were dropped by the extraction.
 */
9432 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9435 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9437 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9438 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9439 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9442 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9443 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9444 ext_info_changed(hdev, NULL);
/* Local name write completed: cache the new name in hdev->dev_name and
 * broadcast LOCAL_NAME_CHANGED + ext-info change, except while a
 * power-on sequence (pending SET_POWERED) is driving the write.
 */
9451 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9453 struct mgmt_cp_set_local_name ev;
9454 struct mgmt_pending_cmd *cmd;
9459 memset(&ev, 0, sizeof(ev));
9460 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9461 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9463 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9465 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9467 /* If this is a HCI command related to powering on the
9468 * HCI dev don't send any mgmt signals.
9470 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9474 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9475 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9476 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Linear search: does the 16-byte uuid appear in the uuids array of
 * uuid_count entries? (Return statements fall in the extraction gap
 * after line 9484.)
 */
9479 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9483 for (i = 0; i < uuid_count; i++) {
9484 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR TLV stream and report whether any advertised 16-, 32- or
 * 128-bit service UUID matches the filter list. 16/32-bit UUIDs are
 * expanded into the Bluetooth base UUID (little-endian bytes placed at
 * offsets 12..15) before comparison.
 * NOTE(review): gaps in the extraction hide the uuid[] local, the
 * malformed-length early exits and the final return.
 */
9491 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9495 while (parsed < eir_len) {
9496 u8 field_len = eir[0];
/* Guard against a field that claims to extend past the buffer. */
9503 if (eir_len - parsed < field_len + 1)
9507 case EIR_UUID16_ALL:
9508 case EIR_UUID16_SOME:
9509 for (i = 0; i + 3 <= field_len; i += 2) {
9510 memcpy(uuid, bluetooth_base_uuid, 16);
9511 uuid[13] = eir[i + 3];
9512 uuid[12] = eir[i + 2];
9513 if (has_uuid(uuid, uuid_count, uuids))
9517 case EIR_UUID32_ALL:
9518 case EIR_UUID32_SOME:
9519 for (i = 0; i + 5 <= field_len; i += 4) {
9520 memcpy(uuid, bluetooth_base_uuid, 16);
9521 uuid[15] = eir[i + 5];
9522 uuid[14] = eir[i + 4];
9523 uuid[13] = eir[i + 3];
9524 uuid[12] = eir[i + 2];
9525 if (has_uuid(uuid, uuid_count, uuids))
9529 case EIR_UUID128_ALL:
9530 case EIR_UUID128_SOME:
9531 for (i = 0; i + 17 <= field_len; i += 16) {
9532 memcpy(uuid, eir + i + 2, 16);
9533 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this TLV: 1 length byte + field_len payload bytes. */
9539 parsed += field_len + 1;
9540 eir += field_len + 1;
/* Schedule a delayed LE scan restart (used with the strict duplicate
 * filter quirk to refresh RSSI), unless scanning is off or the current
 * scan window will end before the restart delay anyway.
 */
9546 static void restart_le_scan(struct hci_dev *hdev)
9548 /* If controller is not scanning we are done. */
9549 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9552 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9553 hdev->discovery.scan_start +
9554 hdev->discovery.scan_duration))
9557 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9558 DISCOV_LE_RESTART_DELAY);
/* Service-discovery result filter: drop results below the configured
 * RSSI threshold (unless the strict-duplicate-filter quirk forces them
 * through for a rescan), drop results matching none of the filter
 * UUIDs, and re-check RSSI after scheduling a scan restart for the
 * quirk case.
 * NOTE(review): the extraction dropped the "return false/true"
 * statements between the visible checks (gaps after 9576, 9587, 9599).
 */
9561 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9562 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9564 /* If a RSSI threshold has been specified, and
9565 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9566 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9567 * is set, let it through for further processing, as we might need to
9570 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9571 * the results are also dropped.
9573 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9574 (rssi == HCI_RSSI_INVALID ||
9575 (rssi < hdev->discovery.rssi &&
9576 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9579 if (hdev->discovery.uuid_count != 0) {
9580 /* If a list of UUIDs is provided in filter, results with no
9581 * matching UUID should be dropped.
9583 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9584 hdev->discovery.uuids) &&
9585 !eir_has_uuids(scan_rsp, scan_rsp_len,
9586 hdev->discovery.uuid_count,
9587 hdev->discovery.uuids))
9591 /* If duplicate filtering does not report RSSI changes, then restart
9592 * scanning to ensure updated result with updated RSSI values.
9594 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9595 restart_le_scan(hdev);
9597 /* Validate RSSI value against the RSSI threshold once more. */
9598 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9599 rssi < hdev->discovery.rssi)
9606 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9607 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9608 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9611 struct mgmt_ev_device_found *ev = (void *)buf;
9614 /* Don't send events for a non-kernel initiated discovery. With
9615 * LE one exception is if we have pend_le_reports > 0 in which
9616 * case we're doing passive scanning and want these events.
9618 if (!hci_discovery_active(hdev)) {
9619 if (link_type == ACL_LINK)
9621 if (link_type == LE_LINK &&
9622 list_empty(&hdev->pend_le_reports) &&
9623 !hci_is_adv_monitoring(hdev)) {
9628 if (hdev->discovery.result_filtering) {
9629 /* We are using service discovery */
9630 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9635 if (hdev->discovery.limited) {
9636 /* Check for limited discoverable bit */
9638 if (!(dev_class[1] & 0x20))
9641 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9642 if (!flags || !(flags[0] & LE_AD_LIMITED))
9647 /* Make sure that the buffer is big enough. The 5 extra bytes
9648 * are for the potential CoD field.
9650 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9653 memset(buf, 0, sizeof(buf));
9655 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9656 * RSSI value was reported as 0 when not available. This behavior
9657 * is kept when using device discovery. This is required for full
9658 * backwards compatibility with the API.
9660 * However when using service discovery, the value 127 will be
9661 * returned when the RSSI is not available.
9663 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9664 link_type == ACL_LINK)
9667 bacpy(&ev->addr.bdaddr, bdaddr);
9668 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9670 ev->flags = cpu_to_le32(flags);
9673 /* Copy EIR or advertising data into event */
9674 memcpy(ev->eir, eir, eir_len);
9676 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9678 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9681 if (scan_rsp_len > 0)
9682 /* Append scan response data to event */
9683 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9685 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9686 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9688 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9691 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9692 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9694 struct mgmt_ev_device_found *ev;
9695 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9698 ev = (struct mgmt_ev_device_found *) buf;
9700 memset(buf, 0, sizeof(buf));
9702 bacpy(&ev->addr.bdaddr, bdaddr);
9703 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9706 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9709 ev->eir_len = cpu_to_le16(eir_len);
9711 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9714 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9716 struct mgmt_ev_discovering ev;
9718 bt_dev_dbg(hdev, "discovering %u", discovering);
9720 memset(&ev, 0, sizeof(ev));
9721 ev.type = hdev->discovery.type;
9722 ev.discovering = discovering;
9724 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9727 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9729 struct mgmt_ev_controller_suspend ev;
9731 ev.suspend_state = state;
9732 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9735 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9738 struct mgmt_ev_controller_resume ev;
9740 ev.wake_reason = reason;
9742 bacpy(&ev.addr.bdaddr, bdaddr);
9743 ev.addr.type = addr_type;
9745 memset(&ev.addr, 0, sizeof(ev.addr));
9748 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9751 static struct hci_mgmt_chan chan = {
9752 .channel = HCI_CHANNEL_CONTROL,
9753 .handler_count = ARRAY_SIZE(mgmt_handlers),
9754 .handlers = mgmt_handlers,
9756 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
9757 .tizen_handlers = tizen_mgmt_handlers,
9759 .hdev_init = mgmt_init_hdev,
9764 return hci_mgmt_chan_register(&chan);
9767 void mgmt_exit(void)
9769 hci_mgmt_chan_unregister(&chan);