2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
39 #include "hci_request.h"
41 #include "mgmt_util.h"
42 #include "mgmt_config.h"
/* Version of the Management interface reported via MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	21
48 static const u16 mgmt_commands[] = {
49 MGMT_OP_READ_INDEX_LIST,
52 MGMT_OP_SET_DISCOVERABLE,
53 MGMT_OP_SET_CONNECTABLE,
54 MGMT_OP_SET_FAST_CONNECTABLE,
56 MGMT_OP_SET_LINK_SECURITY,
60 MGMT_OP_SET_DEV_CLASS,
61 MGMT_OP_SET_LOCAL_NAME,
64 MGMT_OP_LOAD_LINK_KEYS,
65 MGMT_OP_LOAD_LONG_TERM_KEYS,
67 MGMT_OP_GET_CONNECTIONS,
68 MGMT_OP_PIN_CODE_REPLY,
69 MGMT_OP_PIN_CODE_NEG_REPLY,
70 MGMT_OP_SET_IO_CAPABILITY,
72 MGMT_OP_CANCEL_PAIR_DEVICE,
73 MGMT_OP_UNPAIR_DEVICE,
74 MGMT_OP_USER_CONFIRM_REPLY,
75 MGMT_OP_USER_CONFIRM_NEG_REPLY,
76 MGMT_OP_USER_PASSKEY_REPLY,
77 MGMT_OP_USER_PASSKEY_NEG_REPLY,
78 MGMT_OP_READ_LOCAL_OOB_DATA,
79 MGMT_OP_ADD_REMOTE_OOB_DATA,
80 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
81 MGMT_OP_START_DISCOVERY,
82 MGMT_OP_STOP_DISCOVERY,
85 MGMT_OP_UNBLOCK_DEVICE,
86 MGMT_OP_SET_DEVICE_ID,
87 MGMT_OP_SET_ADVERTISING,
89 MGMT_OP_SET_STATIC_ADDRESS,
90 MGMT_OP_SET_SCAN_PARAMS,
91 MGMT_OP_SET_SECURE_CONN,
92 MGMT_OP_SET_DEBUG_KEYS,
95 MGMT_OP_GET_CONN_INFO,
96 MGMT_OP_GET_CLOCK_INFO,
98 MGMT_OP_REMOVE_DEVICE,
99 MGMT_OP_LOAD_CONN_PARAM,
100 MGMT_OP_READ_UNCONF_INDEX_LIST,
101 MGMT_OP_READ_CONFIG_INFO,
102 MGMT_OP_SET_EXTERNAL_CONFIG,
103 MGMT_OP_SET_PUBLIC_ADDRESS,
104 MGMT_OP_START_SERVICE_DISCOVERY,
105 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
106 MGMT_OP_READ_EXT_INDEX_LIST,
107 MGMT_OP_READ_ADV_FEATURES,
108 MGMT_OP_ADD_ADVERTISING,
109 MGMT_OP_REMOVE_ADVERTISING,
110 MGMT_OP_GET_ADV_SIZE_INFO,
111 MGMT_OP_START_LIMITED_DISCOVERY,
112 MGMT_OP_READ_EXT_INFO,
113 MGMT_OP_SET_APPEARANCE,
114 MGMT_OP_GET_PHY_CONFIGURATION,
115 MGMT_OP_SET_PHY_CONFIGURATION,
116 MGMT_OP_SET_BLOCKED_KEYS,
117 MGMT_OP_SET_WIDEBAND_SPEECH,
118 MGMT_OP_READ_CONTROLLER_CAP,
119 MGMT_OP_READ_EXP_FEATURES_INFO,
120 MGMT_OP_SET_EXP_FEATURE,
121 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
122 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
123 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
124 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
125 MGMT_OP_GET_DEVICE_FLAGS,
126 MGMT_OP_SET_DEVICE_FLAGS,
127 MGMT_OP_READ_ADV_MONITOR_FEATURES,
128 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
129 MGMT_OP_REMOVE_ADV_MONITOR,
130 MGMT_OP_ADD_EXT_ADV_PARAMS,
131 MGMT_OP_ADD_EXT_ADV_DATA,
132 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
135 static const u16 mgmt_events[] = {
136 MGMT_EV_CONTROLLER_ERROR,
138 MGMT_EV_INDEX_REMOVED,
139 MGMT_EV_NEW_SETTINGS,
140 MGMT_EV_CLASS_OF_DEV_CHANGED,
141 MGMT_EV_LOCAL_NAME_CHANGED,
142 MGMT_EV_NEW_LINK_KEY,
143 MGMT_EV_NEW_LONG_TERM_KEY,
144 MGMT_EV_DEVICE_CONNECTED,
145 MGMT_EV_DEVICE_DISCONNECTED,
146 MGMT_EV_CONNECT_FAILED,
147 MGMT_EV_PIN_CODE_REQUEST,
148 MGMT_EV_USER_CONFIRM_REQUEST,
149 MGMT_EV_USER_PASSKEY_REQUEST,
151 MGMT_EV_DEVICE_FOUND,
153 MGMT_EV_DEVICE_BLOCKED,
154 MGMT_EV_DEVICE_UNBLOCKED,
155 MGMT_EV_DEVICE_UNPAIRED,
156 MGMT_EV_PASSKEY_NOTIFY,
159 MGMT_EV_DEVICE_ADDED,
160 MGMT_EV_DEVICE_REMOVED,
161 MGMT_EV_NEW_CONN_PARAM,
162 MGMT_EV_UNCONF_INDEX_ADDED,
163 MGMT_EV_UNCONF_INDEX_REMOVED,
164 MGMT_EV_NEW_CONFIG_OPTIONS,
165 MGMT_EV_EXT_INDEX_ADDED,
166 MGMT_EV_EXT_INDEX_REMOVED,
167 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
168 MGMT_EV_ADVERTISING_ADDED,
169 MGMT_EV_ADVERTISING_REMOVED,
170 MGMT_EV_EXT_INFO_CHANGED,
171 MGMT_EV_PHY_CONFIGURATION_CHANGED,
172 MGMT_EV_EXP_FEATURE_CHANGED,
173 MGMT_EV_DEVICE_FLAGS_CHANGED,
174 MGMT_EV_ADV_MONITOR_ADDED,
175 MGMT_EV_ADV_MONITOR_REMOVED,
176 MGMT_EV_CONTROLLER_SUSPEND,
177 MGMT_EV_CONTROLLER_RESUME,
180 static const u16 mgmt_untrusted_commands[] = {
181 MGMT_OP_READ_INDEX_LIST,
183 MGMT_OP_READ_UNCONF_INDEX_LIST,
184 MGMT_OP_READ_CONFIG_INFO,
185 MGMT_OP_READ_EXT_INDEX_LIST,
186 MGMT_OP_READ_EXT_INFO,
187 MGMT_OP_READ_CONTROLLER_CAP,
188 MGMT_OP_READ_EXP_FEATURES_INFO,
189 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
190 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
193 static const u16 mgmt_untrusted_events[] = {
195 MGMT_EV_INDEX_REMOVED,
196 MGMT_EV_NEW_SETTINGS,
197 MGMT_EV_CLASS_OF_DEV_CHANGED,
198 MGMT_EV_LOCAL_NAME_CHANGED,
199 MGMT_EV_UNCONF_INDEX_ADDED,
200 MGMT_EV_UNCONF_INDEX_REMOVED,
201 MGMT_EV_NEW_CONFIG_OPTIONS,
202 MGMT_EV_EXT_INDEX_ADDED,
203 MGMT_EV_EXT_INDEX_REMOVED,
204 MGMT_EV_EXT_INFO_CHANGED,
205 MGMT_EV_EXP_FEATURE_CHANGED,
/* How long the UUID/service cache stays valid before being flushed */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
/* All-zero 16-byte link key, used to reject obviously invalid keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 /* HCI to MGMT error code conversion table */
214 static const u8 mgmt_status_table[] = {
216 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
217 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
218 MGMT_STATUS_FAILED, /* Hardware Failure */
219 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
220 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
221 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
222 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
223 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
224 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
226 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
227 MGMT_STATUS_BUSY, /* Command Disallowed */
228 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
229 MGMT_STATUS_REJECTED, /* Rejected Security */
230 MGMT_STATUS_REJECTED, /* Rejected Personal */
231 MGMT_STATUS_TIMEOUT, /* Host Timeout */
232 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
233 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
234 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
235 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
236 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
237 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
238 MGMT_STATUS_BUSY, /* Repeated Attempts */
239 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
240 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
241 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
242 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
243 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
244 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
245 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
246 MGMT_STATUS_FAILED, /* Unspecified Error */
247 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
248 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
249 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
250 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
251 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
252 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
253 MGMT_STATUS_FAILED, /* Unit Link Key Used */
254 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
255 MGMT_STATUS_TIMEOUT, /* Instant Passed */
256 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
257 MGMT_STATUS_FAILED, /* Transaction Collision */
258 MGMT_STATUS_FAILED, /* Reserved for future use */
259 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
260 MGMT_STATUS_REJECTED, /* QoS Rejected */
261 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
262 MGMT_STATUS_REJECTED, /* Insufficient Security */
263 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
264 MGMT_STATUS_FAILED, /* Reserved for future use */
265 MGMT_STATUS_BUSY, /* Role Switch Pending */
266 MGMT_STATUS_FAILED, /* Reserved for future use */
267 MGMT_STATUS_FAILED, /* Slot Violation */
268 MGMT_STATUS_FAILED, /* Role Switch Failed */
269 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
270 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
271 MGMT_STATUS_BUSY, /* Host Busy Pairing */
272 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
273 MGMT_STATUS_BUSY, /* Controller Busy */
274 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
275 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
276 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
277 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
278 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
281 static u8 mgmt_status(u8 hci_status)
283 if (hci_status < ARRAY_SIZE(mgmt_status_table))
284 return mgmt_status_table[hci_status];
286 return MGMT_STATUS_FAILED;
289 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
292 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
296 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
297 u16 len, int flag, struct sock *skip_sk)
299 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
303 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
304 struct sock *skip_sk)
306 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
307 HCI_SOCK_TRUSTED, skip_sk);
310 static u8 le_addr_type(u8 mgmt_addr_type)
312 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
313 return ADDR_LE_DEV_PUBLIC;
315 return ADDR_LE_DEV_RANDOM;
318 void mgmt_fill_version_info(void *ver)
320 struct mgmt_rp_read_version *rp = ver;
322 rp->version = MGMT_VERSION;
323 rp->revision = cpu_to_le16(MGMT_REVISION);
326 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
329 struct mgmt_rp_read_version rp;
331 bt_dev_dbg(hdev, "sock %p", sk);
333 mgmt_fill_version_info(&rp);
335 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
339 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
342 struct mgmt_rp_read_commands *rp;
343 u16 num_commands, num_events;
347 bt_dev_dbg(hdev, "sock %p", sk);
349 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
350 num_commands = ARRAY_SIZE(mgmt_commands);
351 num_events = ARRAY_SIZE(mgmt_events);
353 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
354 num_events = ARRAY_SIZE(mgmt_untrusted_events);
357 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
359 rp = kmalloc(rp_size, GFP_KERNEL);
363 rp->num_commands = cpu_to_le16(num_commands);
364 rp->num_events = cpu_to_le16(num_events);
366 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
367 __le16 *opcode = rp->opcodes;
369 for (i = 0; i < num_commands; i++, opcode++)
370 put_unaligned_le16(mgmt_commands[i], opcode);
372 for (i = 0; i < num_events; i++, opcode++)
373 put_unaligned_le16(mgmt_events[i], opcode);
375 __le16 *opcode = rp->opcodes;
377 for (i = 0; i < num_commands; i++, opcode++)
378 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
380 for (i = 0; i < num_events; i++, opcode++)
381 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
384 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
391 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
394 struct mgmt_rp_read_index_list *rp;
400 bt_dev_dbg(hdev, "sock %p", sk);
402 read_lock(&hci_dev_list_lock);
405 list_for_each_entry(d, &hci_dev_list, list) {
406 if (d->dev_type == HCI_PRIMARY &&
407 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
411 rp_len = sizeof(*rp) + (2 * count);
412 rp = kmalloc(rp_len, GFP_ATOMIC);
414 read_unlock(&hci_dev_list_lock);
419 list_for_each_entry(d, &hci_dev_list, list) {
420 if (hci_dev_test_flag(d, HCI_SETUP) ||
421 hci_dev_test_flag(d, HCI_CONFIG) ||
422 hci_dev_test_flag(d, HCI_USER_CHANNEL))
425 /* Devices marked as raw-only are neither configured
426 * nor unconfigured controllers.
428 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
431 if (d->dev_type == HCI_PRIMARY &&
432 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
433 rp->index[count++] = cpu_to_le16(d->id);
434 bt_dev_dbg(hdev, "Added hci%u", d->id);
438 rp->num_controllers = cpu_to_le16(count);
439 rp_len = sizeof(*rp) + (2 * count);
441 read_unlock(&hci_dev_list_lock);
443 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
451 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
452 void *data, u16 data_len)
454 struct mgmt_rp_read_unconf_index_list *rp;
460 bt_dev_dbg(hdev, "sock %p", sk);
462 read_lock(&hci_dev_list_lock);
465 list_for_each_entry(d, &hci_dev_list, list) {
466 if (d->dev_type == HCI_PRIMARY &&
467 hci_dev_test_flag(d, HCI_UNCONFIGURED))
471 rp_len = sizeof(*rp) + (2 * count);
472 rp = kmalloc(rp_len, GFP_ATOMIC);
474 read_unlock(&hci_dev_list_lock);
479 list_for_each_entry(d, &hci_dev_list, list) {
480 if (hci_dev_test_flag(d, HCI_SETUP) ||
481 hci_dev_test_flag(d, HCI_CONFIG) ||
482 hci_dev_test_flag(d, HCI_USER_CHANNEL))
485 /* Devices marked as raw-only are neither configured
486 * nor unconfigured controllers.
488 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
491 if (d->dev_type == HCI_PRIMARY &&
492 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
493 rp->index[count++] = cpu_to_le16(d->id);
494 bt_dev_dbg(hdev, "Added hci%u", d->id);
498 rp->num_controllers = cpu_to_le16(count);
499 rp_len = sizeof(*rp) + (2 * count);
501 read_unlock(&hci_dev_list_lock);
503 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
504 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
511 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
512 void *data, u16 data_len)
514 struct mgmt_rp_read_ext_index_list *rp;
519 bt_dev_dbg(hdev, "sock %p", sk);
521 read_lock(&hci_dev_list_lock);
524 list_for_each_entry(d, &hci_dev_list, list) {
525 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
529 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
531 read_unlock(&hci_dev_list_lock);
536 list_for_each_entry(d, &hci_dev_list, list) {
537 if (hci_dev_test_flag(d, HCI_SETUP) ||
538 hci_dev_test_flag(d, HCI_CONFIG) ||
539 hci_dev_test_flag(d, HCI_USER_CHANNEL))
542 /* Devices marked as raw-only are neither configured
543 * nor unconfigured controllers.
545 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
548 if (d->dev_type == HCI_PRIMARY) {
549 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
550 rp->entry[count].type = 0x01;
552 rp->entry[count].type = 0x00;
553 } else if (d->dev_type == HCI_AMP) {
554 rp->entry[count].type = 0x02;
559 rp->entry[count].bus = d->bus;
560 rp->entry[count++].index = cpu_to_le16(d->id);
561 bt_dev_dbg(hdev, "Added hci%u", d->id);
564 rp->num_controllers = cpu_to_le16(count);
566 read_unlock(&hci_dev_list_lock);
568 /* If this command is called at least once, then all the
569 * default index and unconfigured index events are disabled
570 * and from now on only extended index events are used.
572 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
573 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
574 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
576 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
577 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
578 struct_size(rp, entry, count));
585 static bool is_configured(struct hci_dev *hdev)
587 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
588 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
591 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
592 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
593 !bacmp(&hdev->public_addr, BDADDR_ANY))
599 static __le32 get_missing_options(struct hci_dev *hdev)
603 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
604 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
605 options |= MGMT_OPTION_EXTERNAL_CONFIG;
607 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
608 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
609 !bacmp(&hdev->public_addr, BDADDR_ANY))
610 options |= MGMT_OPTION_PUBLIC_ADDRESS;
612 return cpu_to_le32(options);
615 static int new_options(struct hci_dev *hdev, struct sock *skip)
617 __le32 options = get_missing_options(hdev);
619 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
620 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
623 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
625 __le32 options = get_missing_options(hdev);
627 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
631 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
632 void *data, u16 data_len)
634 struct mgmt_rp_read_config_info rp;
637 bt_dev_dbg(hdev, "sock %p", sk);
641 memset(&rp, 0, sizeof(rp));
642 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
644 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
645 options |= MGMT_OPTION_EXTERNAL_CONFIG;
647 if (hdev->set_bdaddr)
648 options |= MGMT_OPTION_PUBLIC_ADDRESS;
650 rp.supported_options = cpu_to_le32(options);
651 rp.missing_options = get_missing_options(hdev);
653 hci_dev_unlock(hdev);
655 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
659 static u32 get_supported_phys(struct hci_dev *hdev)
661 u32 supported_phys = 0;
663 if (lmp_bredr_capable(hdev)) {
664 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
666 if (hdev->features[0][0] & LMP_3SLOT)
667 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
669 if (hdev->features[0][0] & LMP_5SLOT)
670 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
672 if (lmp_edr_2m_capable(hdev)) {
673 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
675 if (lmp_edr_3slot_capable(hdev))
676 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
678 if (lmp_edr_5slot_capable(hdev))
679 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
681 if (lmp_edr_3m_capable(hdev)) {
682 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
684 if (lmp_edr_3slot_capable(hdev))
685 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
687 if (lmp_edr_5slot_capable(hdev))
688 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
693 if (lmp_le_capable(hdev)) {
694 supported_phys |= MGMT_PHY_LE_1M_TX;
695 supported_phys |= MGMT_PHY_LE_1M_RX;
697 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
698 supported_phys |= MGMT_PHY_LE_2M_TX;
699 supported_phys |= MGMT_PHY_LE_2M_RX;
702 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
703 supported_phys |= MGMT_PHY_LE_CODED_TX;
704 supported_phys |= MGMT_PHY_LE_CODED_RX;
708 return supported_phys;
711 static u32 get_selected_phys(struct hci_dev *hdev)
713 u32 selected_phys = 0;
715 if (lmp_bredr_capable(hdev)) {
716 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
718 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
719 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
721 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
722 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
724 if (lmp_edr_2m_capable(hdev)) {
725 if (!(hdev->pkt_type & HCI_2DH1))
726 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
728 if (lmp_edr_3slot_capable(hdev) &&
729 !(hdev->pkt_type & HCI_2DH3))
730 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
732 if (lmp_edr_5slot_capable(hdev) &&
733 !(hdev->pkt_type & HCI_2DH5))
734 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
736 if (lmp_edr_3m_capable(hdev)) {
737 if (!(hdev->pkt_type & HCI_3DH1))
738 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
740 if (lmp_edr_3slot_capable(hdev) &&
741 !(hdev->pkt_type & HCI_3DH3))
742 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
744 if (lmp_edr_5slot_capable(hdev) &&
745 !(hdev->pkt_type & HCI_3DH5))
746 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
751 if (lmp_le_capable(hdev)) {
752 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
753 selected_phys |= MGMT_PHY_LE_1M_TX;
755 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
756 selected_phys |= MGMT_PHY_LE_1M_RX;
758 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
759 selected_phys |= MGMT_PHY_LE_2M_TX;
761 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
762 selected_phys |= MGMT_PHY_LE_2M_RX;
764 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
765 selected_phys |= MGMT_PHY_LE_CODED_TX;
767 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
768 selected_phys |= MGMT_PHY_LE_CODED_RX;
771 return selected_phys;
774 static u32 get_configurable_phys(struct hci_dev *hdev)
776 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
777 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
780 static u32 get_supported_settings(struct hci_dev *hdev)
784 settings |= MGMT_SETTING_POWERED;
785 settings |= MGMT_SETTING_BONDABLE;
786 settings |= MGMT_SETTING_DEBUG_KEYS;
787 settings |= MGMT_SETTING_CONNECTABLE;
788 settings |= MGMT_SETTING_DISCOVERABLE;
790 if (lmp_bredr_capable(hdev)) {
791 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
792 settings |= MGMT_SETTING_FAST_CONNECTABLE;
793 settings |= MGMT_SETTING_BREDR;
794 settings |= MGMT_SETTING_LINK_SECURITY;
796 if (lmp_ssp_capable(hdev)) {
797 settings |= MGMT_SETTING_SSP;
798 if (IS_ENABLED(CONFIG_BT_HS))
799 settings |= MGMT_SETTING_HS;
802 if (lmp_sc_capable(hdev))
803 settings |= MGMT_SETTING_SECURE_CONN;
805 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
807 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
810 if (lmp_le_capable(hdev)) {
811 settings |= MGMT_SETTING_LE;
812 settings |= MGMT_SETTING_SECURE_CONN;
813 settings |= MGMT_SETTING_PRIVACY;
814 settings |= MGMT_SETTING_STATIC_ADDRESS;
816 /* When the experimental feature for LL Privacy support is
817 * enabled, then advertising is no longer supported.
819 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
820 settings |= MGMT_SETTING_ADVERTISING;
823 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
825 settings |= MGMT_SETTING_CONFIGURATION;
827 settings |= MGMT_SETTING_PHY_CONFIGURATION;
832 static u32 get_current_settings(struct hci_dev *hdev)
836 if (hdev_is_powered(hdev))
837 settings |= MGMT_SETTING_POWERED;
839 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
840 settings |= MGMT_SETTING_CONNECTABLE;
842 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
843 settings |= MGMT_SETTING_FAST_CONNECTABLE;
845 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
846 settings |= MGMT_SETTING_DISCOVERABLE;
848 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
849 settings |= MGMT_SETTING_BONDABLE;
851 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
852 settings |= MGMT_SETTING_BREDR;
854 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
855 settings |= MGMT_SETTING_LE;
857 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
858 settings |= MGMT_SETTING_LINK_SECURITY;
860 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
861 settings |= MGMT_SETTING_SSP;
863 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
864 settings |= MGMT_SETTING_HS;
866 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
867 settings |= MGMT_SETTING_ADVERTISING;
869 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
870 settings |= MGMT_SETTING_SECURE_CONN;
872 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
873 settings |= MGMT_SETTING_DEBUG_KEYS;
875 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
876 settings |= MGMT_SETTING_PRIVACY;
878 /* The current setting for static address has two purposes. The
879 * first is to indicate if the static address will be used and
880 * the second is to indicate if it is actually set.
882 * This means if the static address is not configured, this flag
883 * will never be set. If the address is configured, then if the
884 * address is actually used decides if the flag is set or not.
886 * For single mode LE only controllers and dual-mode controllers
887 * with BR/EDR disabled, the existence of the static address will
890 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
891 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
892 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
893 if (bacmp(&hdev->static_addr, BDADDR_ANY))
894 settings |= MGMT_SETTING_STATIC_ADDRESS;
897 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
898 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
903 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
905 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
908 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
909 struct hci_dev *hdev,
912 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
915 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
917 struct mgmt_pending_cmd *cmd;
919 /* If there's a pending mgmt command the flags will not yet have
920 * their final values, so check for this first.
922 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
924 struct mgmt_mode *cp = cmd->param;
926 return LE_AD_GENERAL;
927 else if (cp->val == 0x02)
928 return LE_AD_LIMITED;
930 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
931 return LE_AD_LIMITED;
932 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
933 return LE_AD_GENERAL;
939 bool mgmt_get_connectable(struct hci_dev *hdev)
941 struct mgmt_pending_cmd *cmd;
943 /* If there's a pending mgmt command the flag will not yet have
944 * it's final value, so check for this first.
946 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
948 struct mgmt_mode *cp = cmd->param;
953 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
956 static void service_cache_off(struct work_struct *work)
958 struct hci_dev *hdev = container_of(work, struct hci_dev,
960 struct hci_request req;
962 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
965 hci_req_init(&req, hdev);
969 __hci_req_update_eir(&req);
970 __hci_req_update_class(&req);
972 hci_dev_unlock(hdev);
974 hci_req_run(&req, NULL);
977 static void rpa_expired(struct work_struct *work)
979 struct hci_dev *hdev = container_of(work, struct hci_dev,
981 struct hci_request req;
983 bt_dev_dbg(hdev, "");
985 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
987 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
990 /* The generation of a new RPA and programming it into the
991 * controller happens in the hci_req_enable_advertising()
994 hci_req_init(&req, hdev);
995 if (ext_adv_capable(hdev))
996 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
998 __hci_req_enable_advertising(&req);
999 hci_req_run(&req, NULL);
1002 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1004 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1007 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1008 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1010 /* Non-mgmt controlled devices get this bit set
1011 * implicitly so that pairing works for them, however
1012 * for mgmt we require user-space to explicitly enable
1015 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1018 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1019 void *data, u16 data_len)
1021 struct mgmt_rp_read_info rp;
1023 bt_dev_dbg(hdev, "sock %p", sk);
1027 memset(&rp, 0, sizeof(rp));
1029 bacpy(&rp.bdaddr, &hdev->bdaddr);
1031 rp.version = hdev->hci_ver;
1032 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1034 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1035 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1037 memcpy(rp.dev_class, hdev->dev_class, 3);
1039 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1040 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1042 hci_dev_unlock(hdev);
1044 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1048 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1053 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1054 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1055 hdev->dev_class, 3);
1057 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1058 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1061 name_len = strlen(hdev->dev_name);
1062 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1063 hdev->dev_name, name_len);
1065 name_len = strlen(hdev->short_name);
1066 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1067 hdev->short_name, name_len);
1072 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1073 void *data, u16 data_len)
1076 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1079 bt_dev_dbg(hdev, "sock %p", sk);
1081 memset(&buf, 0, sizeof(buf));
1085 bacpy(&rp->bdaddr, &hdev->bdaddr);
1087 rp->version = hdev->hci_ver;
1088 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1090 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1091 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1094 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1095 rp->eir_len = cpu_to_le16(eir_len);
1097 hci_dev_unlock(hdev);
1099 /* If this command is called at least once, then the events
1100 * for class of device and local name changes are disabled
1101 * and only the new extended controller information event
1104 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1105 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1106 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1108 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1109 sizeof(*rp) + eir_len);
1112 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1115 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1118 memset(buf, 0, sizeof(buf));
1120 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1121 ev->eir_len = cpu_to_le16(eir_len);
1123 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1124 sizeof(*ev) + eir_len,
1125 HCI_MGMT_EXT_INFO_EVENTS, skip);
1128 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1130 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1132 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1136 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1138 bt_dev_dbg(hdev, "status 0x%02x", status);
1140 if (hci_conn_count(hdev) == 0) {
1141 cancel_delayed_work(&hdev->power_off);
1142 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1146 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1148 struct mgmt_ev_advertising_added ev;
1150 ev.instance = instance;
1152 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1155 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1158 struct mgmt_ev_advertising_removed ev;
1160 ev.instance = instance;
1162 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1165 static void cancel_adv_timeout(struct hci_dev *hdev)
1167 if (hdev->adv_instance_timeout) {
1168 hdev->adv_instance_timeout = 0;
1169 cancel_delayed_work(&hdev->adv_instance_expire);
1173 static int clean_up_hci_state(struct hci_dev *hdev)
1175 struct hci_request req;
1176 struct hci_conn *conn;
1177 bool discov_stopped;
1180 hci_req_init(&req, hdev);
1182 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1183 test_bit(HCI_PSCAN, &hdev->flags)) {
1185 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1188 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1190 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1191 __hci_req_disable_advertising(&req);
1193 discov_stopped = hci_req_stop_discovery(&req);
1195 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1196 /* 0x15 == Terminated due to Power Off */
1197 __hci_abort_conn(&req, conn, 0x15);
1200 err = hci_req_run(&req, clean_up_hci_complete);
1201 if (!err && discov_stopped)
1202 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT Set Powered command handler.  Validates the on/off value, rejects
 * the request if another Set Powered is pending, short-circuits when the
 * state already matches, and otherwise queues power_on work or runs the
 * power-off cleanup (with a fallback timeout).
 * NOTE(review): lock/label/brace lines are missing from this extraction.
 */
1207 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1210 struct mgmt_mode *cp = data;
1211 struct mgmt_pending_cmd *cmd;
1214 bt_dev_dbg(hdev, "sock %p", sk);
1216 if (cp->val != 0x00 && cp->val != 0x01)
1217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1218 MGMT_STATUS_INVALID_PARAMS);
1222 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1223 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Requested state already in effect: just echo current settings. */
1228 if (!!cp->val == hdev_is_powered(hdev)) {
1229 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1233 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1240 queue_work(hdev->req_workqueue, &hdev->power_on);
1243 /* Disconnect connections, stop scans, etc */
1244 err = clean_up_hci_state(hdev);
/* Safety net in case the cleanup request never completes. */
1246 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1247 HCI_POWER_OFF_TIMEOUT);
1249 /* ENODATA means there were no HCI commands queued */
1250 if (err == -ENODATA) {
1251 cancel_delayed_work(&hdev->power_off);
1252 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1258 hci_dev_unlock(hdev);
1262 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1264 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1266 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1267 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast New Settings to all subscribers (no skip). */
1270 int mgmt_new_settings(struct hci_dev *hdev)
1272 return new_settings(hdev, NULL);
1277 struct hci_dev *hdev;
1281 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1283 struct cmd_lookup *match = data;
1285 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1287 list_del(&cmd->list);
1289 if (match->sk == NULL) {
1290 match->sk = cmd->sk;
1291 sock_hold(match->sk);
1294 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail a pending command with the status
 * pointed to by @data and remove it.
 * NOTE(review): the `u8 *status = data;` declaration line is missing from
 * this extraction — verify against upstream.
 */
1297 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1301 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1302 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: prefer the command's own cmd_complete
 * handler when set; otherwise fall back to a plain status response.
 * NOTE(review): the status declaration and else/brace lines are missing
 * from this extraction.
 */
1305 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1307 if (cmd->cmd_complete) {
1310 cmd->cmd_complete(cmd, *status);
1311 mgmt_pending_remove(cmd);
1316 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the command's stored parameters back
 * to the requester as the response payload.
 */
1319 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1321 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1322 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1325 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1327 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1328 cmd->param, sizeof(struct mgmt_addr_info));
1331 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1333 if (!lmp_bredr_capable(hdev))
1334 return MGMT_STATUS_NOT_SUPPORTED;
1335 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1336 return MGMT_STATUS_REJECTED;
1338 return MGMT_STATUS_SUCCESS;
1341 static u8 mgmt_le_support(struct hci_dev *hdev)
1343 if (!lmp_le_capable(hdev))
1344 return MGMT_STATUS_NOT_SUPPORTED;
1345 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1346 return MGMT_STATUS_REJECTED;
1348 return MGMT_STATUS_SUCCESS;
/* Completion hook for the Set Discoverable HCI work: on failure report the
 * error and drop the limited-discoverable flag; on success (re)arm the
 * discoverable timeout, answer the pending command and broadcast the new
 * settings.  NOTE(review): lock and brace lines are missing here.
 */
1351 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1353 struct mgmt_pending_cmd *cmd;
1355 bt_dev_dbg(hdev, "status 0x%02x", status);
1359 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1364 u8 mgmt_err = mgmt_status(status);
1365 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1366 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm the auto-disable timer if a timeout was configured. */
1370 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1371 hdev->discov_timeout > 0) {
1372 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1373 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1376 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1377 new_settings(hdev, cmd->sk);
1380 mgmt_pending_remove(cmd);
1383 hci_dev_unlock(hdev);
/* MGMT Set Discoverable handler.  val: 0x00 off, 0x01 general, 0x02
 * limited discoverable.  Validates the val/timeout combination, rejects
 * while a conflicting command is pending, while not connectable, or while
 * advertising is paused; handles the powered-off and timeout-only cases
 * without HCI traffic, otherwise updates the flags and queues the
 * discoverable_update work.  NOTE(review): lock/label/brace and some
 * declaration lines are missing from this extraction.
 */
1386 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1389 struct mgmt_cp_set_discoverable *cp = data;
1390 struct mgmt_pending_cmd *cmd;
1394 bt_dev_dbg(hdev, "sock %p", sk);
1396 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1397 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1398 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1399 MGMT_STATUS_REJECTED);
1401 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1402 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1403 MGMT_STATUS_INVALID_PARAMS);
1405 timeout = __le16_to_cpu(cp->timeout);
1407 /* Disabling discoverable requires that no timeout is set,
1408 * and enabling limited discoverable requires a timeout.
1410 if ((cp->val == 0x00 && timeout > 0) ||
1411 (cp->val == 0x02 && timeout == 0))
1412 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1413 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running controller to arm it. */
1417 if (!hdev_is_powered(hdev) && timeout > 0) {
1418 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1419 MGMT_STATUS_NOT_POWERED);
1423 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1424 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1425 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1430 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1431 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1432 MGMT_STATUS_REJECTED);
1436 if (hdev->advertising_paused) {
1437 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1442 if (!hdev_is_powered(hdev)) {
1443 bool changed = false;
1445 /* Setting limited discoverable when powered off is
1446 * not a valid operation since it requires a timeout
1447 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1449 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1450 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1454 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1459 err = new_settings(hdev, sk);
1464 /* If the current mode is the same, then just update the timeout
1465 * value with the new value. And if only the timeout gets updated,
1466 * then no need for any HCI transactions.
1468 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1469 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1470 HCI_LIMITED_DISCOVERABLE)) {
1471 cancel_delayed_work(&hdev->discov_off);
1472 hdev->discov_timeout = timeout;
1474 if (cp->val && hdev->discov_timeout > 0) {
1475 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1476 queue_delayed_work(hdev->req_workqueue,
1477 &hdev->discov_off, to);
1480 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1484 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1490 /* Cancel any potential discoverable timeout that might be
1491 * still active and store new timeout value. The arming of
1492 * the timeout happens in the complete handler.
1494 cancel_delayed_work(&hdev->discov_off);
1495 hdev->discov_timeout = timeout;
1498 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1500 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1502 /* Limited discoverable mode */
1503 if (cp->val == 0x02)
1504 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1506 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1508 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1512 hci_dev_unlock(hdev);
/* Completion hook for Set Connectable HCI work: report failure, or answer
 * the pending command and broadcast the new settings on success.
 * NOTE(review): lock and brace lines are missing from this extraction.
 */
1516 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1518 struct mgmt_pending_cmd *cmd;
1520 bt_dev_dbg(hdev, "status 0x%02x", status);
1524 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1529 u8 mgmt_err = mgmt_status(status);
1530 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1534 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1535 new_settings(hdev, cmd->sk);
1538 mgmt_pending_remove(cmd);
1541 hci_dev_unlock(hdev);
/* Flag-only variant of Set Connectable used when the controller is not
 * powered (or no HCI traffic is needed): toggle HCI_CONNECTABLE (clearing
 * HCI_DISCOVERABLE when going non-connectable), reply, and broadcast if
 * anything changed.  NOTE(review): the `changed = true` assignments and
 * brace lines are missing from this extraction.
 */
1544 static int set_connectable_update_settings(struct hci_dev *hdev,
1545 struct sock *sk, u8 val)
1547 bool changed = false;
1550 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1554 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1556 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1557 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1560 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1565 hci_req_update_scan(hdev);
1566 hci_update_background_scan(hdev);
1567 return new_settings(hdev, sk);
/* MGMT Set Connectable handler.  Requires BR/EDR or LE to be enabled and
 * a boolean value; when powered off it delegates to the settings-only
 * path, otherwise it updates the flags and queues connectable_update.
 * NOTE(review): lock/label/brace lines are missing from this extraction.
 */
1573 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1576 struct mgmt_mode *cp = data;
1577 struct mgmt_pending_cmd *cmd;
1580 bt_dev_dbg(hdev, "sock %p", sk);
1582 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1583 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1585 MGMT_STATUS_REJECTED);
1587 if (cp->val != 0x00 && cp->val != 0x01)
1588 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1589 MGMT_STATUS_INVALID_PARAMS);
1593 if (!hdev_is_powered(hdev)) {
1594 err = set_connectable_update_settings(hdev, sk, cp->val);
1598 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1599 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1600 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1605 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1612 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Going non-connectable also drops discoverability state. */
1614 if (hdev->discov_timeout > 0)
1615 cancel_delayed_work(&hdev->discov_off);
1617 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1618 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1619 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1622 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1626 hci_dev_unlock(hdev);
/* MGMT Set Bondable handler: toggle HCI_BONDABLE, reply, and if the flag
 * changed, refresh advertising (limited-privacy case) and broadcast the
 * new settings.  NOTE(review): lock/brace lines and the `bool changed`
 * declaration are missing from this extraction.
 */
1630 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1633 struct mgmt_mode *cp = data;
1637 bt_dev_dbg(hdev, "sock %p", sk);
1639 if (cp->val != 0x00 && cp->val != 0x01)
1640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1641 MGMT_STATUS_INVALID_PARAMS);
1646 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1648 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1650 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1655 /* In limited privacy mode the change of bondable mode
1656 * may affect the local advertising address.
1658 if (hdev_is_powered(hdev) &&
1659 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1660 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1661 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1662 queue_work(hdev->req_workqueue,
1663 &hdev->discoverable_update);
1665 err = new_settings(hdev, sk);
1669 hci_dev_unlock(hdev);
/* MGMT Set Link Security handler (BR/EDR authentication).  When powered
 * off, only the HCI_LINK_SECURITY flag is toggled; when powered and the
 * HCI_AUTH state differs, a Write Auth Enable command is sent with a
 * pending-command entry tracking it.  NOTE(review): several lines
 * (declarations, lock/label/brace lines) are missing from this
 * extraction.
 */
1673 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1676 struct mgmt_mode *cp = data;
1677 struct mgmt_pending_cmd *cmd;
1681 bt_dev_dbg(hdev, "sock %p", sk);
1683 status = mgmt_bredr_support(hdev);
1685 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1688 if (cp->val != 0x00 && cp->val != 0x01)
1689 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1690 MGMT_STATUS_INVALID_PARAMS);
1694 if (!hdev_is_powered(hdev)) {
1695 bool changed = false;
1697 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1698 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1702 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1707 err = new_settings(hdev, sk);
1712 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1713 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: no HCI needed. */
1720 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1721 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1725 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1731 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1733 mgmt_pending_remove(cmd);
1738 hci_dev_unlock(hdev);
/* MGMT Set Secure Simple Pairing handler.  Requires BR/EDR and SSP
 * capability.  Powered-off requests only flip flags (disabling SSP also
 * clears HS); powered requests send Write SSP Mode (and turn off SSP
 * debug mode first when disabling).  NOTE(review): several lines
 * (flag-name continuations, lock/label/brace lines) are missing from this
 * extraction.
 */
1742 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1744 struct mgmt_mode *cp = data;
1745 struct mgmt_pending_cmd *cmd;
1749 bt_dev_dbg(hdev, "sock %p", sk);
1751 status = mgmt_bredr_support(hdev);
1753 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1755 if (!lmp_ssp_capable(hdev))
1756 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1757 MGMT_STATUS_NOT_SUPPORTED);
1759 if (cp->val != 0x00 && cp->val != 0x01)
1760 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1761 MGMT_STATUS_INVALID_PARAMS);
1765 if (!hdev_is_powered(hdev)) {
1769 changed = !hci_dev_test_and_set_flag(hdev,
1772 changed = hci_dev_test_and_clear_flag(hdev,
1775 changed = hci_dev_test_and_clear_flag(hdev,
1778 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1781 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1786 err = new_settings(hdev, sk);
1791 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1792 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1797 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1798 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1802 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* When disabling SSP, also switch off SSP debug mode first. */
1808 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1809 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1810 sizeof(cp->val), &cp->val);
1812 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1814 mgmt_pending_remove(cmd);
1819 hci_dev_unlock(hdev);
/* MGMT Set High Speed handler.  Compiled-out kernels, missing BR/EDR or
 * SSP capability, and disabled SSP are all rejected up front.  The flag
 * toggle itself is purely host-side; disabling while powered is rejected.
 * NOTE(review): declarations and lock/label/brace lines are missing from
 * this extraction.
 */
1823 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1825 struct mgmt_mode *cp = data;
1830 bt_dev_dbg(hdev, "sock %p", sk);
1832 if (!IS_ENABLED(CONFIG_BT_HS))
1833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1834 MGMT_STATUS_NOT_SUPPORTED);
1836 status = mgmt_bredr_support(hdev);
1838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1840 if (!lmp_ssp_capable(hdev))
1841 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1842 MGMT_STATUS_NOT_SUPPORTED);
1844 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1845 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1846 MGMT_STATUS_REJECTED);
1848 if (cp->val != 0x00 && cp->val != 0x01)
1849 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1850 MGMT_STATUS_INVALID_PARAMS);
1854 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1855 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1861 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1863 if (hdev_is_powered(hdev)) {
1864 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1865 MGMT_STATUS_REJECTED);
1869 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1872 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1877 err = new_settings(hdev, sk);
1880 hci_dev_unlock(hdev);
/* Completion hook for the Set LE request: fail or answer all pending
 * SET_LE commands, broadcast new settings, and when LE ended up enabled,
 * refresh the default advertising/scan-response data and background scan.
 * NOTE(review): lock/brace and some declaration lines are missing from
 * this extraction.
 */
1884 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1886 struct cmd_lookup match = { NULL, hdev };
1891 u8 mgmt_err = mgmt_status(status);
1893 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1898 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1900 new_settings(hdev, match.sk);
1905 /* Make sure the controller has a good default for
1906 * advertising data. Restrict the update to when LE
1907 * has actually been enabled. During power on, the
1908 * update in powered_update_hci will take care of it.
1910 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1911 struct hci_request req;
1912 hci_req_init(&req, hdev);
1913 if (ext_adv_capable(hdev)) {
1916 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1918 __hci_req_update_scan_rsp_data(&req, 0x00);
1920 __hci_req_update_adv_data(&req, 0x00);
1921 __hci_req_update_scan_rsp_data(&req, 0x00);
1923 hci_req_run(&req, NULL);
1924 hci_update_background_scan(hdev);
1928 hci_dev_unlock(hdev);
/* MGMT Set LE handler.  LE-only configurations cannot switch LE off.
 * When unpowered or already in the requested host-LE state, only flags
 * are changed; otherwise a Write LE Host Supported request is built
 * (disabling advertising/ext-adv sets first when turning LE off) and run
 * with le_enable_complete.  NOTE(review): several lines (val/enabled
 * declarations and assignments, lock/label/brace lines, hci_cp.le
 * assignment) are missing from this extraction.
 */
1931 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1933 struct mgmt_mode *cp = data;
1934 struct hci_cp_write_le_host_supported hci_cp;
1935 struct mgmt_pending_cmd *cmd;
1936 struct hci_request req;
1940 bt_dev_dbg(hdev, "sock %p", sk);
1942 if (!lmp_le_capable(hdev))
1943 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1944 MGMT_STATUS_NOT_SUPPORTED);
1946 if (cp->val != 0x00 && cp->val != 0x01)
1947 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1948 MGMT_STATUS_INVALID_PARAMS);
1950 /* Bluetooth single mode LE only controllers or dual-mode
1951 * controllers configured as LE only devices, do not allow
1952 * switching LE off. These have either LE enabled explicitly
1953 * or BR/EDR has been previously switched off.
1955 * When trying to enable an already enabled LE, then gracefully
1956 * send a positive response. Trying to disable it however will
1957 * result into rejection.
1959 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1960 if (cp->val == 0x01)
1961 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1964 MGMT_STATUS_REJECTED);
1970 enabled = lmp_host_le_capable(hdev);
1973 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1975 if (!hdev_is_powered(hdev) || val == enabled) {
1976 bool changed = false;
1978 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1979 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Turning LE off implies advertising off as well. */
1983 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1984 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1988 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1993 err = new_settings(hdev, sk);
1998 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1999 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2000 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2005 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2011 hci_req_init(&req, hdev);
2013 memset(&hci_cp, 0, sizeof(hci_cp));
2017 hci_cp.simul = 0x00;
2019 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2020 __hci_req_disable_advertising(&req);
2022 if (ext_adv_capable(hdev))
2023 __hci_req_clear_ext_adv_sets(&req);
2026 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2029 err = hci_req_run(&req, le_enable_complete);
2031 mgmt_pending_remove(cmd);
2034 hci_dev_unlock(hdev);
2038 /* This is a helper function to test for pending mgmt commands that can
2039 * cause CoD or EIR HCI commands. We can only allow one such pending
2040 * mgmt command at a time since otherwise we cannot easily track what
2041 * the current values are, will be, and based on that calculate if a new
2042 * HCI command needs to be sent and if yes with what value.
2044 static bool pending_eir_or_class(struct hci_dev *hdev)
2046 struct mgmt_pending_cmd *cmd;
/* Scan all pending commands for one of the four EIR/CoD-affecting ops. */
2048 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2049 switch (cmd->opcode) {
2050 case MGMT_OP_ADD_UUID:
2051 case MGMT_OP_REMOVE_UUID:
2052 case MGMT_OP_SET_DEV_CLASS:
2053 case MGMT_OP_SET_POWERED:
/* NOTE(review): the `return true;`/`return false;` and closing-brace
 * lines are missing from this extraction — verify against upstream.
 */
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order, used to detect 16/32-bit shortened UUIDs.
 */
2061 static const u8 bluetooth_base_uuid[] = {
2062 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2063 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID by comparing its tail against the Bluetooth
 * Base UUID and inspecting the 32-bit value at offset 12.
 * NOTE(review): the return statements (presumably 128/32/16-bit results)
 * are missing from this extraction — verify against upstream.
 */
2066 static u8 get_uuid_size(const u8 *uuid)
2070 if (memcmp(uuid, bluetooth_base_uuid, 12))
2073 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for the CoD/EIR-affecting commands: answer the
 * pending command @mgmt_op with the current device class and remove it.
 * NOTE(review): lock and brace lines are missing from this extraction.
 */
2080 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2082 struct mgmt_pending_cmd *cmd;
2086 cmd = pending_find(mgmt_op, hdev);
2090 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2091 mgmt_status(status), hdev->dev_class, 3);
2093 mgmt_pending_remove(cmd);
2096 hci_dev_unlock(hdev);
/* HCI request callback for Add UUID: finish via the class helper. */
2099 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2101 bt_dev_dbg(hdev, "status 0x%02x", status);
2103 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT Add UUID handler: append the UUID to hdev->uuids and push updated
 * class-of-device and EIR data to the controller.  -ENODATA from the
 * request means no HCI traffic was needed, so reply immediately.
 * NOTE(review): allocation-failure handling, lock/label/brace lines and
 * the `int err` declaration are missing from this extraction.
 */
2106 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2108 struct mgmt_cp_add_uuid *cp = data;
2109 struct mgmt_pending_cmd *cmd;
2110 struct hci_request req;
2111 struct bt_uuid *uuid;
2114 bt_dev_dbg(hdev, "sock %p", sk);
/* Only one CoD/EIR-affecting command may be in flight at a time. */
2118 if (pending_eir_or_class(hdev)) {
2119 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2124 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2130 memcpy(uuid->uuid, cp->uuid, 16);
2131 uuid->svc_hint = cp->svc_hint;
2132 uuid->size = get_uuid_size(cp->uuid);
2134 list_add_tail(&uuid->list, &hdev->uuids);
2136 hci_req_init(&req, hdev);
2138 __hci_req_update_class(&req);
2139 __hci_req_update_eir(&req);
2141 err = hci_req_run(&req, add_uuid_complete);
2143 if (err != -ENODATA)
2146 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2147 hdev->dev_class, 3);
2151 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2160 hci_dev_unlock(hdev);
/* Arm the service-cache machinery (sets HCI_SERVICE_CACHE and schedules
 * the service_cache delayed work) when the controller is powered.
 * NOTE(review): the return statements and the delay constant line are
 * missing from this extraction — verify against upstream.
 */
2164 static bool enable_service_cache(struct hci_dev *hdev)
2166 if (!hdev_is_powered(hdev))
2169 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2170 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for Remove UUID: finish via the class helper. */
2178 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2180 bt_dev_dbg(hdev, "status 0x%02x", status);
2182 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT Remove UUID handler.  An all-zero UUID clears the whole list (and
 * may defer the controller update via the service cache); otherwise every
 * matching entry is unlinked and CoD/EIR are refreshed, with -ENODATA
 * again meaning "no HCI traffic needed".  NOTE(review): the found-counter
 * lines, kfree of removed entries, and lock/label/brace lines are missing
 * from this extraction.
 */
2185 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2188 struct mgmt_cp_remove_uuid *cp = data;
2189 struct mgmt_pending_cmd *cmd;
2190 struct bt_uuid *match, *tmp;
2191 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2192 struct hci_request req;
2195 bt_dev_dbg(hdev, "sock %p", sk);
2199 if (pending_eir_or_class(hdev)) {
2200 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: drop every stored UUID at once. */
2205 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2206 hci_uuids_clear(hdev);
2208 if (enable_service_cache(hdev)) {
2209 err = mgmt_cmd_complete(sk, hdev->id,
2210 MGMT_OP_REMOVE_UUID,
2211 0, hdev->dev_class, 3);
2220 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2221 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2224 list_del(&match->list);
/* No matching UUID was found for the given value. */
2230 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2231 MGMT_STATUS_INVALID_PARAMS);
2236 hci_req_init(&req, hdev);
2238 __hci_req_update_class(&req);
2239 __hci_req_update_eir(&req);
2241 err = hci_req_run(&req, remove_uuid_complete);
2243 if (err != -ENODATA)
2246 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2247 hdev->dev_class, 3);
2251 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2260 hci_dev_unlock(hdev);
/* HCI request callback for Set Device Class: finish via the class helper. */
2264 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2266 bt_dev_dbg(hdev, "status 0x%02x", status);
2268 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT Set Device Class handler.  Validates the reserved bits (low two of
 * minor, high three of major must be zero), stores the new class, flushes
 * a pending service cache if needed, and pushes the class (and possibly
 * EIR) to the controller.  NOTE(review): the `int err` declaration,
 * lock/relock around cancel_delayed_work_sync, and label/brace lines are
 * missing from this extraction.
 */
2271 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2274 struct mgmt_cp_set_dev_class *cp = data;
2275 struct mgmt_pending_cmd *cmd;
2276 struct hci_request req;
2279 bt_dev_dbg(hdev, "sock %p", sk);
2281 if (!lmp_bredr_capable(hdev))
2282 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2283 MGMT_STATUS_NOT_SUPPORTED);
2287 if (pending_eir_or_class(hdev)) {
2288 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2293 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2294 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2295 MGMT_STATUS_INVALID_PARAMS);
2299 hdev->major_class = cp->major;
2300 hdev->minor_class = cp->minor;
2302 if (!hdev_is_powered(hdev)) {
2303 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2304 hdev->dev_class, 3);
2308 hci_req_init(&req, hdev);
/* Flush a pending service-cache update before writing the class. */
2310 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2311 hci_dev_unlock(hdev);
2312 cancel_delayed_work_sync(&hdev->service_cache);
2314 __hci_req_update_eir(&req);
2317 __hci_req_update_class(&req);
2319 err = hci_req_run(&req, set_class_complete);
2321 if (err != -ENODATA)
2324 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2325 hdev->dev_class, 3);
2329 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2338 hci_dev_unlock(hdev);
/* MGMT Load Link Keys handler.  Validates the count against the maximum
 * that fits in a u16-sized message, checks the exact payload length, and
 * per-key address type/key type; then replaces the stored key list,
 * updates HCI_KEEP_DEBUG_KEYS from cp->debug_keys, skips blocked and
 * debug-combination keys, and adds the rest.  NOTE(review): loop-index
 * declaration, lock/brace lines and continue statements are missing from
 * this extraction.
 */
2342 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2345 struct mgmt_cp_load_link_keys *cp = data;
2346 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2347 sizeof(struct mgmt_link_key_info));
2348 u16 key_count, expected_len;
2352 bt_dev_dbg(hdev, "sock %p", sk);
2354 if (!lmp_bredr_capable(hdev))
2355 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2356 MGMT_STATUS_NOT_SUPPORTED);
2358 key_count = __le16_to_cpu(cp->key_count);
2359 if (key_count > max_key_count) {
2360 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2363 MGMT_STATUS_INVALID_PARAMS);
2366 expected_len = struct_size(cp, keys, key_count);
2367 if (expected_len != len) {
2368 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2370 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2371 MGMT_STATUS_INVALID_PARAMS);
2374 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2375 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2376 MGMT_STATUS_INVALID_PARAMS);
2378 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: reject the whole load on any malformed entry. */
2381 for (i = 0; i < key_count; i++) {
2382 struct mgmt_link_key_info *key = &cp->keys[i];
2384 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2385 return mgmt_cmd_status(sk, hdev->id,
2386 MGMT_OP_LOAD_LINK_KEYS,
2387 MGMT_STATUS_INVALID_PARAMS);
2392 hci_link_keys_clear(hdev);
2395 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2397 changed = hci_dev_test_and_clear_flag(hdev,
2398 HCI_KEEP_DEBUG_KEYS);
2401 new_settings(hdev, NULL);
2403 for (i = 0; i < key_count; i++) {
2404 struct mgmt_link_key_info *key = &cp->keys[i];
2406 if (hci_is_blocked_key(hdev,
2407 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2409 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2414 /* Always ignore debug keys and require a new pairing if
2415 * the user wants to use them.
2417 if (key->type == HCI_LK_DEBUG_COMBINATION)
2420 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2421 key->type, key->pin_len, NULL);
2424 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2426 hci_dev_unlock(hdev);
/* Emit a Device Unpaired event for the given address to all mgmt sockets
 * except @skip_sk.
 * NOTE(review): the final argument line of the mgmt_event() call
 * (presumably skip_sk) is missing from this extraction.
 */
2431 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2432 u8 addr_type, struct sock *skip_sk)
2434 struct mgmt_ev_device_unpaired ev;
2436 bacpy(&ev.addr.bdaddr, bdaddr);
2437 ev.addr.type = addr_type;
2439 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device handler.  Removes stored pairing data: the BR/EDR
 * link key, or (for LE) cancels SMP pairing, drops keys, connection
 * entries and auto-connect parameters.  If cp->disconnect is set and a
 * live connection exists, it is aborted and the reply is deferred to the
 * pending command's completion; otherwise the reply and the Device
 * Unpaired event are sent immediately.  NOTE(review): lock/label/brace
 * lines, `conn = NULL;` assignments and the `u8 addr_type` declaration
 * are missing from this extraction.
 */
2443 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2446 struct mgmt_cp_unpair_device *cp = data;
2447 struct mgmt_rp_unpair_device rp;
2448 struct hci_conn_params *params;
2449 struct mgmt_pending_cmd *cmd;
2450 struct hci_conn *conn;
2454 memset(&rp, 0, sizeof(rp));
2455 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2456 rp.addr.type = cp->addr.type;
2458 if (!bdaddr_type_is_valid(cp->addr.type))
2459 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2460 MGMT_STATUS_INVALID_PARAMS,
2463 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2464 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2465 MGMT_STATUS_INVALID_PARAMS,
2470 if (!hdev_is_powered(hdev)) {
2471 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2472 MGMT_STATUS_NOT_POWERED, &rp,
2477 if (cp->addr.type == BDADDR_BREDR) {
2478 /* If disconnection is requested, then look up the
2479 * connection. If the remote device is connected, it
2480 * will be later used to terminate the link.
2482 * Setting it to NULL explicitly will cause no
2483 * termination of the link.
2486 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2491 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2493 err = mgmt_cmd_complete(sk, hdev->id,
2494 MGMT_OP_UNPAIR_DEVICE,
2495 MGMT_STATUS_NOT_PAIRED, &rp,
2503 /* LE address type */
2504 addr_type = le_addr_type(cp->addr.type);
2506 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2507 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2509 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2510 MGMT_STATUS_NOT_PAIRED, &rp,
2515 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2517 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2522 /* Defer clearing up the connection parameters until closing to
2523 * give a chance of keeping them if a repairing happens.
2525 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2527 /* Disable auto-connection parameters if present */
2528 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2530 if (params->explicit_connect)
2531 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2533 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2536 /* If disconnection is not requested, then clear the connection
2537 * variable so that the link is not terminated.
2539 if (!cp->disconnect)
2543 /* If the connection variable is set, then termination of the
2544 * link is requested.
2547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2549 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2553 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2560 cmd->cmd_complete = addr_cmd_complete;
2562 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2564 mgmt_pending_remove(cmd);
2567 hci_dev_unlock(hdev);
/* MGMT Disconnect handler.  Looks up the BR/EDR or LE connection for the
 * address, rejects when the adapter is down, a Disconnect is already
 * pending, or no usable connection exists, and otherwise issues the HCI
 * disconnect with a pending-command entry to deliver the reply.
 * NOTE(review): lock/label/brace lines and the `int err` declaration are
 * missing from this extraction.
 */
2571 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2574 struct mgmt_cp_disconnect *cp = data;
2575 struct mgmt_rp_disconnect rp;
2576 struct mgmt_pending_cmd *cmd;
2577 struct hci_conn *conn;
2580 bt_dev_dbg(hdev, "sock %p", sk);
2582 memset(&rp, 0, sizeof(rp));
2583 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2584 rp.addr.type = cp->addr.type;
2586 if (!bdaddr_type_is_valid(cp->addr.type))
2587 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2588 MGMT_STATUS_INVALID_PARAMS,
2593 if (!test_bit(HCI_UP, &hdev->flags)) {
2594 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2595 MGMT_STATUS_NOT_POWERED, &rp,
2600 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2601 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2602 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2606 if (cp->addr.type == BDADDR_BREDR)
2607 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2610 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2611 le_addr_type(cp->addr.type));
2613 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2614 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2615 MGMT_STATUS_NOT_CONNECTED, &rp,
2620 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2626 cmd->cmd_complete = generic_cmd_complete;
2628 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2630 mgmt_pending_remove(cmd);
2633 hci_dev_unlock(hdev);
/* Translate an HCI link type / address type pair into a mgmt BDADDR_*
 * address type, defaulting to LE random for unknown LE address types and
 * to BR/EDR for non-LE links.
 * NOTE(review): the `case ...:`/`default:` label lines of the outer and
 * inner switches are missing from this extraction — verify against
 * upstream.
 */
2637 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2639 switch (link_type) {
2641 switch (addr_type) {
2642 case ADDR_LE_DEV_PUBLIC:
2643 return BDADDR_LE_PUBLIC;
2646 /* Fallback to LE Random address type */
2647 return BDADDR_LE_RANDOM;
2651 /* Fallback to BR/EDR type */
2652 return BDADDR_BREDR;
/* MGMT Get Connections handler: count the mgmt-visible connections,
 * allocate a response sized for that count, fill in address/type entries
 * (SCO/eSCO links are filtered out of the reply), and send it with a
 * length recomputed from the entries actually written.
 * NOTE(review): counter initialization, lock/label/brace lines and the
 * kfree of @rp are missing from this extraction.
 */
2656 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2659 struct mgmt_rp_get_connections *rp;
2664 bt_dev_dbg(hdev, "sock %p", sk);
2668 if (!hdev_is_powered(hdev)) {
2669 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2670 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected. */
2675 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2676 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2680 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2687 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2688 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2690 bacpy(&rp->addr[i].bdaddr, &c->dst);
2691 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2692 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2697 rp->conn_count = cpu_to_le16(i);
2699 /* Recalculate length in case of filtered SCO connections, etc */
2700 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2701 struct_size(rp, addr, i));
2706 hci_dev_unlock(hdev);
/* Queue an HCI PIN Code Negative Reply for the given address, tracked by
 * a pending mgmt command whose reply carries the address back.
 * NOTE(review): allocation-failure handling and brace/return lines are
 * missing from this extraction.
 */
2710 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2711 struct mgmt_cp_pin_code_neg_reply *cp)
2713 struct mgmt_pending_cmd *cmd;
2716 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2721 cmd->cmd_complete = addr_cmd_complete;
2723 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2724 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2726 mgmt_pending_remove(cmd);
/* MGMT PIN Code Reply handler.  Requires a powered adapter and an
 * existing ACL connection; if the link wants a 16-digit PIN
 * (BT_SECURITY_HIGH) and the supplied one is shorter, a negative reply is
 * sent instead.  Otherwise an HCI PIN Code Reply is issued with a pending
 * command tracking it.  NOTE(review): lock/label/brace lines and the
 * `int err` declaration are missing from this extraction.
 */
2731 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2734 struct hci_conn *conn;
2735 struct mgmt_cp_pin_code_reply *cp = data;
2736 struct hci_cp_pin_code_reply reply;
2737 struct mgmt_pending_cmd *cmd;
2740 bt_dev_dbg(hdev, "sock %p", sk);
2744 if (!hdev_is_powered(hdev)) {
2745 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2746 MGMT_STATUS_NOT_POWERED);
2750 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2752 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2753 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a full 16-digit PIN; reject shorter ones. */
2757 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2758 struct mgmt_cp_pin_code_neg_reply ncp;
2760 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2762 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2764 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2766 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2767 MGMT_STATUS_INVALID_PARAMS);
2772 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2778 cmd->cmd_complete = addr_cmd_complete;
2780 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2781 reply.pin_len = cp->pin_len;
2782 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2784 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2786 mgmt_pending_remove(cmd);
2789 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_IO_CAPABILITY: validate the value against the SMP
 * maximum (SMP_IO_KEYBOARD_DISPLAY) and store it in hdev under the
 * device lock. Always replies with command complete on success.
 */
2793 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2796 struct mgmt_cp_set_io_capability *cp = data;
2798 bt_dev_dbg(hdev, "sock %p", sk);
2800 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2801 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2802 MGMT_STATUS_INVALID_PARAMS);
2806 hdev->io_capability = cp->io_capability;
2808 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2810 hci_dev_unlock(hdev);
2812 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command (if any) whose user_data
 * points at the given connection. Returns NULL when no pairing is
 * pending for this connection (return statements elided in excerpt).
 */
2816 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2818 struct hci_dev *hdev = conn->hdev;
2819 struct mgmt_pending_cmd *cmd;
2821 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2822 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2825 if (cmd->user_data != conn)
/* Complete a pending Pair Device command with the given mgmt status,
 * detach all confirm callbacks from the connection, and drop the
 * reference taken when pairing started.
 */
2834 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2836 struct mgmt_rp_pair_device rp;
2837 struct hci_conn *conn = cmd->user_data;
2840 bacpy(&rp.addr.bdaddr, &conn->dst);
2841 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2843 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2844 status, &rp, sizeof(rp));
2846 /* So we don't get further callbacks for this connection */
2847 conn->connect_cfm_cb = NULL;
2848 conn->security_cfm_cb = NULL;
2849 conn->disconn_cfm_cb = NULL;
2851 hci_conn_drop(conn);
2853 /* The device is paired so there is no need to remove
2854 * its connection parameters anymore.
2856 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called when SMP pairing finishes: translate success/failure into a
 * mgmt status and complete any pending Pair Device command for this
 * connection.
 */
2863 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2865 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2866 struct mgmt_pending_cmd *cmd;
2868 cmd = find_pairing(conn);
2870 cmd->cmd_complete(cmd, status);
2871 mgmt_pending_remove(cmd);
/* BR/EDR connect/security/disconnect confirm callback used during
 * pairing: completes the pending Pair Device command with the HCI
 * status translated to a mgmt status.
 */
2875 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2877 struct mgmt_pending_cmd *cmd;
2879 BT_DBG("status %u", status);
2881 cmd = find_pairing(conn);
2883 BT_DBG("Unable to find a pending command");
2887 cmd->cmd_complete(cmd, mgmt_status(status));
2888 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb. The extra guard that
 * distinguishes it from the BR/EDR version (status 0 means "just
 * connected" on LE, not "pairing done") is elided in this excerpt —
 * TODO confirm against full source.
 */
2891 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2893 struct mgmt_pending_cmd *cmd;
2895 BT_DBG("status %u", status);
2900 cmd = find_pairing(conn);
2902 BT_DBG("Unable to find a pending command");
2906 cmd->cmd_complete(cmd, mgmt_status(status));
2907 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PAIR_DEVICE: validate the address type and IO
 * capability, refuse when not powered or already paired, create the
 * transport-appropriate connection (ACL for BR/EDR, connect-by-scan
 * for LE), install pairing callbacks and, if the link is already up
 * and secure enough, complete immediately.
 */
2910 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2913 struct mgmt_cp_pair_device *cp = data;
2914 struct mgmt_rp_pair_device rp;
2915 struct mgmt_pending_cmd *cmd;
2916 u8 sec_level, auth_type;
2917 struct hci_conn *conn;
2920 bt_dev_dbg(hdev, "sock %p", sk);
2922 memset(&rp, 0, sizeof(rp));
2923 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2924 rp.addr.type = cp->addr.type;
2926 if (!bdaddr_type_is_valid(cp->addr.type))
2927 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2928 MGMT_STATUS_INVALID_PARAMS,
2931 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2932 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2933 MGMT_STATUS_INVALID_PARAMS,
2938 if (!hdev_is_powered(hdev)) {
2939 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2940 MGMT_STATUS_NOT_POWERED, &rp,
2945 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2946 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2947 MGMT_STATUS_ALREADY_PAIRED, &rp,
2952 sec_level = BT_SECURITY_MEDIUM;
2953 auth_type = HCI_AT_DEDICATED_BONDING;
2955 if (cp->addr.type == BDADDR_BREDR) {
2956 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2957 auth_type, CONN_REASON_PAIR_DEVICE);
2959 u8 addr_type = le_addr_type(cp->addr.type);
2960 struct hci_conn_params *p;
2962 /* When pairing a new device, it is expected to remember
2963 * this device for future connections. Adding the connection
2964 * parameter information ahead of time allows tracking
2965 * of the peripheral preferred values and will speed up any
2966 * further connection establishment.
2968 * If connection parameters already exist, then they
2969 * will be kept and this function does nothing.
2971 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2973 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2974 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2976 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2977 sec_level, HCI_LE_CONN_TIMEOUT,
2978 CONN_REASON_PAIR_DEVICE);
/* Map the connect error (ERR_PTR) to a mgmt status for the caller. */
2984 if (PTR_ERR(conn) == -EBUSY)
2985 status = MGMT_STATUS_BUSY;
2986 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2987 status = MGMT_STATUS_NOT_SUPPORTED;
2988 else if (PTR_ERR(conn) == -ECONNREFUSED)
2989 status = MGMT_STATUS_REJECTED;
2991 status = MGMT_STATUS_CONNECT_FAILED;
2993 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2994 status, &rp, sizeof(rp));
/* A connect_cfm_cb already installed means another pairing (or
 * similar) is in flight on this connection — refuse with BUSY.
 */
2998 if (conn->connect_cfm_cb) {
2999 hci_conn_drop(conn);
3000 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3001 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3005 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3008 hci_conn_drop(conn);
3012 cmd->cmd_complete = pairing_complete;
3014 /* For LE, just connecting isn't a proof that the pairing finished */
3015 if (cp->addr.type == BDADDR_BREDR) {
3016 conn->connect_cfm_cb = pairing_complete_cb;
3017 conn->security_cfm_cb = pairing_complete_cb;
3018 conn->disconn_cfm_cb = pairing_complete_cb;
3020 conn->connect_cfm_cb = le_pairing_complete_cb;
3021 conn->security_cfm_cb = le_pairing_complete_cb;
3022 conn->disconn_cfm_cb = le_pairing_complete_cb;
3025 conn->io_capability = cp->io_cap;
/* Hold a reference for the duration of the pending command; it is
 * dropped in pairing_complete().
 */
3026 cmd->user_data = hci_conn_get(conn);
3028 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3029 hci_conn_security(conn, sec_level, auth_type, true)) {
3030 cmd->cmd_complete(cmd, 0);
3031 mgmt_pending_remove(cmd);
3037 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: find the pending Pair Device
 * command for the given address, complete it with CANCELLED, and clean
 * up any pairing state (link key / SMP context) plus the link itself
 * when it was only created for the pairing attempt.
 */
3041 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3044 struct mgmt_addr_info *addr = data;
3045 struct mgmt_pending_cmd *cmd;
3046 struct hci_conn *conn;
3049 bt_dev_dbg(hdev, "sock %p", sk);
3053 if (!hdev_is_powered(hdev)) {
3054 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3055 MGMT_STATUS_NOT_POWERED);
3059 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3061 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3062 MGMT_STATUS_INVALID_PARAMS);
3066 conn = cmd->user_data;
/* The cancel must name the same device the pending pairing targets. */
3068 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3069 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3070 MGMT_STATUS_INVALID_PARAMS);
3074 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3075 mgmt_pending_remove(cmd);
3077 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3078 addr, sizeof(*addr));
3080 /* Since user doesn't want to proceed with the connection, abort any
3081 * ongoing pairing and then terminate the link if it was created
3082 * because of the pair device action.
3084 if (addr->type == BDADDR_BREDR)
3085 hci_remove_link_key(hdev, &addr->bdaddr);
3087 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3088 le_addr_type(addr->type));
3090 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3091 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3094 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN neg reply, user
 * confirm / passkey replies and their negatives). For LE addresses the
 * reply is routed through SMP; for BR/EDR a pending command is queued
 * and the corresponding HCI command is sent, with the passkey appended
 * only for HCI_OP_USER_PASSKEY_REPLY.
 *
 * @mgmt_op: mgmt opcode used for the reply to the socket
 * @hci_op:  HCI opcode forwarded to the controller (BR/EDR only)
 * @passkey: little-endian passkey, ignored unless hci_op is
 *           HCI_OP_USER_PASSKEY_REPLY
 */
3098 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3099 struct mgmt_addr_info *addr, u16 mgmt_op,
3100 u16 hci_op, __le32 passkey)
3102 struct mgmt_pending_cmd *cmd;
3103 struct hci_conn *conn;
3108 if (!hdev_is_powered(hdev)) {
3109 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3110 MGMT_STATUS_NOT_POWERED, addr,
3115 if (addr->type == BDADDR_BREDR)
3116 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3118 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3119 le_addr_type(addr->type));
3122 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3123 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by the SMP layer. */
3128 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3129 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3131 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3132 MGMT_STATUS_SUCCESS, addr,
3135 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3136 MGMT_STATUS_FAILED, addr,
3142 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3148 cmd->cmd_complete = addr_cmd_complete;
3150 /* Continue with pairing via HCI */
3151 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3152 struct hci_cp_user_passkey_reply cp;
3154 bacpy(&cp.bdaddr, &addr->bdaddr);
3155 cp.passkey = passkey;
3156 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3158 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* Presumably the hci_send_cmd failure path (guard elided). */
3162 mgmt_pending_remove(cmd);
3165 hci_dev_unlock(hdev);
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY via the common user pairing
 * response helper (no passkey).
 */
3169 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3170 void *data, u16 len)
3172 struct mgmt_cp_pin_code_neg_reply *cp = data;
3174 bt_dev_dbg(hdev, "sock %p", sk);
3176 return user_pairing_resp(sk, hdev, &cp->addr,
3177 MGMT_OP_PIN_CODE_NEG_REPLY,
3178 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_REPLY. The explicit length check exists
 * because this command shares its wire layout prefix with the passkey
 * variant — assumption from the visible check only; TODO confirm.
 */
3181 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3184 struct mgmt_cp_user_confirm_reply *cp = data;
3186 bt_dev_dbg(hdev, "sock %p", sk);
3188 if (len != sizeof(*cp))
3189 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3190 MGMT_STATUS_INVALID_PARAMS);
3192 return user_pairing_resp(sk, hdev, &cp->addr,
3193 MGMT_OP_USER_CONFIRM_REPLY,
3194 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY via the common helper. */
3197 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3198 void *data, u16 len)
3200 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3202 bt_dev_dbg(hdev, "sock %p", sk);
3204 return user_pairing_resp(sk, hdev, &cp->addr,
3205 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3206 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_PASSKEY_REPLY — the only user pairing response
 * that carries a passkey down to the helper.
 */
3209 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3212 struct mgmt_cp_user_passkey_reply *cp = data;
3214 bt_dev_dbg(hdev, "sock %p", sk);
3216 return user_pairing_resp(sk, hdev, &cp->addr,
3217 MGMT_OP_USER_PASSKEY_REPLY,
3218 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY via the common helper. */
3221 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3222 void *data, u16 len)
3224 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3226 bt_dev_dbg(hdev, "sock %p", sk);
3228 return user_pairing_resp(sk, hdev, &cp->addr,
3229 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3230 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Expire the current advertising instance when its flags intersect the
 * given mask (e.g. after a local-name or appearance change invalidates
 * the advertised data): cancel the advertising timeout and schedule the
 * next instance via an HCI request.
 */
3233 static void adv_expire(struct hci_dev *hdev, u32 flags)
3235 struct adv_info *adv_instance;
3236 struct hci_request req;
3239 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3243 /* stop if current instance doesn't need to be changed */
3244 if (!(adv_instance->flags & flags))
3247 cancel_adv_timeout(hdev);
3249 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3253 hci_req_init(&req, hdev);
3254 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3259 hci_req_run(&req, NULL);
/* HCI request completion callback for Set Local Name: report the
 * translated status (or command complete on success) to the pending
 * command's socket and, when LE advertising is active, expire any
 * advertising instance that embeds the local name.
 */
3262 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3264 struct mgmt_cp_set_local_name *cp;
3265 struct mgmt_pending_cmd *cmd;
3267 bt_dev_dbg(hdev, "status 0x%02x", status);
3271 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3278 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3279 mgmt_status(status));
3281 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3284 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3285 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME)
3288 mgmt_pending_remove(cmd);
3291 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_LOCAL_NAME: short-circuit when nothing changes,
 * update names directly when powered off (plus a LOCAL_NAME_CHANGED
 * event), otherwise push the new name to the controller via an HCI
 * request that refreshes name, EIR and LE scan response data.
 */
3294 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3297 struct mgmt_cp_set_local_name *cp = data;
3298 struct mgmt_pending_cmd *cmd;
3299 struct hci_request req;
3302 bt_dev_dbg(hdev, "sock %p", sk);
3306 /* If the old values are the same as the new ones just return a
3307 * direct command complete event.
3309 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3310 !memcmp(hdev->short_name, cp->short_name,
3311 sizeof(hdev->short_name))) {
3312 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The short name is host-side only, so it can be stored immediately. */
3317 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3319 if (!hdev_is_powered(hdev)) {
3320 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3322 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3327 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3328 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3329 ext_info_changed(hdev, sk);
3334 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3340 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3342 hci_req_init(&req, hdev);
3344 if (lmp_bredr_capable(hdev)) {
3345 __hci_req_update_name(&req);
3346 __hci_req_update_eir(&req);
3349 /* The name is stored in the scan response data and so
3350 * no need to update the advertising data here.
3352 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3353 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3355 err = hci_req_run(&req, set_name_complete);
3357 mgmt_pending_remove(cmd);
3360 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_APPEARANCE (LE only): store the new appearance
 * value and, if it changed while LE advertising is active, expire any
 * advertising instance that embeds the appearance.
 */
3364 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3367 struct mgmt_cp_set_appearance *cp = data;
3371 bt_dev_dbg(hdev, "sock %p", sk);
3373 if (!lmp_le_capable(hdev))
3374 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3375 MGMT_STATUS_NOT_SUPPORTED);
3377 appearance = le16_to_cpu(cp->appearance);
3381 if (hdev->appearance != appearance) {
3382 hdev->appearance = appearance;
3384 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3385 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3387 ext_info_changed(hdev, sk);
3390 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3393 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report supported, selected and
 * configurable PHY bitmasks, all gathered under the device lock.
 */
3398 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3399 void *data, u16 len)
3401 struct mgmt_rp_get_phy_configuration rp;
3403 bt_dev_dbg(hdev, "sock %p", sk);
3407 memset(&rp, 0, sizeof(rp));
3409 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3410 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3411 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3413 hci_dev_unlock(hdev);
3415 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event with the currently
 * selected PHYs to all mgmt sockets except @skip.
 */
3419 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3421 struct mgmt_ev_phy_configuration_changed ev;
3423 memset(&ev, 0, sizeof(ev));
3425 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3427 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* HCI request completion callback for Set PHY Configuration: relay the
 * translated status to the pending command's socket and, on success,
 * notify other sockets of the configuration change.
 */
3431 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3432 u16 opcode, struct sk_buff *skb)
3434 struct mgmt_pending_cmd *cmd;
3436 bt_dev_dbg(hdev, "status 0x%02x", status);
3440 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3445 mgmt_cmd_status(cmd->sk, hdev->id,
3446 MGMT_OP_SET_PHY_CONFIGURATION,
3447 mgmt_status(status));
3449 mgmt_cmd_complete(cmd->sk, hdev->id,
3450 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3453 mgmt_phy_configuration_changed(hdev, cmd->sk);
3456 mgmt_pending_remove(cmd);
3459 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: validate the requested PHY
 * bitmask against what the controller supports and allows configuring,
 * translate the BR/EDR portion into ACL packet-type bits stored in
 * hdev->pkt_type, and push the LE portion to the controller via
 * HCI_OP_LE_SET_DEFAULT_PHY.
 */
3462 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3463 void *data, u16 len)
3465 struct mgmt_cp_set_phy_configuration *cp = data;
3466 struct hci_cp_le_set_default_phy cp_phy;
3467 struct mgmt_pending_cmd *cmd;
3468 struct hci_request req;
3469 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3470 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3471 bool changed = false;
3474 bt_dev_dbg(hdev, "sock %p", sk);
3476 configurable_phys = get_configurable_phys(hdev);
3477 supported_phys = get_supported_phys(hdev);
3478 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject any PHY the controller does not support at all. */
3480 if (selected_phys & ~supported_phys)
3481 return mgmt_cmd_status(sk, hdev->id,
3482 MGMT_OP_SET_PHY_CONFIGURATION,
3483 MGMT_STATUS_INVALID_PARAMS);
3485 unconfigure_phys = supported_phys & ~configurable_phys;
/* Non-configurable PHYs must always stay selected. */
3487 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3488 return mgmt_cmd_status(sk, hdev->id,
3489 MGMT_OP_SET_PHY_CONFIGURATION,
3490 MGMT_STATUS_INVALID_PARAMS);
3492 if (selected_phys == get_selected_phys(hdev))
3493 return mgmt_cmd_complete(sk, hdev->id,
3494 MGMT_OP_SET_PHY_CONFIGURATION,
3499 if (!hdev_is_powered(hdev)) {
3500 err = mgmt_cmd_status(sk, hdev->id,
3501 MGMT_OP_SET_PHY_CONFIGURATION,
3502 MGMT_STATUS_REJECTED);
3506 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3507 err = mgmt_cmd_status(sk, hdev->id,
3508 MGMT_OP_SET_PHY_CONFIGURATION,
/* Basic-rate multi-slot packet types are plain enable bits... */
3513 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3514 pkt_type |= (HCI_DH3 | HCI_DM3);
3516 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3518 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3519 pkt_type |= (HCI_DH5 | HCI_DM5);
3521 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* ...while the EDR bits use inverted logic: setting the bit in
 * pkt_type *disables* the corresponding EDR packet type, hence the
 * bit is cleared when the PHY is selected.
 */
3523 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3524 pkt_type &= ~HCI_2DH1;
3526 pkt_type |= HCI_2DH1;
3528 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3529 pkt_type &= ~HCI_2DH3;
3531 pkt_type |= HCI_2DH3;
3533 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3534 pkt_type &= ~HCI_2DH5;
3536 pkt_type |= HCI_2DH5;
3538 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3539 pkt_type &= ~HCI_3DH1;
3541 pkt_type |= HCI_3DH1;
3543 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3544 pkt_type &= ~HCI_3DH3;
3546 pkt_type |= HCI_3DH3;
3548 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3549 pkt_type &= ~HCI_3DH5;
3551 pkt_type |= HCI_3DH5;
3553 if (pkt_type != hdev->pkt_type) {
3554 hdev->pkt_type = pkt_type;
/* If the LE part did not change, no HCI command is needed; just
 * report completion (and a changed event if BR/EDR changed).
 */
3558 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3559 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3561 mgmt_phy_configuration_changed(hdev, sk);
3563 err = mgmt_cmd_complete(sk, hdev->id,
3564 MGMT_OP_SET_PHY_CONFIGURATION,
3570 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3577 hci_req_init(&req, hdev);
3579 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" for TX/RX. */
3581 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3582 cp_phy.all_phys |= 0x01;
3584 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3585 cp_phy.all_phys |= 0x02;
3587 if (selected_phys & MGMT_PHY_LE_1M_TX)
3588 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3590 if (selected_phys & MGMT_PHY_LE_2M_TX)
3591 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3593 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3594 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3596 if (selected_phys & MGMT_PHY_LE_1M_RX)
3597 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3599 if (selected_phys & MGMT_PHY_LE_2M_RX)
3600 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3602 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3603 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3605 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3607 err = hci_req_run_skb(&req, set_default_phy_complete);
3609 mgmt_pending_remove(cmd);
3612 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BLOCKED_KEYS: validate the key count against the
 * u16 wire-length limit and the exact expected payload size, then
 * atomically replace hdev->blocked_keys with the new list under the
 * device lock.
 */
3617 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
/* Note: err holds a mgmt status code here, not a negative errno. */
3620 int err = MGMT_STATUS_SUCCESS;
3621 struct mgmt_cp_set_blocked_keys *keys = data;
/* Largest key_count that still fits in a u16-sized message. */
3622 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3623 sizeof(struct mgmt_blocked_key_info));
3624 u16 key_count, expected_len;
3627 bt_dev_dbg(hdev, "sock %p", sk);
3629 key_count = __le16_to_cpu(keys->key_count);
3630 if (key_count > max_key_count) {
3631 bt_dev_err(hdev, "too big key_count value %u", key_count);
3632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3633 MGMT_STATUS_INVALID_PARAMS);
3636 expected_len = struct_size(keys, keys, key_count);
3637 if (expected_len != len) {
3638 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3641 MGMT_STATUS_INVALID_PARAMS);
/* Replace the whole list: clear first, then add each new entry. */
3646 hci_blocked_keys_clear(hdev);
3648 for (i = 0; i < keys->key_count; ++i) {
3649 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3652 err = MGMT_STATUS_NO_RESOURCES;
3656 b->type = keys->keys[i].type;
3657 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3658 list_add_rcu(&b->list, &hdev->blocked_keys);
3660 hci_dev_unlock(hdev);
3662 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: only available when the driver
 * declares HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, only accepts a boolean
 * value, and only allows changing the setting while the adapter is
 * powered off (or already in the requested state).
 */
3666 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3667 void *data, u16 len)
3669 struct mgmt_mode *cp = data;
3671 bool changed = false;
3673 bt_dev_dbg(hdev, "sock %p", sk);
3675 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3676 return mgmt_cmd_status(sk, hdev->id,
3677 MGMT_OP_SET_WIDEBAND_SPEECH,
3678 MGMT_STATUS_NOT_SUPPORTED);
3680 if (cp->val != 0x00 && cp->val != 0x01)
3681 return mgmt_cmd_status(sk, hdev->id,
3682 MGMT_OP_SET_WIDEBAND_SPEECH,
3683 MGMT_STATUS_INVALID_PARAMS);
3687 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3688 err = mgmt_cmd_status(sk, hdev->id,
3689 MGMT_OP_SET_WIDEBAND_SPEECH,
/* While powered, the flag cannot be flipped — only a no-op request
 * (matching the current state) is accepted.
 */
3694 if (hdev_is_powered(hdev) &&
3695 !!cp->val != hci_dev_test_flag(hdev,
3696 HCI_WIDEBAND_SPEECH_ENABLED)) {
3697 err = mgmt_cmd_status(sk, hdev->id,
3698 MGMT_OP_SET_WIDEBAND_SPEECH,
3699 MGMT_STATUS_REJECTED);
3704 changed = !hci_dev_test_and_set_flag(hdev,
3705 HCI_WIDEBAND_SPEECH_ENABLED);
3707 changed = hci_dev_test_and_clear_flag(hdev,
3708 HCI_WIDEBAND_SPEECH_ENABLED);
3710 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3715 err = new_settings(hdev, sk);
3718 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_CONTROLLER_CAP: build an EIR-style TLV list of
 * controller capabilities — security flags, max encryption key sizes
 * (BR/EDR when readable, plus the SMP maximum) and, when available,
 * the LE min/max TX power range.
 */
3722 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3723 void *data, u16 data_len)
3726 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3729 u8 tx_power_range[2];
3731 bt_dev_dbg(hdev, "sock %p", sk);
3733 memset(&buf, 0, sizeof(buf));
3737 /* When the Read Simple Pairing Options command is supported, then
3738 * the remote public key validation is supported.
3740 * Alternatively, when Microsoft extensions are available, they can
3741 * indicate support for public key validation as well.
3743 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3744 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3746 flags |= 0x02; /* Remote public key validation (LE) */
3748 /* When the Read Encryption Key Size command is supported, then the
3749 * encryption key size is enforced.
3751 if (hdev->commands[20] & 0x10)
3752 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3754 flags |= 0x08; /* Encryption key size enforcement (LE) */
3756 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3759 /* When the Read Simple Pairing Options command is supported, then
3760 * also max encryption key size information is provided.
3762 if (hdev->commands[41] & 0x08)
3763 cap_len = eir_append_le16(rp->cap, cap_len,
3764 MGMT_CAP_MAX_ENC_KEY_SIZE,
3765 hdev->max_enc_key_size);
3767 cap_len = eir_append_le16(rp->cap, cap_len,
3768 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3769 SMP_MAX_ENC_KEY_SIZE);
3771 /* Append the min/max LE tx power parameters if we were able to fetch
3772 * it from the controller
3774 if (hdev->commands[38] & 0x80) {
3775 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3776 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3777 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3781 rp->cap_len = cpu_to_le16(cap_len);
3783 hci_dev_unlock(hdev);
3785 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3786 rp, sizeof(*rp) + cap_len);
3789 #ifdef CONFIG_BT_FEATURE_DEBUG
3790 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
/* Experimental-feature UUIDs are stored byte-reversed (little-endian)
 * relative to the string form in the comment above each table.
 */
3791 static const u8 debug_uuid[16] = {
3792 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3793 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3797 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3798 static const u8 simult_central_periph_uuid[16] = {
3799 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3800 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3803 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3804 static const u8 rpa_resolution_uuid[16] = {
3805 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3806 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features
 * available in this build — kernel debug logging (CONFIG_BT_FEATURE_DEBUG),
 * simultaneous central/peripheral support, and LL privacy (RPA
 * resolution) — each as a UUID plus flags entry. Also opts the socket
 * in to future EXP_FEATURE_CHANGED events.
 */
3809 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3810 void *data, u16 data_len)
3812 char buf[62]; /* Enough space for 3 features */
3813 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3817 bt_dev_dbg(hdev, "sock %p", sk);
3819 memset(&buf, 0, sizeof(buf));
3821 #ifdef CONFIG_BT_FEATURE_DEBUG
/* BIT(0) in flags means "currently enabled". */
3823 flags = bt_dbg_get() ? BIT(0) : 0;
3825 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3826 rp->features[idx].flags = cpu_to_le32(flags);
/* Simultaneous central+peripheral needs the controller to declare
 * valid LE states and the relevant state-combination bits.
 */
3832 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3833 (hdev->le_states[4] & 0x08) && /* Central */
3834 (hdev->le_states[4] & 0x40) && /* Peripheral */
3835 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3840 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3841 rp->features[idx].flags = cpu_to_le32(flags);
3845 if (hdev && use_ll_privacy(hdev)) {
3846 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3847 flags = BIT(0) | BIT(1);
3851 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3852 rp->features[idx].flags = cpu_to_le32(flags);
3856 rp->feature_count = cpu_to_le16(idx);
3858 /* After reading the experimental features information, enable
3859 * the events to update client on any future change.
3861 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3863 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3864 MGMT_OP_READ_EXP_FEATURES_INFO,
3865 0, rp, sizeof(*rp) + (20 * idx));
/* Emit an Experimental Feature Changed event for the LL privacy (RPA
 * resolution) feature. BIT(1) is always set in the flags; BIT(0)
 * reflects the enabled state.
 */
3868 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3871 struct mgmt_ev_exp_feature_changed ev;
3873 memset(&ev, 0, sizeof(ev));
3874 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3875 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3877 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3879 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3883 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Emit an Experimental Feature Changed event for the debug feature
 * (global, hence hdev == NULL), skipping the originating socket.
 */
3884 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3886 struct mgmt_ev_exp_feature_changed ev;
3888 memset(&ev, 0, sizeof(ev));
3889 memcpy(ev.uuid, debug_uuid, 16);
3890 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3892 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3894 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Build one entry of the exp_features[] table (presumably pairing the
 * UUID with its setter — the .uuid initializer line is elided here).
 */
3898 #define EXP_FEAT(_uuid, _set_func) \
3901 .set_func = _set_func, \
3904 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setter invoked for the all-zero UUID: disables the debug feature (if
 * built in) and, when the adapter is powered off, clears the LL privacy
 * flag — emitting the matching "changed" events for each.
 */
3905 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
3906 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3908 struct mgmt_rp_set_exp_feature rp;
3910 memset(rp.uuid, 0, 16);
3911 rp.flags = cpu_to_le32(0);
3913 #ifdef CONFIG_BT_FEATURE_DEBUG
3915 bool changed = bt_dbg_get();
3920 exp_debug_feature_changed(false, sk);
3924 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3927 changed = hci_dev_test_and_clear_flag(hdev,
3928 HCI_ENABLE_LL_PRIVACY);
3930 exp_ll_privacy_feature_changed(false, hdev, sk);
3933 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3935 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3936 MGMT_OP_SET_EXP_FEATURE, 0,
3940 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Setter for the debug experimental feature: must be issued on the
 * non-controller index, takes a single boolean octet, toggles the
 * global debug state and notifies other sockets on change.
 */
3941 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
3942 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
3944 struct mgmt_rp_set_exp_feature rp;
3949 /* Command requires to use the non-controller index */
3951 return mgmt_cmd_status(sk, hdev->id,
3952 MGMT_OP_SET_EXP_FEATURE,
3953 MGMT_STATUS_INVALID_INDEX);
3955 /* Parameters are limited to a single octet */
3956 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3957 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3958 MGMT_OP_SET_EXP_FEATURE,
3959 MGMT_STATUS_INVALID_PARAMS);
3961 /* Only boolean on/off is supported */
3962 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3963 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3964 MGMT_OP_SET_EXP_FEATURE,
3965 MGMT_STATUS_INVALID_PARAMS);
3967 val = !!cp->param[0];
3968 changed = val ? !bt_dbg_get() : bt_dbg_get();
3971 memcpy(rp.uuid, debug_uuid, 16);
3972 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3974 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3976 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3977 MGMT_OP_SET_EXP_FEATURE, 0,
3981 exp_debug_feature_changed(val, sk);
/* Setter for the LL privacy (RPA resolution) experimental feature:
 * requires a controller index and a powered-off adapter, takes a single
 * boolean octet, toggles HCI_ENABLE_LL_PRIVACY (clearing the
 * advertising flag when enabling) and notifies other sockets on change.
 */
3987 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
3988 struct mgmt_cp_set_exp_feature *cp,
3991 struct mgmt_rp_set_exp_feature rp;
3996 /* Command requires to use the controller index */
3998 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3999 MGMT_OP_SET_EXP_FEATURE,
4000 MGMT_STATUS_INVALID_INDEX);
4002 /* Changes can only be made when controller is powered down */
4003 if (hdev_is_powered(hdev))
4004 return mgmt_cmd_status(sk, hdev->id,
4005 MGMT_OP_SET_EXP_FEATURE,
4006 MGMT_STATUS_REJECTED);
4008 /* Parameters are limited to a single octet */
4009 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4010 return mgmt_cmd_status(sk, hdev->id,
4011 MGMT_OP_SET_EXP_FEATURE,
4012 MGMT_STATUS_INVALID_PARAMS);
4014 /* Only boolean on/off is supported */
4015 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4016 return mgmt_cmd_status(sk, hdev->id,
4017 MGMT_OP_SET_EXP_FEATURE,
4018 MGMT_STATUS_INVALID_PARAMS);
4020 val = !!cp->param[0];
4023 changed = !hci_dev_test_and_set_flag(hdev,
4024 HCI_ENABLE_LL_PRIVACY);
4025 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4027 /* Enable LL privacy + supported settings changed */
4028 flags = BIT(0) | BIT(1);
4030 changed = hci_dev_test_and_clear_flag(hdev,
4031 HCI_ENABLE_LL_PRIVACY);
4033 /* Disable LL privacy + supported settings changed */
4037 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4038 rp.flags = cpu_to_le32(flags);
4040 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4042 err = mgmt_cmd_complete(sk, hdev->id,
4043 MGMT_OP_SET_EXP_FEATURE, 0,
4047 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Dispatch table mapping experimental-feature UUIDs to their setters;
 * terminated by a NULL entry (ZERO_KEY handles the all-zero UUID).
 */
4052 static const struct mgmt_exp_feature {
4054 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4055 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4056 } exp_features[] = {
4057 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4058 #ifdef CONFIG_BT_FEATURE_DEBUG
4059 EXP_FEAT(debug_uuid, set_debug_func),
4061 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4063 /* end with a null feature */
4064 EXP_FEAT(NULL, NULL)
/* Handle MGMT_OP_SET_EXP_FEATURE: match the request UUID against the
 * exp_features table and delegate to the feature's setter; unknown
 * UUIDs get NOT_SUPPORTED.
 */
4067 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4068 void *data, u16 data_len)
4070 struct mgmt_cp_set_exp_feature *cp = data;
4073 bt_dev_dbg(hdev, "sock %p", sk);
4075 for (i = 0; exp_features[i].uuid; i++) {
4076 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4077 return exp_features[i].set_func(sk, hdev, cp, data_len);
4080 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4081 MGMT_OP_SET_EXP_FEATURE,
4082 MGMT_STATUS_NOT_SUPPORTED);
/* Bitmask with every device flag below HCI_CONN_FLAG_MAX set. */
4085 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* Handle MGMT_OP_GET_DEVICE_FLAGS: look up the per-device flags in the
 * BR/EDR accept list or the LE connection parameters (depending on the
 * address type) and report supported plus current flags. status stays
 * INVALID_PARAMS when the device is not found.
 */
4087 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4090 struct mgmt_cp_get_device_flags *cp = data;
4091 struct mgmt_rp_get_device_flags rp;
4092 struct bdaddr_list_with_flags *br_params;
4093 struct hci_conn_params *params;
4094 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4095 u32 current_flags = 0;
4096 u8 status = MGMT_STATUS_INVALID_PARAMS;
4098 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4099 &cp->addr.bdaddr, cp->addr.type);
4103 memset(&rp, 0, sizeof(rp));
4105 if (cp->addr.type == BDADDR_BREDR) {
4106 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4112 current_flags = br_params->current_flags;
4114 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4115 le_addr_type(cp->addr.type));
4120 current_flags = params->current_flags;
4123 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4124 rp.addr.type = cp->addr.type;
4125 rp.supported_flags = cpu_to_le32(supported_flags);
4126 rp.current_flags = cpu_to_le32(current_flags);
4128 status = MGMT_STATUS_SUCCESS;
4131 hci_dev_unlock(hdev);
4133 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Emit the MGMT_EV_DEVICE_FLAGS_CHANGED event for @bdaddr/@bdaddr_type,
 * skipping @sk (the socket that triggered the change) so the initiator
 * does not receive its own notification.
 */
4137 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4138 bdaddr_t *bdaddr, u8 bdaddr_type,
4139 u32 supported_flags, u32 current_flags)
4141 struct mgmt_ev_device_flags_changed ev;
4143 bacpy(&ev.addr.bdaddr, bdaddr);
4144 ev.addr.type = bdaddr_type;
4145 ev.supported_flags = cpu_to_le32(supported_flags);
4146 ev.current_flags = cpu_to_le32(current_flags);
4148 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: validate the requested flag bits
 * against SUPPORTED_DEVICE_FLAGS(), store them on the matching BR/EDR
 * accept-list entry or LE conn params, and broadcast
 * MGMT_EV_DEVICE_FLAGS_CHANGED on success.
 * NOTE(review): listing is truncated — hci_dev_lock() and some branch
 * structure appear to be elided.
 */
4151 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4154 struct mgmt_cp_set_device_flags *cp = data;
4155 struct bdaddr_list_with_flags *br_params;
4156 struct hci_conn_params *params;
4157 u8 status = MGMT_STATUS_INVALID_PARAMS;
4158 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4159 u32 current_flags = __le32_to_cpu(cp->current_flags);
4161 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4162 &cp->addr.bdaddr, cp->addr.type,
4163 __le32_to_cpu(current_flags));
/* Reject any flag bit outside the supported mask. */
4165 if ((supported_flags | current_flags) != supported_flags) {
4166 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4167 current_flags, supported_flags);
4173 if (cp->addr.type == BDADDR_BREDR) {
4174 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4179 br_params->current_flags = current_flags;
4180 status = MGMT_STATUS_SUCCESS;
4182 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4183 &cp->addr.bdaddr, cp->addr.type);
4186 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4187 le_addr_type(cp->addr.type));
4189 params->current_flags = current_flags;
4190 status = MGMT_STATUS_SUCCESS;
4192 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4194 le_addr_type(cp->addr.type));
4199 hci_dev_unlock(hdev);
/* Event only on success; the initiating socket is skipped inside
 * device_flags_changed().
 */
4201 if (status == MGMT_STATUS_SUCCESS)
4202 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4203 supported_flags, current_flags);
4205 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4206 &cp->addr, sizeof(cp->addr));
/* Notify other mgmt sockets that an advertisement monitor with @handle
 * was added; @sk (the adder) is skipped by mgmt_event().
 */
4209 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4212 struct mgmt_ev_adv_monitor_added ev;
4214 ev.monitor_handle = cpu_to_le16(handle);
4216 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Notify mgmt sockets that the advertisement monitor @handle was removed.
 * If a REMOVE_ADV_MONITOR command is pending for this handle, its
 * originating socket is skipped (it gets a command reply instead of the
 * event).
 * NOTE(review): listing is truncated — the cp assignment from cmd->param
 * and the sk_skip assignment appear to be elided.
 */
4219 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4221 struct mgmt_ev_adv_monitor_removed ev;
4222 struct mgmt_pending_cmd *cmd;
4223 struct sock *sk_skip = NULL;
4224 struct mgmt_cp_remove_adv_monitor *cp;
4226 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4230 if (cp->monitor_handle)
4234 ev.monitor_handle = cpu_to_le16(handle);
4236 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * monitor features, the handle/pattern limits, and the list of currently
 * registered monitor handles collected from hdev->adv_monitors_idr.
 * NOTE(review): listing is truncated — hci_dev_lock(), the kmalloc
 * failure check, kfree(rp) and the local declarations (rp_size, handle,
 * enabled, err) appear to be elided.
 */
4239 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4240 void *data, u16 len)
4242 struct adv_monitor *monitor = NULL;
4243 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4246 __u32 supported = 0;
4248 __u16 num_handles = 0;
4249 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4251 BT_DBG("request for %s", hdev->name);
/* OR-pattern monitoring is only advertised when the controller's MSFT
 * extension supports it.
 */
4255 if (msft_monitor_supported(hdev))
4256 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4258 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4259 handles[num_handles++] = monitor->handle;
4261 hci_dev_unlock(hdev);
/* Reply is variable-length: fixed header plus one u16 per handle. */
4263 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4264 rp = kmalloc(rp_size, GFP_KERNEL);
4268 /* All supported features are currently enabled */
4269 enabled = supported;
4271 rp->supported_features = cpu_to_le32(supported);
4272 rp->enabled_features = cpu_to_le32(enabled);
4273 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4274 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4275 rp->num_handles = cpu_to_le16(num_handles);
4277 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4279 err = mgmt_cmd_complete(sk, hdev->id,
4280 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4281 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion handler for ADD_ADV_PATTERNS_MONITOR[_RSSI]: on controller
 * success, announce the new monitor, bump the monitor count, move the
 * monitor to REGISTERED state and kick the background scan; then reply
 * to the pending command and drop it.
 * NOTE(review): listing is truncated — hci_dev_lock() and the !cmd /
 * status error handling appear to be elided.
 */
4288 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4290 struct mgmt_rp_add_adv_patterns_monitor rp;
4291 struct mgmt_pending_cmd *cmd;
4292 struct adv_monitor *monitor;
/* Either opcode variant may be pending; try the RSSI one first. */
4297 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4299 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4304 monitor = cmd->user_data;
4305 rp.monitor_handle = cpu_to_le16(monitor->handle);
4308 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4309 hdev->adv_monitors_cnt++;
4310 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4311 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4312 hci_update_background_scan(hdev);
4315 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4316 mgmt_status(status), &rp, sizeof(rp));
4317 mgmt_pending_remove(cmd);
4320 hci_dev_unlock(hdev);
4321 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4322 rp.monitor_handle, status);
/* Common worker for both ADD_ADV_PATTERNS_MONITOR opcodes: rejects the
 * request while conflicting monitor/LE commands are pending, queues a
 * pending command, and hands the monitor to hci_add_adv_monitor().  If
 * the controller round-trip is not needed (not "pending"), the monitor
 * is registered immediately and the reply is sent here; otherwise the
 * completion handler finishes the job.  On failure the monitor @m is
 * freed via hci_free_adv_monitor().
 * NOTE(review): listing is truncated — the incoming-@status check,
 * goto labels and several branch closes appear to be elided.
 */
4327 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4328 struct adv_monitor *m, u8 status,
4329 void *data, u16 len, u16 op)
4331 struct mgmt_rp_add_adv_patterns_monitor rp;
4332 struct mgmt_pending_cmd *cmd;
/* Only one monitor-mutating (or SET_LE) operation may be in flight. */
4341 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4342 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4343 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4344 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4345 status = MGMT_STATUS_BUSY;
4349 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4351 status = MGMT_STATUS_NO_RESOURCES;
4356 pending = hci_add_adv_monitor(hdev, m, &err);
/* Map the hci-layer errno onto a mgmt status code. */
4358 if (err == -ENOSPC || err == -ENOMEM)
4359 status = MGMT_STATUS_NO_RESOURCES;
4360 else if (err == -EINVAL)
4361 status = MGMT_STATUS_INVALID_PARAMS;
4363 status = MGMT_STATUS_FAILED;
4365 mgmt_pending_remove(cmd);
/* Synchronous path: no controller command needed, register now. */
4370 mgmt_pending_remove(cmd);
4371 rp.monitor_handle = cpu_to_le16(m->handle);
4372 mgmt_adv_monitor_added(sk, hdev, m->handle);
4373 m->state = ADV_MONITOR_STATE_REGISTERED;
4374 hdev->adv_monitors_cnt++;
4376 hci_dev_unlock(hdev);
4377 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4381 hci_dev_unlock(hdev);
4386 hci_free_adv_monitor(hdev, m);
4387 hci_dev_unlock(hdev);
4388 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy RSSI thresholds from the command parameters into monitor @m,
 * converting the little-endian timeouts.  When @rssi is NULL the
 * defaults below are used instead.
 * NOTE(review): listing is truncated — the if/else around the two
 * branches appears to be elided.
 */
4391 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4392 struct mgmt_adv_rssi_thresholds *rssi)
4395 m->rssi.low_threshold = rssi->low_threshold;
4396 m->rssi.low_threshold_timeout =
4397 __le16_to_cpu(rssi->low_threshold_timeout);
4398 m->rssi.high_threshold = rssi->high_threshold;
4399 m->rssi.high_threshold_timeout =
4400 __le16_to_cpu(rssi->high_threshold_timeout);
4401 m->rssi.sampling_period = rssi->sampling_period;
4403 /* Default values. These numbers are the least constricting
4404 * parameters for MSFT API to work, so it behaves as if there
4405 * are no rssi parameter to consider. May need to be changed
4406 * if other API are to be supported.
4408 m->rssi.low_threshold = -127;
4409 m->rssi.low_threshold_timeout = 60;
4410 m->rssi.high_threshold = -127;
4411 m->rssi.high_threshold_timeout = 0;
4412 m->rssi.sampling_period = 0;
/* Validate @pattern_count patterns and append a kmalloc'd copy of each
 * to m->patterns.  A pattern is rejected when its offset/length window
 * does not fit inside HCI_MAX_AD_LENGTH bytes of AD data.
 * Returns a MGMT_STATUS_* code.
 * NOTE(review): on a mid-list failure, patterns already added to
 * m->patterns are presumably freed by the caller via
 * hci_free_adv_monitor() — confirm against __add_adv_patterns_monitor.
 */
4416 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4417 struct mgmt_adv_pattern *patterns)
4419 u8 offset = 0, length = 0;
4420 struct adv_pattern *p = NULL;
4423 for (i = 0; i < pattern_count; i++) {
4424 offset = patterns[i].offset;
4425 length = patterns[i].length;
4426 if (offset >= HCI_MAX_AD_LENGTH ||
4427 length > HCI_MAX_AD_LENGTH ||
4428 (offset + length) > HCI_MAX_AD_LENGTH)
4429 return MGMT_STATUS_INVALID_PARAMS;
4431 p = kmalloc(sizeof(*p), GFP_KERNEL);
4433 return MGMT_STATUS_NO_RESOURCES;
4435 p->ad_type = patterns[i].ad_type;
4436 p->offset = patterns[i].offset;
4437 p->length = patterns[i].length;
4438 memcpy(p->value, patterns[i].value, p->length);
4440 INIT_LIST_HEAD(&p->list);
4441 list_add(&p->list, &m->patterns);
4444 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds):
 * validates that @len exactly matches the header plus pattern_count
 * pattern records, allocates the monitor, fills default RSSI values
 * (parse_adv_monitor_rssi(m, NULL)), parses the patterns and defers to
 * __add_adv_patterns_monitor() for registration and the reply.
 */
4447 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4448 void *data, u16 len)
4450 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4451 struct adv_monitor *m = NULL;
4452 u8 status = MGMT_STATUS_SUCCESS;
4453 size_t expected_size = sizeof(*cp);
4455 BT_DBG("request for %s", hdev->name);
/* At least one pattern record must follow the fixed header. */
4457 if (len <= sizeof(*cp)) {
4458 status = MGMT_STATUS_INVALID_PARAMS;
4462 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4463 if (len != expected_size) {
4464 status = MGMT_STATUS_INVALID_PARAMS;
4468 m = kzalloc(sizeof(*m), GFP_KERNEL);
4470 status = MGMT_STATUS_NO_RESOURCES;
4474 INIT_LIST_HEAD(&m->patterns);
4476 parse_adv_monitor_rssi(m, NULL);
4477 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
/* __add_adv_patterns_monitor handles both success and error status. */
4480 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4481 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: same flow as
 * add_adv_patterns_monitor() but the command carries explicit RSSI
 * thresholds which are copied via parse_adv_monitor_rssi(m, &cp->rssi).
 */
4484 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4485 void *data, u16 len)
4487 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4488 struct adv_monitor *m = NULL;
4489 u8 status = MGMT_STATUS_SUCCESS;
4490 size_t expected_size = sizeof(*cp);
4492 BT_DBG("request for %s", hdev->name);
4494 if (len <= sizeof(*cp)) {
4495 status = MGMT_STATUS_INVALID_PARAMS;
4499 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4500 if (len != expected_size) {
4501 status = MGMT_STATUS_INVALID_PARAMS;
4505 m = kzalloc(sizeof(*m), GFP_KERNEL);
4507 status = MGMT_STATUS_NO_RESOURCES;
4511 INIT_LIST_HEAD(&m->patterns);
4513 parse_adv_monitor_rssi(m, &cp->rssi);
4514 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4517 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4518 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion handler for REMOVE_ADV_MONITOR: echo the requested handle
 * back in the reply, refresh the background scan, answer the pending
 * command and drop it.
 * NOTE(review): listing is truncated — hci_dev_lock(), the !cmd check
 * and the cp assignment from cmd->param appear to be elided.
 */
4521 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4523 struct mgmt_rp_remove_adv_monitor rp;
4524 struct mgmt_cp_remove_adv_monitor *cp;
4525 struct mgmt_pending_cmd *cmd;
4530 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4535 rp.monitor_handle = cp->monitor_handle;
4538 hci_update_background_scan(hdev);
4540 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4541 mgmt_status(status), &rp, sizeof(rp));
4542 mgmt_pending_remove(cmd);
4545 hci_dev_unlock(hdev);
4546 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4547 rp.monitor_handle, status);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: handle 0 removes all monitors,
 * otherwise the single monitor with that handle.  Rejects with BUSY
 * while another monitor/LE command is pending.  If the hci layer can
 * remove the monitor without a controller round-trip, the success reply
 * is sent here; otherwise the completion callback replies later.
 * NOTE(review): listing is truncated — error labels, the errno-to-status
 * mapping structure and hci_dev_lock() appear to be elided.
 */
4552 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4553 void *data, u16 len)
4555 struct mgmt_cp_remove_adv_monitor *cp = data;
4556 struct mgmt_rp_remove_adv_monitor rp;
4557 struct mgmt_pending_cmd *cmd;
4558 u16 handle = __le16_to_cpu(cp->monitor_handle);
4562 BT_DBG("request for %s", hdev->name);
4563 rp.monitor_handle = cp->monitor_handle;
4567 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4568 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4569 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4570 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4571 status = MGMT_STATUS_BUSY;
4575 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4577 status = MGMT_STATUS_NO_RESOURCES;
/* handle == 0 means "remove all monitors". */
4582 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4584 pending = hci_remove_all_adv_monitor(hdev, &err);
4587 mgmt_pending_remove(cmd);
4590 status = MGMT_STATUS_INVALID_INDEX;
4592 status = MGMT_STATUS_FAILED;
4597 /* monitor can be removed without forwarding request to controller */
4599 mgmt_pending_remove(cmd);
4600 hci_dev_unlock(hdev);
4602 return mgmt_cmd_complete(sk, hdev->id,
4603 MGMT_OP_REMOVE_ADV_MONITOR,
4604 MGMT_STATUS_SUCCESS,
4608 hci_dev_unlock(hdev);
4612 hci_dev_unlock(hdev);
4613 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* HCI completion callback for Read Local OOB Data: parse either the
 * legacy (P-192 only) or the extended (P-192 + P-256) controller reply
 * into a mgmt reply for the pending READ_LOCAL_OOB_DATA command.  For
 * the legacy opcode the reply is shrunk so the P-256 fields are not sent.
 * NOTE(review): listing is truncated — the !cmd check and goto labels
 * appear to be elided.
 */
4617 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4618 u16 opcode, struct sk_buff *skb)
4620 struct mgmt_rp_read_local_oob_data mgmt_rp;
4621 size_t rp_size = sizeof(mgmt_rp);
4622 struct mgmt_pending_cmd *cmd;
4624 bt_dev_dbg(hdev, "status %u", status);
4626 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4630 if (status || !skb) {
4631 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4632 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4636 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4638 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4639 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short controller reply before reading it. */
4641 if (skb->len < sizeof(*rp)) {
4642 mgmt_cmd_status(cmd->sk, hdev->id,
4643 MGMT_OP_READ_LOCAL_OOB_DATA,
4644 MGMT_STATUS_FAILED);
4648 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4649 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy reply carries no P-256 data, so trim it off the mgmt reply. */
4651 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4653 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4655 if (skb->len < sizeof(*rp)) {
4656 mgmt_cmd_status(cmd->sk, hdev->id,
4657 MGMT_OP_READ_LOCAL_OOB_DATA,
4658 MGMT_STATUS_FAILED);
4662 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4663 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4665 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4666 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4669 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4670 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4673 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller and no duplicate pending command, then issues the extended
 * or legacy HCI Read Local OOB command depending on Secure Connections
 * support.  The reply is sent from read_local_oob_data_complete().
 * NOTE(review): listing is truncated — hci_dev_lock(), the !cmd check
 * and the unlock goto label appear to be elided.
 */
4676 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4677 void *data, u16 data_len)
4679 struct mgmt_pending_cmd *cmd;
4680 struct hci_request req;
4683 bt_dev_dbg(hdev, "sock %p", sk);
4687 if (!hdev_is_powered(hdev)) {
4688 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4689 MGMT_STATUS_NOT_POWERED);
4693 if (!lmp_ssp_capable(hdev)) {
4694 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4695 MGMT_STATUS_NOT_SUPPORTED);
4699 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4700 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4705 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4711 hci_req_init(&req, hdev);
/* SC-capable BR/EDR controllers also return P-256 OOB values. */
4713 if (bredr_sc_enabled(hdev))
4714 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4716 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4718 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4720 mgmt_pending_remove(cmd);
4723 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: accepts either the short form
 * (P-192 hash/rand only, BR/EDR addresses only) or the extended form
 * (P-192 + P-256).  All-zero hash/rand pairs disable the corresponding
 * OOB variant; for LE addresses the P-192 values must be zero since
 * legacy SMP OOB is not implemented.  Any other length is rejected.
 * NOTE(review): listing is truncated — hci_dev_lock(), several local
 * declarations and some branch closes appear to be elided.
 */
4727 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4728 void *data, u16 len)
4730 struct mgmt_addr_info *addr = data;
4733 bt_dev_dbg(hdev, "sock %p", sk);
4735 if (!bdaddr_type_is_valid(addr->type))
4736 return mgmt_cmd_complete(sk, hdev->id,
4737 MGMT_OP_ADD_REMOTE_OOB_DATA,
4738 MGMT_STATUS_INVALID_PARAMS,
4739 addr, sizeof(*addr));
/* Short form: P-192 data only, valid for BR/EDR addresses. */
4743 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4744 struct mgmt_cp_add_remote_oob_data *cp = data;
4747 if (cp->addr.type != BDADDR_BREDR) {
4748 err = mgmt_cmd_complete(sk, hdev->id,
4749 MGMT_OP_ADD_REMOTE_OOB_DATA,
4750 MGMT_STATUS_INVALID_PARAMS,
4751 &cp->addr, sizeof(cp->addr));
4755 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4756 cp->addr.type, cp->hash,
4757 cp->rand, NULL, NULL);
4759 status = MGMT_STATUS_FAILED;
4761 status = MGMT_STATUS_SUCCESS;
4763 err = mgmt_cmd_complete(sk, hdev->id,
4764 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4765 &cp->addr, sizeof(cp->addr));
4766 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4767 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4768 u8 *rand192, *hash192, *rand256, *hash256;
4771 if (bdaddr_type_is_le(cp->addr.type)) {
4772 /* Enforce zero-valued 192-bit parameters as
4773 * long as legacy SMP OOB isn't implemented.
4775 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4776 memcmp(cp->hash192, ZERO_KEY, 16)) {
4777 err = mgmt_cmd_complete(sk, hdev->id,
4778 MGMT_OP_ADD_REMOTE_OOB_DATA,
4779 MGMT_STATUS_INVALID_PARAMS,
4780 addr, sizeof(*addr));
4787 /* In case one of the P-192 values is set to zero,
4788 * then just disable OOB data for P-192.
4790 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4791 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4795 rand192 = cp->rand192;
4796 hash192 = cp->hash192;
4800 /* In case one of the P-256 values is set to zero, then just
4801 * disable OOB data for P-256.
4803 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4804 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4808 rand256 = cp->rand256;
4809 hash256 = cp->hash256;
4812 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4813 cp->addr.type, hash192, rand192,
4816 status = MGMT_STATUS_FAILED;
4818 status = MGMT_STATUS_SUCCESS;
4820 err = mgmt_cmd_complete(sk, hdev->id,
4821 MGMT_OP_ADD_REMOTE_OOB_DATA,
4822 status, &cp->addr, sizeof(cp->addr));
4824 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4826 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4827 MGMT_STATUS_INVALID_PARAMS);
4831 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BR/EDR addresses only.
 * BDADDR_ANY clears all stored remote OOB data; otherwise the entry for
 * the given address is removed (missing entry -> INVALID_PARAMS).
 * NOTE(review): listing is truncated — hci_dev_lock() and the goto after
 * the clear-all branch appear to be elided.
 */
4835 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4836 void *data, u16 len)
4838 struct mgmt_cp_remove_remote_oob_data *cp = data;
4842 bt_dev_dbg(hdev, "sock %p", sk);
4844 if (cp->addr.type != BDADDR_BREDR)
4845 return mgmt_cmd_complete(sk, hdev->id,
4846 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4847 MGMT_STATUS_INVALID_PARAMS,
4848 &cp->addr, sizeof(cp->addr));
4852 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4853 hci_remote_oob_data_clear(hdev);
4854 status = MGMT_STATUS_SUCCESS;
4858 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4860 status = MGMT_STATUS_INVALID_PARAMS;
4862 status = MGMT_STATUS_SUCCESS;
4865 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4866 status, &cp->addr, sizeof(cp->addr));
4868 hci_dev_unlock(hdev);
/* Completion handler for the start-discovery family of commands: find
 * whichever variant is pending (plain, service, or limited discovery),
 * complete it with the translated status, and wake up the suspend
 * machinery if discovery was being unpaused.
 * NOTE(review): listing is truncated — hci_dev_lock() and the !cmd
 * checks between pending_find() calls appear to be elided.
 */
4872 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4874 struct mgmt_pending_cmd *cmd;
4876 bt_dev_dbg(hdev, "status %u", status);
4880 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4882 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4885 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4888 cmd->cmd_complete(cmd, mgmt_status(status));
4889 mgmt_pending_remove(cmd);
4892 hci_dev_unlock(hdev);
4894 /* Handle suspend notifier */
4895 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4896 hdev->suspend_tasks)) {
4897 bt_dev_dbg(hdev, "Unpaused discovery");
4898 wake_up(&hdev->suspend_wait_q);
/* Check whether discovery @type is usable on @hdev: LE and interleaved
 * discovery need LE support, BR/EDR discovery needs BR/EDR support.
 * On failure *mgmt_status holds the mgmt error code to report.
 * NOTE(review): listing is truncated — the switch statement opening,
 * break/return statements and the default close appear to be elided.
 */
4902 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4903 uint8_t *mgmt_status)
4906 case DISCOV_TYPE_LE:
4907 *mgmt_status = mgmt_le_support(hdev);
4911 case DISCOV_TYPE_INTERLEAVED:
4912 *mgmt_status = mgmt_le_support(hdev);
4916 case DISCOV_TYPE_BREDR:
4917 *mgmt_status = mgmt_bredr_support(hdev);
4922 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared worker for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validates power state, idle
 * discovery state, discovery type and the suspend pause flag, clears
 * any previous discovery filter, records the requested type/limited
 * mode, queues a pending command and kicks hdev->discov_update.
 * NOTE(review): listing is truncated — hci_dev_lock(), the !cmd check
 * and the failed-goto labels appear to be elided.
 */
4929 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4930 u16 op, void *data, u16 len)
4932 struct mgmt_cp_start_discovery *cp = data;
4933 struct mgmt_pending_cmd *cmd;
4937 bt_dev_dbg(hdev, "sock %p", sk);
4941 if (!hdev_is_powered(hdev)) {
4942 err = mgmt_cmd_complete(sk, hdev->id, op,
4943 MGMT_STATUS_NOT_POWERED,
4944 &cp->type, sizeof(cp->type));
4948 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4949 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4950 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4951 &cp->type, sizeof(cp->type));
4955 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4956 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4957 &cp->type, sizeof(cp->type));
4961 /* Can't start discovery when it is paused */
4962 if (hdev->discovery_paused) {
4963 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4964 &cp->type, sizeof(cp->type));
4968 /* Clear the discovery filter first to free any previously
4969 * allocated memory for the UUID list.
4971 hci_discovery_filter_clear(hdev);
4973 hdev->discovery.type = cp->type;
4974 hdev->discovery.report_invalid_rssi = false;
4975 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4976 hdev->discovery.limited = true;
4978 hdev->discovery.limited = false;
4980 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4986 cmd->cmd_complete = generic_cmd_complete;
/* The actual scanning is done asynchronously on the request workqueue;
 * mgmt_start_discovery_complete() answers the command.
 */
4988 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4989 queue_work(hdev->req_workqueue, &hdev->discov_update);
4993 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler — thin wrapper around
 * start_discovery_internal().
 */
4997 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4998 void *data, u16 len)
5000 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY handler — thin wrapper around
 * start_discovery_internal() with the limited-discovery opcode.
 */
5004 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5005 void *data, u16 len)
5007 return start_discovery_internal(sk, hdev,
5008 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete callback for service discovery: reply with the pending
 * command's stored parameters as payload.
 * NOTE(review): listing is truncated — the payload arguments on the
 * continuation line are elided.
 */
5012 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
5015 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering — validates the variable-length UUID list (bounded by
 * max_uuid_count so sizeof(*cp) + 16*count cannot overflow u16), stores
 * the RSSI threshold and a kmemdup'd copy of the UUIDs in
 * hdev->discovery, then kicks the discov_update work.
 * NOTE(review): listing is truncated — hci_dev_lock(), the !cmd check
 * and the failed-goto labels appear to be elided.
 */
5019 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5020 void *data, u16 len)
5022 struct mgmt_cp_start_service_discovery *cp = data;
5023 struct mgmt_pending_cmd *cmd;
5024 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5025 u16 uuid_count, expected_len;
5029 bt_dev_dbg(hdev, "sock %p", sk);
5033 if (!hdev_is_powered(hdev)) {
5034 err = mgmt_cmd_complete(sk, hdev->id,
5035 MGMT_OP_START_SERVICE_DISCOVERY,
5036 MGMT_STATUS_NOT_POWERED,
5037 &cp->type, sizeof(cp->type));
5041 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5042 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5043 err = mgmt_cmd_complete(sk, hdev->id,
5044 MGMT_OP_START_SERVICE_DISCOVERY,
5045 MGMT_STATUS_BUSY, &cp->type,
5050 if (hdev->discovery_paused) {
5051 err = mgmt_cmd_complete(sk, hdev->id,
5052 MGMT_OP_START_SERVICE_DISCOVERY,
5053 MGMT_STATUS_BUSY, &cp->type,
/* Validate the claimed UUID count before computing expected_len so the
 * u16 arithmetic below cannot wrap.
 */
5058 uuid_count = __le16_to_cpu(cp->uuid_count);
5059 if (uuid_count > max_uuid_count) {
5060 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5062 err = mgmt_cmd_complete(sk, hdev->id,
5063 MGMT_OP_START_SERVICE_DISCOVERY,
5064 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5069 expected_len = sizeof(*cp) + uuid_count * 16;
5070 if (expected_len != len) {
5071 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5073 err = mgmt_cmd_complete(sk, hdev->id,
5074 MGMT_OP_START_SERVICE_DISCOVERY,
5075 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5080 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5081 err = mgmt_cmd_complete(sk, hdev->id,
5082 MGMT_OP_START_SERVICE_DISCOVERY,
5083 status, &cp->type, sizeof(cp->type));
5087 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5094 cmd->cmd_complete = service_discovery_cmd_complete;
5096 /* Clear the discovery filter first to free any previously
5097 * allocated memory for the UUID list.
5099 hci_discovery_filter_clear(hdev);
5101 hdev->discovery.result_filtering = true;
5102 hdev->discovery.type = cp->type;
5103 hdev->discovery.rssi = cp->rssi;
5104 hdev->discovery.uuid_count = uuid_count;
5106 if (uuid_count > 0) {
5107 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5109 if (!hdev->discovery.uuids) {
5110 err = mgmt_cmd_complete(sk, hdev->id,
5111 MGMT_OP_START_SERVICE_DISCOVERY,
5113 &cp->type, sizeof(cp->type));
5114 mgmt_pending_remove(cmd);
5119 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5120 queue_work(hdev->req_workqueue, &hdev->discov_update);
5124 hci_dev_unlock(hdev);
/* Completion handler for STOP_DISCOVERY: answer the pending command with
 * the translated status and wake the suspend machinery if discovery was
 * being paused for suspend.
 * NOTE(review): listing is truncated — hci_dev_lock() and the !cmd
 * check appear to be elided.
 */
5128 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5130 struct mgmt_pending_cmd *cmd;
5132 bt_dev_dbg(hdev, "status %u", status);
5136 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5138 cmd->cmd_complete(cmd, mgmt_status(status));
5139 mgmt_pending_remove(cmd);
5142 hci_dev_unlock(hdev);
5144 /* Handle suspend notifier */
5145 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5146 bt_dev_dbg(hdev, "Paused discovery");
5147 wake_up(&hdev->suspend_wait_q);
/* MGMT_OP_STOP_DISCOVERY handler: rejected when no discovery is active
 * or when the requested type does not match the running one; otherwise
 * queues a pending command, moves state to STOPPING and defers the real
 * work to the discov_update worker.
 * NOTE(review): listing is truncated — hci_dev_lock(), the !cmd check
 * and the failed label appear to be elided.
 */
5151 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5154 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5155 struct mgmt_pending_cmd *cmd;
5158 bt_dev_dbg(hdev, "sock %p", sk);
5162 if (!hci_discovery_active(hdev)) {
5163 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5164 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5165 sizeof(mgmt_cp->type));
5169 if (hdev->discovery.type != mgmt_cp->type) {
5170 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5171 MGMT_STATUS_INVALID_PARAMS,
5172 &mgmt_cp->type, sizeof(mgmt_cp->type));
5176 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5182 cmd->cmd_complete = generic_cmd_complete;
5184 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5185 queue_work(hdev->req_workqueue, &hdev->discov_update);
5189 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during active discovery, mark an inquiry
 * cache entry's name as known (no remote name request needed) or needed
 * (schedule it for name resolution via
 * hci_inquiry_cache_update_resolve()).
 * NOTE(review): listing is truncated — hci_dev_lock() and the !e check
 * appear to be elided.
 */
5193 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5196 struct mgmt_cp_confirm_name *cp = data;
5197 struct inquiry_entry *e;
5200 bt_dev_dbg(hdev, "sock %p", sk);
5204 if (!hci_discovery_active(hdev)) {
5205 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5206 MGMT_STATUS_FAILED, &cp->addr,
5211 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5213 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5214 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5219 if (cp->name_known) {
5220 e->name_state = NAME_KNOWN;
5223 e->name_state = NAME_NEEDED;
5224 hci_inquiry_cache_update_resolve(hdev, e);
5227 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5228 &cp->addr, sizeof(cp->addr));
5231 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to hdev->reject_list and
 * broadcast MGMT_EV_DEVICE_BLOCKED (skipping the requester's socket).
 * NOTE(review): listing is truncated — hci_dev_lock(), the err check
 * after hci_bdaddr_list_add() and the done label appear to be elided.
 */
5235 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5238 struct mgmt_cp_block_device *cp = data;
5242 bt_dev_dbg(hdev, "sock %p", sk);
5244 if (!bdaddr_type_is_valid(cp->addr.type))
5245 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5246 MGMT_STATUS_INVALID_PARAMS,
5247 &cp->addr, sizeof(cp->addr));
5251 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5254 status = MGMT_STATUS_FAILED;
5258 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5260 status = MGMT_STATUS_SUCCESS;
5263 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5264 &cp->addr, sizeof(cp->addr));
5266 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from
 * hdev->reject_list and broadcast MGMT_EV_DEVICE_UNBLOCKED.  A missing
 * entry is reported as INVALID_PARAMS.
 * NOTE(review): listing is truncated — hci_dev_lock(), the err check
 * after hci_bdaddr_list_del() and the done label appear to be elided.
 */
5271 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5274 struct mgmt_cp_unblock_device *cp = data;
5278 bt_dev_dbg(hdev, "sock %p", sk);
5280 if (!bdaddr_type_is_valid(cp->addr.type))
5281 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5282 MGMT_STATUS_INVALID_PARAMS,
5283 &cp->addr, sizeof(cp->addr));
5287 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5290 status = MGMT_STATUS_INVALID_PARAMS;
5294 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5296 status = MGMT_STATUS_SUCCESS;
5299 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5300 &cp->addr, sizeof(cp->addr));
5302 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source must
 * be 0x0000-0x0002 per the DI profile), reply, then refresh the EIR data
 * so the new DI record is advertised.
 * NOTE(review): listing is truncated — hci_dev_lock() and the reply
 * payload arguments appear to be elided.
 */
5307 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5310 struct mgmt_cp_set_device_id *cp = data;
5311 struct hci_request req;
5315 bt_dev_dbg(hdev, "sock %p", sk);
5317 source = __le16_to_cpu(cp->source);
5319 if (source > 0x0002)
5320 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5321 MGMT_STATUS_INVALID_PARAMS);
5325 hdev->devid_source = source;
5326 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5327 hdev->devid_product = __le16_to_cpu(cp->product);
5328 hdev->devid_version = __le16_to_cpu(cp->version);
5330 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5333 hci_req_init(&req, hdev);
5334 __hci_req_update_eir(&req);
5335 hci_req_run(&req, NULL);
5337 hci_dev_unlock(hdev);
/* hci_request completion callback used when re-enabling instance
 * advertising; only logs the status.
 */
5342 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5345 bt_dev_dbg(hdev, "status %u", status);
/* HCI completion callback for SET_ADVERTISING: on error, fail every
 * pending SET_ADVERTISING command; on success, sync the HCI_ADVERTISING
 * flag with the controller state, answer the pending commands, emit
 * new_settings, wake any suspend pause/unpause waiter, and — if "Set
 * Advertising" was just turned off while advertising instances exist —
 * reschedule instance advertising.
 * NOTE(review): listing is truncated — hci_dev_lock(), several local
 * declarations (instance, err) and goto labels appear to be elided.
 */
5348 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5351 struct cmd_lookup match = { NULL, hdev };
5352 struct hci_request req;
5354 struct adv_info *adv_instance;
5360 u8 mgmt_err = mgmt_status(status);
5362 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5363 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's actual advertising state into the flag. */
5367 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5368 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5370 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5372 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5375 new_settings(hdev, match.sk);
5380 /* Handle suspend notifier */
5381 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5382 hdev->suspend_tasks)) {
5383 bt_dev_dbg(hdev, "Paused advertising");
5384 wake_up(&hdev->suspend_wait_q);
5385 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5386 hdev->suspend_tasks)) {
5387 bt_dev_dbg(hdev, "Unpaused advertising");
5388 wake_up(&hdev->suspend_wait_q);
5391 /* If "Set Advertising" was just disabled and instance advertising was
5392 * set up earlier, then re-enable multi-instance advertising.
5394 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5395 list_empty(&hdev->adv_instances))
5398 instance = hdev->cur_adv_instance;
/* No current instance selected: fall back to the first configured one. */
5400 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5401 struct adv_info, list);
5405 instance = adv_instance->instance;
5408 hci_req_init(&req, hdev);
5410 err = __hci_req_schedule_adv_instance(&req, instance, true);
5413 err = hci_req_run(&req, enable_advertising_instance);
5416 bt_dev_err(hdev, "failed to re-configure advertising");
5419 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler.  val: 0x00 = off, 0x01 = on,
 * 0x02 = on + connectable.  Rejected when LE is unsupported, when the
 * experimental LL Privacy mode is enabled, or while advertising is
 * paused for suspend.  When no HCI traffic is needed (powered off, no
 * state change, LE link up, or active LE scan) only the flags are
 * toggled and the settings response is sent directly; otherwise the HCI
 * request is built and set_advertising_complete() finishes the command.
 * NOTE(review): listing is truncated — hci_dev_lock(), the val
 * assignment, the !cmd check and several braces appear to be elided.
 */
5422 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5425 struct mgmt_mode *cp = data;
5426 struct mgmt_pending_cmd *cmd;
5427 struct hci_request req;
5431 bt_dev_dbg(hdev, "sock %p", sk);
5433 status = mgmt_le_support(hdev);
5435 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5438 /* Enabling the experimental LL Privay support disables support for
5441 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5443 MGMT_STATUS_NOT_SUPPORTED);
5445 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5446 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5447 MGMT_STATUS_INVALID_PARAMS);
5449 if (hdev->advertising_paused)
5450 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5457 /* The following conditions are ones which mean that we should
5458 * not do any HCI communication but directly send a mgmt
5459 * response to user space (after toggling the flag if
5462 if (!hdev_is_powered(hdev) ||
5463 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5464 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5465 hci_conn_num(hdev, LE_LINK) > 0 ||
5466 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5467 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5471 hdev->cur_adv_instance = 0x00;
5472 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5473 if (cp->val == 0x02)
5474 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5476 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5478 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5479 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5482 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5487 err = new_settings(hdev, sk);
5492 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5493 pending_find(MGMT_OP_SET_LE, hdev)) {
5494 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5499 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5505 hci_req_init(&req, hdev);
5507 if (cp->val == 0x02)
5508 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5510 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5512 cancel_adv_timeout(hdev);
5515 /* Switch to instance "0" for the Set Advertising setting.
5516 * We cannot use update_[adv|scan_rsp]_data() here as the
5517 * HCI_ADVERTISING flag is not yet set.
5519 hdev->cur_adv_instance = 0x00;
5521 if (ext_adv_capable(hdev)) {
5522 __hci_req_start_ext_adv(&req, 0x00);
5524 __hci_req_update_adv_data(&req, 0x00);
5525 __hci_req_update_scan_rsp_data(&req, 0x00);
5526 __hci_req_enable_advertising(&req);
5529 __hci_req_disable_advertising(&req);
5532 err = hci_req_run(&req, set_advertising_complete);
5534 mgmt_pending_remove(cmd);
5537 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed while powered off on
 * an LE-capable controller.  A non-ANY address must not be BDADDR_NONE
 * and must have its two most significant bits set, per the Bluetooth
 * Core spec definition of a static random address.
 * NOTE(review): listing is truncated — hci_dev_lock() appears to be
 * elided before the bacpy().
 */
5541 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5542 void *data, u16 len)
5544 struct mgmt_cp_set_static_address *cp = data;
5547 bt_dev_dbg(hdev, "sock %p", sk);
5549 if (!lmp_le_capable(hdev))
5550 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5551 MGMT_STATUS_NOT_SUPPORTED);
5553 if (hdev_is_powered(hdev))
5554 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5555 MGMT_STATUS_REJECTED);
5557 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5558 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5559 return mgmt_cmd_status(sk, hdev->id,
5560 MGMT_OP_SET_STATIC_ADDRESS,
5561 MGMT_STATUS_INVALID_PARAMS);
5563 /* Two most significant bits shall be set */
5564 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5565 return mgmt_cmd_status(sk, hdev->id,
5566 MGMT_OP_SET_STATIC_ADDRESS,
5567 MGMT_STATUS_INVALID_PARAMS);
5572 bacpy(&hdev->static_addr, &cp->bdaddr);
5574 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5578 err = new_settings(hdev, sk);
5581 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate and store the LE scan
 * interval and window.
 *
 * Both values must be in the range 0x0004..0x4000 and the window must
 * not exceed the interval (HCI LE Set Scan Parameters constraints).
 * If passive background scanning is currently running (HCI_LE_SCAN set
 * while discovery is stopped), it is restarted so the new parameters
 * take effect immediately.
 */
5585 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5586 void *data, u16 len)
5588 struct mgmt_cp_set_scan_params *cp = data;
5589 __u16 interval, window;
5592 bt_dev_dbg(hdev, "sock %p", sk);
5594 if (!lmp_le_capable(hdev))
5595 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5596 MGMT_STATUS_NOT_SUPPORTED);
5598 interval = __le16_to_cpu(cp->interval);
5600 if (interval < 0x0004 || interval > 0x4000)
5601 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5602 MGMT_STATUS_INVALID_PARAMS);
5604 window = __le16_to_cpu(cp->window);
5606 if (window < 0x0004 || window > 0x4000)
5607 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5608 MGMT_STATUS_INVALID_PARAMS);
5610 if (window > interval)
5611 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5612 MGMT_STATUS_INVALID_PARAMS);
5616 hdev->le_scan_interval = interval;
5617 hdev->le_scan_window = window;
5619 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5622 /* If background scan is running, restart it so new parameters are
5625 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5626 hdev->discovery.state == DISCOVERY_STOPPED) {
5627 struct hci_request req;
5629 hci_req_init(&req, hdev);
/* Disable then re-enable passive scanning to apply the new values. */
5631 hci_req_add_le_scan_disable(&req, false);
5632 hci_req_add_le_passive_scan(&req);
/* Best-effort restart; result is intentionally not checked. */
5634 hci_req_run(&req, NULL);
5637 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Looks up the pending mgmt command; on controller error it replies with
 * the mapped mgmt status, otherwise it updates HCI_FAST_CONNECTABLE to
 * match the requested mode and emits the settings response plus a
 * New Settings event.  The pending command is removed in all cases.
 */
5642 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5645 struct mgmt_pending_cmd *cmd;
5647 bt_dev_dbg(hdev, "status 0x%02x", status);
5651 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5656 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5657 mgmt_status(status));
5659 struct mgmt_mode *cp = cmd->param;
5662 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5664 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5666 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5667 new_settings(hdev, cmd->sk);
5670 mgmt_pending_remove(cmd);
5673 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable
 * (interlaced page scan) mode for BR/EDR.
 *
 * Requires BR/EDR to be enabled and a controller of at least Bluetooth
 * 1.2; val must be 0x00 or 0x01.  While powered off only the flag is
 * flipped; when powered the change is sent to the controller through
 * __hci_req_write_fast_connectable() and finished asynchronously in
 * fast_connectable_complete().
 */
5676 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5677 void *data, u16 len)
5679 struct mgmt_mode *cp = data;
5680 struct mgmt_pending_cmd *cmd;
5681 struct hci_request req;
5684 bt_dev_dbg(hdev, "sock %p", sk);
5686 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5687 hdev->hci_ver < BLUETOOTH_VER_1_2)
5688 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5689 MGMT_STATUS_NOT_SUPPORTED);
5691 if (cp->val != 0x00 && cp->val != 0x01)
5692 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5693 MGMT_STATUS_INVALID_PARAMS);
/* Only one SET_FAST_CONNECTABLE may be in flight at a time. */
5697 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5698 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op request: already in the requested state, just acknowledge. */
5703 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5704 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
/* Powered off: update the flag only, no HCI traffic needed. */
5709 if (!hdev_is_powered(hdev)) {
5710 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5711 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5713 new_settings(hdev, sk);
5717 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5724 hci_req_init(&req, hdev);
5726 __hci_req_write_fast_connectable(&req, cp->val);
5728 err = hci_req_run(&req, fast_connectable_complete);
5730 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5731 MGMT_STATUS_FAILED);
5732 mgmt_pending_remove(cmd);
5736 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_BREDR.
 *
 * On controller failure the HCI_BREDR_ENABLED flag that was set
 * optimistically in set_bredr() is rolled back and an error status is
 * returned to the caller; on success the settings response and a
 * New Settings event are sent.  The pending command is always removed.
 */
5741 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5743 struct mgmt_pending_cmd *cmd;
5745 bt_dev_dbg(hdev, "status 0x%02x", status);
5749 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5754 u8 mgmt_err = mgmt_status(status);
5756 /* We need to restore the flag if related HCI commands
5759 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5761 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5763 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5764 new_settings(hdev, cmd->sk);
5767 mgmt_pending_remove(cmd);
5770 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller.
 *
 * Requires both BR/EDR and LE capability and LE to be enabled; val must
 * be 0x00 or 0x01.  Disabling while powered on is rejected, as is
 * re-enabling when a static address or Secure Connections is in use
 * (see the in-body comment).  On the powered enable path the flag is
 * set up front so advertising data is generated correctly, then page
 * scan and advertising data updates are queued and completed in
 * set_bredr_complete().
 */
5773 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5775 struct mgmt_mode *cp = data;
5776 struct mgmt_pending_cmd *cmd;
5777 struct hci_request req;
5780 bt_dev_dbg(hdev, "sock %p", sk);
5782 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5783 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5784 MGMT_STATUS_NOT_SUPPORTED);
5786 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5787 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5788 MGMT_STATUS_REJECTED);
5790 if (cp->val != 0x00 && cp->val != 0x01)
5791 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5792 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state: acknowledge without any work. */
5796 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5797 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: adjust flags only.  Disabling BR/EDR also clears all
 * BR/EDR-only settings that would no longer make sense.
 */
5801 if (!hdev_is_powered(hdev)) {
5803 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5804 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5805 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5806 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5807 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5810 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5812 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5816 err = new_settings(hdev, sk);
5820 /* Reject disabling when powered on */
5822 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5823 MGMT_STATUS_REJECTED);
5826 /* When configuring a dual-mode controller to operate
5827 * with LE only and using a static address, then switching
5828 * BR/EDR back on is not allowed.
5830 * Dual-mode controllers shall operate with the public
5831 * address as its identity address for BR/EDR and LE. So
5832 * reject the attempt to create an invalid configuration.
5834 * The same restrictions applies when secure connections
5835 * has been enabled. For BR/EDR this is a controller feature
5836 * while for LE it is a host stack feature. This means that
5837 * switching BR/EDR back on when secure connections has been
5838 * enabled is not a supported transaction.
5840 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5841 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5842 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5844 MGMT_STATUS_REJECTED);
5849 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5855 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5861 /* We need to flip the bit already here so that
5862 * hci_req_update_adv_data generates the correct flags.
5864 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5866 hci_req_init(&req, hdev);
5868 __hci_req_write_fast_connectable(&req, false);
5869 __hci_req_update_scan(&req);
5871 /* Since only the advertising data flags will change, there
5872 * is no need to update the scan response data.
5874 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5876 err = hci_req_run(&req, set_bredr_complete);
5878 mgmt_pending_remove(cmd);
5881 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_SECURE_CONN.
 *
 * On error the pending command is answered with the mapped status.
 * On success the HCI_SC_ENABLED/HCI_SC_ONLY flag pair is updated to
 * reflect the requested mode — the three set/clear combinations below
 * presumably correspond to cp->val 0x00 (off), 0x01 (SC enabled) and
 * 0x02 (SC-only); the selecting branches are not visible here, so
 * confirm against cmd->param before relying on this mapping.
 */
5885 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5887 struct mgmt_pending_cmd *cmd;
5888 struct mgmt_mode *cp;
5890 bt_dev_dbg(hdev, "status %u", status);
5894 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5899 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5900 mgmt_status(status));
5908 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5909 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5912 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5913 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5916 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5917 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5921 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5922 new_settings(hdev, cmd->sk);
5925 mgmt_pending_remove(cmd);
5927 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections mode.
 *
 * val: 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode.  Requires
 * either controller SC support or LE enabled; if BR/EDR is active with
 * an SC-capable controller, SSP must be enabled first.  When the
 * controller is unpowered, not SC-capable, or BR/EDR is disabled the
 * flags are updated host-side only; otherwise Write Secure Connections
 * Host Support is issued and completed in sc_enable_complete().
 */
5930 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5931 void *data, u16 len)
5933 struct mgmt_mode *cp = data;
5934 struct mgmt_pending_cmd *cmd;
5935 struct hci_request req;
5939 bt_dev_dbg(hdev, "sock %p", sk);
5941 if (!lmp_sc_capable(hdev) &&
5942 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5943 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5944 MGMT_STATUS_NOT_SUPPORTED);
5946 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5947 lmp_sc_capable(hdev) &&
5948 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5949 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5950 MGMT_STATUS_REJECTED);
5952 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5953 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5954 MGMT_STATUS_INVALID_PARAMS);
/* Host-flag-only path: no HCI command can or needs to be sent. */
5958 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5959 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5963 changed = !hci_dev_test_and_set_flag(hdev,
5965 if (cp->val == 0x02)
5966 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5968 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5970 changed = hci_dev_test_and_clear_flag(hdev,
5972 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5975 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5980 err = new_settings(hdev, sk);
5985 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5986 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Requested state already matches both flags: just acknowledge. */
5993 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5994 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5995 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5999 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6005 hci_req_init(&req, hdev);
6006 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
6007 err = hci_req_run(&req, sc_enable_complete);
6009 mgmt_pending_remove(cmd);
6014 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: control handling of SSP debug keys.
 *
 * val: 0x00 = discard debug keys, 0x01 = keep them, 0x02 = keep them
 * and actively use debug-key pairing.  HCI_KEEP_DEBUG_KEYS tracks
 * retention, HCI_USE_DEBUG_KEYS tracks active use; when active use
 * toggles on a powered, SSP-enabled controller the change is pushed
 * with HCI Write SSP Debug Mode.
 */
6018 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6019 void *data, u16 len)
6021 struct mgmt_mode *cp = data;
6022 bool changed, use_changed;
6025 bt_dev_dbg(hdev, "sock %p", sk);
6027 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6028 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6029 MGMT_STATUS_INVALID_PARAMS);
6034 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6036 changed = hci_dev_test_and_clear_flag(hdev,
6037 HCI_KEEP_DEBUG_KEYS);
6039 if (cp->val == 0x02)
6040 use_changed = !hci_dev_test_and_set_flag(hdev,
6041 HCI_USE_DEBUG_KEYS);
6043 use_changed = hci_dev_test_and_clear_flag(hdev,
6044 HCI_USE_DEBUG_KEYS);
/* Only inform the controller when the "use" state actually changed
 * and SSP is active; otherwise the flag update alone suffices.
 */
6046 if (hdev_is_powered(hdev) && use_changed &&
6047 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6048 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6049 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6050 sizeof(mode), &mode);
6053 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6058 err = new_settings(hdev, sk);
6061 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (RPA use).
 *
 * privacy: 0x00 = off, 0x01 = privacy with the supplied IRK,
 * 0x02 = limited privacy (RPA only when discoverable).  Only allowed
 * while powered off and on LE-capable controllers.  Enabling stores
 * the IRK, marks the RPA expired so a fresh one is generated, and
 * flags per-instance RPA expiry; disabling wipes the IRK and clears
 * the related flags.
 */
6065 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6068 struct mgmt_cp_set_privacy *cp = cp_data;
6072 bt_dev_dbg(hdev, "sock %p", sk);
6074 if (!lmp_le_capable(hdev))
6075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6076 MGMT_STATUS_NOT_SUPPORTED);
6078 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6079 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6080 MGMT_STATUS_INVALID_PARAMS);
6082 if (hdev_is_powered(hdev))
6083 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6084 MGMT_STATUS_REJECTED);
6088 /* If user space supports this command it is also expected to
6089 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6091 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6094 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6095 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new RPA on the next use. */
6096 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6097 hci_adv_instances_set_rpa_expired(hdev, true);
6098 if (cp->privacy == 0x02)
6099 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6101 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6103 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6104 memset(hdev->irk, 0, sizeof(hdev->irk));
6105 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6106 hci_adv_instances_set_rpa_expired(hdev, false);
6107 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6110 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6115 err = new_settings(hdev, sk);
6118 hci_dev_unlock(hdev);
/* Validate the address of an IRK entry supplied by user space:
 * public LE addresses are always acceptable, while random LE addresses
 * must be static random addresses (two most significant bits set).
 */
6122 static bool irk_is_valid(struct mgmt_irk_info *irk)
6124 switch (irk->addr.type) {
6125 case BDADDR_LE_PUBLIC:
6128 case BDADDR_LE_RANDOM:
6129 /* Two most significant bits shall be set */
6130 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the whole set of stored Identity
 * Resolving Keys with the list supplied by user space.
 *
 * Validates the count against the maximum that fits a mgmt message,
 * checks the payload length with struct_size(), and rejects the entire
 * load if any entry's address is invalid.  Existing IRKs are cleared
 * first; entries matching the blocked-key list are skipped with a
 * warning.  Finally HCI_RPA_RESOLVING is set since user space clearly
 * supports IRKs.
 */
6138 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6141 struct mgmt_cp_load_irks *cp = cp_data;
6142 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6143 sizeof(struct mgmt_irk_info));
6144 u16 irk_count, expected_len;
6147 bt_dev_dbg(hdev, "sock %p", sk);
6149 if (!lmp_le_capable(hdev))
6150 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6151 MGMT_STATUS_NOT_SUPPORTED);
6153 irk_count = __le16_to_cpu(cp->irk_count);
6154 if (irk_count > max_irk_count) {
6155 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6157 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6158 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload size exactly. */
6161 expected_len = struct_size(cp, irks, irk_count);
6162 if (expected_len != len) {
6163 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6165 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6166 MGMT_STATUS_INVALID_PARAMS);
6169 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate everything up front so the load is all-or-nothing. */
6171 for (i = 0; i < irk_count; i++) {
6172 struct mgmt_irk_info *key = &cp->irks[i];
6174 if (!irk_is_valid(key))
6175 return mgmt_cmd_status(sk, hdev->id,
6177 MGMT_STATUS_INVALID_PARAMS);
6182 hci_smp_irks_clear(hdev);
6184 for (i = 0; i < irk_count; i++) {
6185 struct mgmt_irk_info *irk = &cp->irks[i];
6187 if (hci_is_blocked_key(hdev,
6188 HCI_BLOCKED_KEY_TYPE_IRK,
6190 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6195 hci_add_irk(hdev, &irk->addr.bdaddr,
6196 le_addr_type(irk->addr.type), irk->val,
6200 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6202 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6204 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING_PARAMS handler (Tizen extension): store LE
 * advertising parameters (interval min/max, filter policy, adv type)
 * in hdev for later use.
 *
 * Rejected while the mgmt SET_ADVERTISING setting is active, since the
 * core then owns the advertising parameters.  Interval bounds follow
 * HCI LE Set Advertising Parameters (min >= 0x0020, max <= 0x4000,
 * min <= max).
 */
6210 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
6211 void *data, u16 len)
6213 struct mgmt_cp_set_advertising_params *cp = data;
6218 BT_DBG("%s", hdev->name);
6220 if (!lmp_le_capable(hdev))
6221 return mgmt_cmd_status(sk, hdev->id,
6222 MGMT_OP_SET_ADVERTISING_PARAMS,
6223 MGMT_STATUS_NOT_SUPPORTED);
6225 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6226 return mgmt_cmd_status(sk, hdev->id,
6227 MGMT_OP_SET_ADVERTISING_PARAMS,
6230 min_interval = __le16_to_cpu(cp->interval_min);
6231 max_interval = __le16_to_cpu(cp->interval_max);
6233 if (min_interval > max_interval ||
6234 min_interval < 0x0020 || max_interval > 0x4000)
6235 return mgmt_cmd_status(sk, hdev->id,
6236 MGMT_OP_SET_ADVERTISING_PARAMS,
6237 MGMT_STATUS_INVALID_PARAMS);
6241 hdev->le_adv_min_interval = min_interval;
6242 hdev->le_adv_max_interval = max_interval;
6243 hdev->adv_filter_policy = cp->filter_policy;
6244 hdev->adv_type = cp->type;
6246 err = mgmt_cmd_complete(sk, hdev->id,
6247 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
6249 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_ADVERTISING_DATA (Tizen):
 * reply to the pending command with either the mapped error status or
 * a success completion, then drop the pending command.
 */
6254 static void set_advertising_data_complete(struct hci_dev *hdev,
6255 u8 status, u16 opcode)
6257 struct mgmt_cp_set_advertising_data *cp;
6258 struct mgmt_pending_cmd *cmd;
6260 BT_DBG("status 0x%02x", status);
6264 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
6271 mgmt_cmd_status(cmd->sk, hdev->id,
6272 MGMT_OP_SET_ADVERTISING_DATA,
6273 mgmt_status(status));
6275 mgmt_cmd_complete(cmd->sk, hdev->id,
6276 MGMT_OP_SET_ADVERTISING_DATA, 0,
6279 mgmt_pending_remove(cmd);
6282 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING_DATA handler (Tizen): pass raw advertising
 * data straight to the controller with HCI LE Set Advertising Data.
 *
 * The payload length is bounded by HCI_MAX_AD_LENGTH; only one such
 * request may be pending at a time.  Completion is handled in
 * set_advertising_data_complete().
 */
6285 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6286 void *data, u16 len)
6288 struct mgmt_pending_cmd *cmd;
6289 struct hci_request req;
6290 struct mgmt_cp_set_advertising_data *cp = data;
6291 struct hci_cp_le_set_adv_data adv;
6294 BT_DBG("%s", hdev->name);
6296 if (!lmp_le_capable(hdev)) {
6297 return mgmt_cmd_status(sk, hdev->id,
6298 MGMT_OP_SET_ADVERTISING_DATA,
6299 MGMT_STATUS_NOT_SUPPORTED);
6304 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6305 err = mgmt_cmd_status(sk, hdev->id,
6306 MGMT_OP_SET_ADVERTISING_DATA,
6311 if (len > HCI_MAX_AD_LENGTH) {
6312 err = mgmt_cmd_status(sk, hdev->id,
6313 MGMT_OP_SET_ADVERTISING_DATA,
6314 MGMT_STATUS_INVALID_PARAMS);
6318 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6325 hci_req_init(&req, hdev);
/* Zero the full HCI structure so unused trailing bytes are 0. */
6327 memset(&adv, 0, sizeof(adv));
6328 memcpy(adv.data, cp->data, len);
6331 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6333 err = hci_req_run(&req, set_advertising_data_complete);
6335 mgmt_pending_remove(cmd);
6338 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_SCAN_RSP_DATA (Tizen):
 * reply to the pending command with either the mapped error status or
 * a success completion, then drop the pending command.
 */
6343 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6346 struct mgmt_cp_set_scan_rsp_data *cp;
6347 struct mgmt_pending_cmd *cmd;
6349 BT_DBG("status 0x%02x", status);
6353 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6360 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6361 mgmt_status(status));
6363 mgmt_cmd_complete(cmd->sk, hdev->id,
6364 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6367 mgmt_pending_remove(cmd);
6370 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_RSP_DATA handler (Tizen): pass raw scan response
 * data to the controller with HCI LE Set Scan Response Data.
 *
 * Mirrors set_advertising_data(): payload capped at HCI_MAX_AD_LENGTH,
 * single pending request at a time, completion handled in
 * set_scan_rsp_data_complete().
 */
6373 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6376 struct mgmt_pending_cmd *cmd;
6377 struct hci_request req;
6378 struct mgmt_cp_set_scan_rsp_data *cp = data;
6379 struct hci_cp_le_set_scan_rsp_data rsp;
6382 BT_DBG("%s", hdev->name);
6384 if (!lmp_le_capable(hdev))
6385 return mgmt_cmd_status(sk, hdev->id,
6386 MGMT_OP_SET_SCAN_RSP_DATA,
6387 MGMT_STATUS_NOT_SUPPORTED);
6391 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6392 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6397 if (len > HCI_MAX_AD_LENGTH) {
6398 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6399 MGMT_STATUS_INVALID_PARAMS);
6403 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6409 hci_req_init(&req, hdev);
/* Zero the full HCI structure so unused trailing bytes are 0. */
6411 memset(&rsp, 0, sizeof(rsp));
6412 memcpy(rsp.data, cp->data, len);
6415 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6417 err = hci_req_run(&req, set_scan_rsp_data_complete);
6419 mgmt_pending_remove(cmd);
6422 hci_dev_unlock(hdev);
6427 /* Adv White List feature */
/* HCI request completion for MGMT_OP_ADD_DEV_WHITE_LIST (Tizen):
 * on error report the mapped status; on success echo the original
 * command parameters back in the completion.  The pending command is
 * removed either way.
 */
6428 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6430 struct mgmt_cp_add_dev_white_list *cp;
6431 struct mgmt_pending_cmd *cmd;
6433 BT_DBG("status 0x%02x", status);
6437 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
6444 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6445 mgmt_status(status));
6447 mgmt_cmd_complete(cmd->sk, hdev->id,
6448 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
6450 mgmt_pending_remove(cmd);
6453 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_DEV_WHITE_LIST handler (Tizen): forward the mgmt payload
 * directly as an HCI LE Add Device To White List command.
 *
 * Requires LE capability and a powered controller; only one such
 * request may be pending.  Completed in add_white_list_complete().
 * Note the mgmt parameter layout is passed as-is to HCI, so the two
 * structures are assumed to match — confirm against mgmt_tizen.h.
 */
6456 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
6457 void *data, u16 len)
6459 struct mgmt_pending_cmd *cmd;
6460 struct mgmt_cp_add_dev_white_list *cp = data;
6461 struct hci_request req;
6464 BT_DBG("%s", hdev->name);
6466 if (!lmp_le_capable(hdev))
6467 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6468 MGMT_STATUS_NOT_SUPPORTED);
6470 if (!hdev_is_powered(hdev))
6471 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6472 MGMT_STATUS_REJECTED);
6476 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
6477 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6482 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
6488 hci_req_init(&req, hdev);
6490 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
6492 err = hci_req_run(&req, add_white_list_complete);
6494 mgmt_pending_remove(cmd);
6499 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST
 * (Tizen): on error report the mapped status; on success send the
 * completion.  The pending command is removed either way.
 */
6504 static void remove_from_white_list_complete(struct hci_dev *hdev,
6505 u8 status, u16 opcode)
6507 struct mgmt_cp_remove_dev_from_white_list *cp;
6508 struct mgmt_pending_cmd *cmd;
6510 BT_DBG("status 0x%02x", status);
6514 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
6521 mgmt_cmd_status(cmd->sk, hdev->id,
6522 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6523 mgmt_status(status));
6525 mgmt_cmd_complete(cmd->sk, hdev->id,
6526 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
6529 mgmt_pending_remove(cmd);
6532 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST handler (Tizen): forward the mgmt
 * payload as an HCI LE Remove Device From White List command.
 *
 * Same preconditions as add_white_list(): LE capable, powered, no
 * request of this type already pending.  Completed in
 * remove_from_white_list_complete().
 */
6535 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
6536 void *data, u16 len)
6538 struct mgmt_pending_cmd *cmd;
6539 struct mgmt_cp_remove_dev_from_white_list *cp = data;
6540 struct hci_request req;
6543 BT_DBG("%s", hdev->name);
6545 if (!lmp_le_capable(hdev))
6546 return mgmt_cmd_status(sk, hdev->id,
6547 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6548 MGMT_STATUS_NOT_SUPPORTED);
6550 if (!hdev_is_powered(hdev))
6551 return mgmt_cmd_status(sk, hdev->id,
6552 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6553 MGMT_STATUS_REJECTED);
6557 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
6558 err = mgmt_cmd_status(sk, hdev->id,
6559 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6564 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6571 hci_req_init(&req, hdev);
6573 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
6575 err = hci_req_run(&req, remove_from_white_list_complete);
6577 mgmt_pending_remove(cmd);
6582 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_CLEAR_DEV_WHITE_LIST (Tizen):
 * on error report the mapped status; on success send the completion.
 * The pending command is removed either way.
 */
6587 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
6590 struct mgmt_pending_cmd *cmd;
6592 BT_DBG("status 0x%02x", status);
6596 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
6601 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6602 mgmt_status(status));
6604 mgmt_cmd_complete(cmd->sk, hdev->id,
6605 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6608 mgmt_pending_remove(cmd);
6611 hci_dev_unlock(hdev);
/* MGMT_OP_CLEAR_DEV_WHITE_LIST handler (Tizen): issue an HCI LE Clear
 * White List command (no parameters).
 *
 * Same preconditions as the other white-list handlers: LE capable,
 * powered, no request of this type already pending.  Completed in
 * clear_white_list_complete().
 */
6614 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
6615 void *data, u16 len)
6617 struct mgmt_pending_cmd *cmd;
6618 struct hci_request req;
6621 BT_DBG("%s", hdev->name);
6623 if (!lmp_le_capable(hdev))
6624 return mgmt_cmd_status(sk, hdev->id,
6625 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6626 MGMT_STATUS_NOT_SUPPORTED);
6628 if (!hdev_is_powered(hdev))
6629 return mgmt_cmd_status(sk, hdev->id,
6630 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6631 MGMT_STATUS_REJECTED);
6635 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
6636 err = mgmt_cmd_status(sk, hdev->id,
6637 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6642 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6649 hci_req_init(&req, hdev);
6651 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
6653 err = hci_req_run(&req, clear_white_list_complete);
6655 mgmt_pending_remove(cmd);
6660 hci_dev_unlock(hdev);
/* HCI request completion for the RSSI threshold stage of
 * MGMT_OP_SET_RSSI_ENABLE (Tizen vendor extension): report the mapped
 * error or a success completion to the pending command, then remove it.
 */
6665 static void set_rssi_threshold_complete(struct hci_dev *hdev,
6666 u8 status, u16 opcode)
6668 struct mgmt_pending_cmd *cmd;
6670 BT_DBG("status 0x%02x", status);
6674 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6679 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6680 mgmt_status(status));
6682 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6685 mgmt_pending_remove(cmd);
6688 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_RSSI_DISABLE (Tizen vendor
 * extension): report the mapped error or a success completion to the
 * pending command, then remove it.
 */
6691 static void set_rssi_disable_complete(struct hci_dev *hdev,
6692 u8 status, u16 opcode)
6694 struct mgmt_pending_cmd *cmd;
6696 BT_DBG("status 0x%02x", status);
6700 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6705 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6706 mgmt_status(status));
6708 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6711 mgmt_pending_remove(cmd);
6714 hci_dev_unlock(hdev);
/* Second stage of MGMT_OP_SET_RSSI_ENABLE (Tizen vendor extension):
 * program per-connection RSSI alert thresholds via the vendor
 * HCI_OP_ENABLE_RSSI command (le_ext_opcode 0x0B).
 *
 * Expects a pending SET_RSSI_ENABLE command to already exist (it is
 * created in set_enable_rssi() / reused from mgmt_enable_rssi_cc()).
 * Resolves the LE or ACL connection for cp->bdaddr to obtain the
 * connection handle; if no such connection exists the command completes
 * with a 1-status response instead of an error status.  Completion is
 * handled in set_rssi_threshold_complete().
 */
6717 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6718 void *data, u16 len)
6721 struct hci_cp_set_rssi_threshold th = { 0, };
6722 struct mgmt_cp_set_enable_rssi *cp = data;
6723 struct hci_conn *conn;
6724 struct mgmt_pending_cmd *cmd;
6725 struct hci_request req;
/* A pending SET_RSSI_ENABLE must exist before thresholds are set. */
6730 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6732 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6733 MGMT_STATUS_FAILED);
6737 if (!lmp_le_capable(hdev)) {
6738 mgmt_pending_remove(cmd);
6739 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6740 MGMT_STATUS_NOT_SUPPORTED);
6744 if (!hdev_is_powered(hdev)) {
6745 BT_DBG("%s", hdev->name);
6746 mgmt_pending_remove(cmd);
6747 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6748 MGMT_STATUS_NOT_POWERED);
/* link_type 0x01 selects LE; anything else is treated as BR/EDR. */
6752 if (cp->link_type == 0x01)
6753 dest_type = LE_LINK;
6755 dest_type = ACL_LINK;
6757 /* Get LE/ACL link handle info */
6758 conn = hci_conn_hash_lookup_ba(hdev,
6759 dest_type, &cp->bdaddr);
6762 err = mgmt_cmd_complete(sk, hdev->id,
6763 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
6764 mgmt_pending_remove(cmd);
6768 hci_req_init(&req, hdev);
/* Vendor sub-opcode 0x0B = set RSSI threshold; alert_mask 0x07
 * presumably enables all three (low/in-range/high) alerts — confirm
 * against the vendor HCI documentation.
 */
6770 th.hci_le_ext_opcode = 0x0B;
6772 th.conn_handle = conn->handle;
6773 th.alert_mask = 0x07;
6774 th.low_th = cp->low_th;
6775 th.in_range_th = cp->in_range_th;
6776 th.high_th = cp->high_th;
6778 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6779 err = hci_req_run(&req, set_rssi_threshold_complete);
6782 mgmt_pending_remove(cmd);
6783 BT_ERR("Error in requesting hci_req_run");
6788 hci_dev_unlock(hdev);
/* Report successful RSSI-monitoring enable (Tizen vendor extension).
 *
 * Builds a mgmt response from the vendor command-complete data and the
 * original request, sends both the SET_RSSI_ENABLE completion and an
 * MGMT_EV_RSSI_ENABLED event, then updates per-connection RSSI state:
 * all links of this type are unset first and only the requested
 * address is marked as monitored.  Any still-pending SET_RSSI_ENABLE
 * command is removed at the end.
 */
6792 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6793 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6795 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6796 struct mgmt_cp_set_enable_rssi *cp = data;
6797 struct mgmt_pending_cmd *cmd;
6802 mgmt_rp.status = rp->status;
6803 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6804 mgmt_rp.bt_address = cp->bdaddr;
6805 mgmt_rp.link_type = cp->link_type;
6807 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6808 MGMT_STATUS_SUCCESS, &mgmt_rp,
6809 sizeof(struct mgmt_cc_rsp_enable_rssi));
6811 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6812 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
/* Only one link per link type is monitored at a time. */
6814 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6815 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6816 &mgmt_rp.bt_address, true);
6820 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6822 mgmt_pending_remove(cmd);
6824 hci_dev_unlock(hdev);
/* Report successful RSSI-monitoring disable (Tizen vendor extension).
 *
 * Mirrors mgmt_rssi_enable_success(): sends the SET_RSSI_DISABLE
 * completion and an MGMT_EV_RSSI_DISABLED event, clears the monitored
 * state for the address, and removes any still-pending
 * SET_RSSI_DISABLE command.  Note the response/event sizes use
 * sizeof(struct mgmt_cc_rsp_enable_rssi) while mgmt_rp is a
 * mgmt_cc_rp_disable_rssi — presumably the two layouts match; verify
 * against mgmt_tizen.h.
 */
6827 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6828 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6830 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6831 struct mgmt_cp_disable_rssi *cp = data;
6832 struct mgmt_pending_cmd *cmd;
6837 mgmt_rp.status = rp->status;
6838 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6839 mgmt_rp.bt_address = cp->bdaddr;
6840 mgmt_rp.link_type = cp->link_type;
6842 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6843 MGMT_STATUS_SUCCESS, &mgmt_rp,
6844 sizeof(struct mgmt_cc_rsp_enable_rssi));
6846 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6847 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6849 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6850 &mgmt_rp.bt_address, false);
6854 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6856 mgmt_pending_remove(cmd);
6858 hci_dev_unlock(hdev);
/* Second stage of MGMT_OP_SET_RSSI_DISABLE (Tizen vendor extension):
 * turn vendor RSSI monitoring off via HCI_OP_ENABLE_RSSI with
 * le_enable_cs_Features = 0x00 (all monitoring features disabled).
 *
 * Expects a pending SET_RSSI_DISABLE command to exist (created by the
 * disable request path); failures remove it and answer with the
 * appropriate status.  Completion is handled in
 * set_rssi_disable_complete().
 */
6861 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6862 void *data, u16 len)
6864 struct mgmt_pending_cmd *cmd;
6865 struct hci_request req;
6866 struct hci_cp_set_enable_rssi cp_en = { 0, };
6869 BT_DBG("Set Disable RSSI.");
/* Sub-opcode 0x01 = enable/disable; 0x00 features = disable. */
6871 cp_en.hci_le_ext_opcode = 0x01;
6872 cp_en.le_enable_cs_Features = 0x00;
6873 cp_en.data[0] = 0x00;
6874 cp_en.data[1] = 0x00;
6875 cp_en.data[2] = 0x00;
6879 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6881 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6882 MGMT_STATUS_FAILED);
6886 if (!lmp_le_capable(hdev)) {
6887 mgmt_pending_remove(cmd);
6888 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6889 MGMT_STATUS_NOT_SUPPORTED);
6893 if (!hdev_is_powered(hdev)) {
6894 BT_DBG("%s", hdev->name);
6895 mgmt_pending_remove(cmd);
6896 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6897 MGMT_STATUS_NOT_POWERED);
6901 hci_req_init(&req, hdev);
6903 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6904 sizeof(struct hci_cp_set_enable_rssi),
6905 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6906 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6908 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6909 err = hci_req_run(&req, set_rssi_disable_complete);
6912 mgmt_pending_remove(cmd);
6913 BT_ERR("Error in requesting hci_req_run");
6918 hci_dev_unlock(hdev);
/* Dispatch a vendor HCI_OP_ENABLE_RSSI command-complete event
 * (Tizen vendor extension).
 *
 * Looks up which of the two possible pending mgmt commands
 * (SET_RSSI_ENABLE / SET_RSSI_DISABLE) this completion belongs to and
 * routes on rp->le_ext_opcode:
 *  - enable path: after the monitoring-enable step succeeds, chain
 *    into mgmt_set_rssi_threshold(); after the threshold step, report
 *    success via mgmt_rssi_enable_success().
 *  - disable path: report success, or — when thresholds were unset —
 *    either just report (other links still monitored) or chain into
 *    mgmt_set_disable_rssi() to fully switch monitoring off.
 * The exact le_ext_opcode case values are elided here; confirm against
 * the vendor HCI specification.
 */
6922 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6924 struct hci_cc_rsp_enable_rssi *rp = response;
6925 struct mgmt_pending_cmd *cmd_enable = NULL;
6926 struct mgmt_pending_cmd *cmd_disable = NULL;
6927 struct mgmt_cp_set_enable_rssi *cp_en;
6928 struct mgmt_cp_disable_rssi *cp_dis;
6931 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6932 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6933 hci_dev_unlock(hdev);
6936 BT_DBG("Enable Request");
6939 BT_DBG("Disable Request");
6942 cp_en = cmd_enable->param;
6947 switch (rp->le_ext_opcode) {
6949 BT_DBG("RSSI enabled.. Setting Threshold...");
6950 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6951 cp_en, sizeof(*cp_en));
6955 BT_DBG("Sending RSSI enable success");
6956 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6957 cp_en, rp, rp->status);
6961 } else if (cmd_disable) {
6962 cp_dis = cmd_disable->param;
6967 switch (rp->le_ext_opcode) {
6969 BT_DBG("Sending RSSI disable success");
6970 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6971 cp_dis, rp, rp->status);
6976 * Only unset RSSI Threshold values for the Link if
6977 * RSSI is monitored for other BREDR or LE Links
6979 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6980 BT_DBG("Unset Threshold. Other links being monitored");
6981 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6982 cp_dis, rp, rp->status);
6984 BT_DBG("Unset Threshold. Disabling...");
6985 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6986 cp_dis, sizeof(*cp_dis));
/* HCI request completion for the monitoring-enable stage of
 * MGMT_OP_SET_RSSI_ENABLE (Tizen vendor extension): report the mapped
 * error or a success completion to the pending command, then remove it.
 */
6993 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
6996 struct mgmt_pending_cmd *cmd;
6998 BT_DBG("status 0x%02x", status);
7002 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7007 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7008 mgmt_status(status));
7010 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7013 mgmt_pending_remove(cmd);
7016 hci_dev_unlock(hdev);
/* MGMT_OP_SET_RSSI_ENABLE handler (Tizen vendor extension): switch on
 * vendor RSSI monitoring via HCI_OP_ENABLE_RSSI (sub-opcode 0x01,
 * le_enable_cs_Features = 0x04).
 *
 * If monitoring is already active on some connection the HCI enable
 * step is skipped and mgmt_set_rssi_threshold() is called directly to
 * program thresholds; otherwise the enable command is issued and the
 * flow continues from its command-complete (see mgmt_enable_rssi_cc()).
 */
7019 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7020 void *data, u16 len)
7022 struct mgmt_pending_cmd *cmd;
7023 struct hci_request req;
7024 struct mgmt_cp_set_enable_rssi *cp = data;
7025 struct hci_cp_set_enable_rssi cp_en = { 0, };
7028 BT_DBG("Set Enable RSSI.");
/* Sub-opcode 0x01 = enable/disable; feature bit 0x04 presumably
 * selects RSSI monitoring — confirm against the vendor HCI spec.
 */
7030 cp_en.hci_le_ext_opcode = 0x01;
7031 cp_en.le_enable_cs_Features = 0x04;
7032 cp_en.data[0] = 0x00;
7033 cp_en.data[1] = 0x00;
7034 cp_en.data[2] = 0x00;
7038 if (!lmp_le_capable(hdev)) {
7039 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7040 MGMT_STATUS_NOT_SUPPORTED);
7044 if (!hdev_is_powered(hdev)) {
7045 BT_DBG("%s", hdev->name);
7046 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7047 MGMT_STATUS_NOT_POWERED);
7051 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
7052 BT_DBG("%s", hdev->name);
7053 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7058 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
7061 BT_DBG("%s", hdev->name);
7066 /* If RSSI is already enabled directly set Threshold values */
7067 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
7068 hci_dev_unlock(hdev);
7069 BT_DBG("RSSI Enabled. Directly set Threshold");
7070 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
7074 hci_req_init(&req, hdev);
7076 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7077 sizeof(struct hci_cp_set_enable_rssi),
7078 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7079 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7081 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7082 err = hci_req_run(&req, set_rssi_enable_complete);
7085 mgmt_pending_remove(cmd);
7086 BT_ERR("Error in requesting hci_req_run");
7091 hci_dev_unlock(hdev);
/* Completion callback for the vendor "get raw RSSI" HCI request
 * (TIZEN_BT extension).  Finds the pending MGMT_OP_GET_RAW_RSSI entry,
 * replies to its socket with the one-byte HCI status, and removes it.
 * The actual RSSI value is delivered separately via
 * mgmt_raw_rssi_response() when the vendor event arrives.
 */
7096 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7098 struct mgmt_pending_cmd *cmd;
7100 BT_DBG("status 0x%02x", status);
7104 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
7108 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7109 MGMT_STATUS_SUCCESS, &status, 1);
7111 mgmt_pending_remove(cmd);
7114 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_RAW_RSSI (TIZEN_BT extension).
 *
 * Resolves the target connection from the supplied address
 * (link_type 0x01 selects LE, anything else BR/EDR ACL), then issues
 * the vendor HCI_OP_GET_RAW_RSSI command carrying the connection
 * handle.  Fails with NOT_SUPPORTED / NOT_CONNECTED / NOT_POWERED or a
 * busy status when a GET_RAW_RSSI is already pending.
 * NOTE(review): error-path gotos and closing braces are elided here.
 */
7117 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
7120 struct mgmt_pending_cmd *cmd;
7121 struct hci_request req;
7122 struct mgmt_cp_get_raw_rssi *cp = data;
7123 struct hci_cp_get_raw_rssi hci_cp;
7125 struct hci_conn *conn;
7129 BT_DBG("Get Raw RSSI.");
7133 if (!lmp_le_capable(hdev)) {
7134 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7135 MGMT_STATUS_NOT_SUPPORTED);
/* 0x01 = LE link, otherwise treat as BR/EDR ACL. */
7139 if (cp->link_type == 0x01)
7140 dest_type = LE_LINK;
7142 dest_type = ACL_LINK;
7144 /* Get LE/BREDR link handle info */
7145 conn = hci_conn_hash_lookup_ba(hdev,
7146 dest_type, &cp->bt_address);
7148 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7149 MGMT_STATUS_NOT_CONNECTED);
7152 hci_cp.conn_handle = conn->handle;
7154 if (!hdev_is_powered(hdev)) {
7155 BT_DBG("%s", hdev->name);
7156 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7157 MGMT_STATUS_NOT_POWERED);
7161 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
7162 BT_DBG("%s", hdev->name);
7163 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
7168 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
7170 BT_DBG("%s", hdev->name);
7175 hci_req_init(&req, hdev);
7177 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
7178 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
7179 err = hci_req_run(&req, get_raw_rssi_complete);
7182 mgmt_pending_remove(cmd);
7183 BT_ERR("Error in requesting hci_req_run");
7187 hci_dev_unlock(hdev);
/* Broadcast the raw RSSI result from the controller (TIZEN_BT
 * extension).  Translates the HCI command-complete payload into a
 * MGMT_EV_RAW_RSSI event: copies status and dBm value, resolves the
 * peer address from the connection handle, and encodes link_type as
 * 0x01 for LE, 0x00 for BR/EDR.
 */
7192 void mgmt_raw_rssi_response(struct hci_dev *hdev,
7193 struct hci_cc_rp_get_raw_rssi *rp, int success)
7195 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
7196 struct hci_conn *conn;
7198 mgmt_rp.status = rp->status;
7199 mgmt_rp.rssi_dbm = rp->rssi_dbm;
7201 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
7205 bacpy(&mgmt_rp.bt_address, &conn->dst);
7206 if (conn->type == LE_LINK)
7207 mgmt_rp.link_type = 0x01;
7209 mgmt_rp.link_type = 0x00;
7211 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
7212 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* Completion callback for the "disable RSSI threshold" vendor request
 * (TIZEN_BT extension).  Replies to the pending
 * MGMT_OP_SET_RSSI_DISABLE command with the one-byte HCI status and
 * removes the pending entry under hdev->lock.
 */
7215 static void set_disable_threshold_complete(struct hci_dev *hdev,
7216 u8 status, u16 opcode)
7218 struct mgmt_pending_cmd *cmd;
7220 BT_DBG("status 0x%02x", status);
7224 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7228 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7229 MGMT_STATUS_SUCCESS, &status, 1);
7231 mgmt_pending_remove(cmd);
7234 hci_dev_unlock(hdev);
7237 /** Removes monitoring for a link*/
/* Handler for MGMT_OP_SET_RSSI_DISABLE (TIZEN_BT extension).
 *
 * Clears the RSSI alert thresholds for a single link by sending the
 * vendor sub-command 0x0B with a zero alert mask and zero in-range
 * threshold for the connection's handle.  If the address is not
 * currently connected the command completes immediately (nothing to
 * disable).  Rejected when the adapter is not LE capable, not powered,
 * or when a SET_RSSI_DISABLE is already pending.
 * NOTE(review): error-path gotos and braces are elided in this view.
 */
7238 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
7239 void *data, u16 len)
7242 struct hci_cp_set_rssi_threshold th = { 0, };
7243 struct mgmt_cp_disable_rssi *cp = data;
7244 struct hci_conn *conn;
7245 struct mgmt_pending_cmd *cmd;
7246 struct hci_request req;
7249 BT_DBG("Set Disable RSSI.");
7253 if (!lmp_le_capable(hdev)) {
7254 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7255 MGMT_STATUS_NOT_SUPPORTED);
7259 /* Get LE/ACL link handle info*/
7260 if (cp->link_type == 0x01)
7261 dest_type = LE_LINK;
7263 dest_type = ACL_LINK;
7265 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
7267 err = mgmt_cmd_complete(sk, hdev->id,
7268 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
/* Sub-opcode 0x0B: set threshold; zeroed mask disables alerts. */
7272 th.hci_le_ext_opcode = 0x0B;
7274 th.conn_handle = conn->handle;
7275 th.alert_mask = 0x00;
7277 th.in_range_th = 0x00;
7280 if (!hdev_is_powered(hdev)) {
7281 BT_DBG("%s", hdev->name);
7282 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7287 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
7288 BT_DBG("%s", hdev->name);
7289 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7294 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
7297 BT_DBG("%s", hdev->name);
7302 hci_req_init(&req, hdev);
7304 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7305 err = hci_req_run(&req, set_disable_threshold_complete);
7307 mgmt_pending_remove(cmd);
7308 BT_ERR("Error in requesting hci_req_run");
7313 hci_dev_unlock(hdev);
/* Forward a vendor-specific RSSI alert HCI event to userspace
 * (TIZEN_BT extension).  Resolves the peer address from the handle in
 * the event, encodes link_type (0x01 LE / 0x00 BR/EDR), copies the
 * alert type and dBm value, and emits MGMT_EV_RSSI_ALERT.  Logs an
 * error and bails when the handle matches no known connection.
 */
7318 void mgmt_rssi_alert_evt(struct hci_dev *hdev, struct sk_buff *skb)
7320 struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
7321 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
7322 struct hci_conn *conn;
7324 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
7325 ev->conn_handle, ev->alert_type, ev->rssi_dbm);
7327 conn = hci_conn_hash_lookup_handle(hdev, ev->conn_handle);
7330 BT_ERR("RSSI alert Error: Device not found for handle");
7333 bacpy(&mgmt_ev.bdaddr, &conn->dst);
7335 if (conn->type == LE_LINK)
7336 mgmt_ev.link_type = 0x01;
7338 mgmt_ev.link_type = 0x00;
7340 mgmt_ev.alert_type = ev->alert_type;
7341 mgmt_ev.rssi_dbm = ev->rssi_dbm;
7343 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
7344 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
7347 #endif /* TIZEN_BT */
/* Validate one LTK entry from a Load Long Term Keys request:
 * the initiator flag must be 0x00/0x01 and the address must be an LE
 * address (a random address additionally needs its two most
 * significant bits set, i.e. be a static random address).
 */
7349 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7351 if (key->initiator != 0x00 && key->initiator != 0x01)
7354 switch (key->addr.type) {
7355 case BDADDR_LE_PUBLIC:
7358 case BDADDR_LE_RANDOM:
7359 /* Two most significant bits shall be set */
7360 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 *
 * Validates the variable-length key list (count bounded so the total
 * size fits in u16; struct_size() guards the exact length; every entry
 * must pass ltk_is_valid()), then replaces the adapter's SMP LTK store:
 * clears existing LTKs and re-adds each supplied key, skipping any key
 * on the blocked-key list.  The mgmt key type selects SMP LTK type and
 * the authenticated flag.  Replies with a plain success complete.
 */
7368 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7369 void *cp_data, u16 len)
7371 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7372 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7373 sizeof(struct mgmt_ltk_info));
7374 u16 key_count, expected_len;
7377 bt_dev_dbg(hdev, "sock %p", sk);
7379 if (!lmp_le_capable(hdev))
7380 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7381 MGMT_STATUS_NOT_SUPPORTED);
7383 key_count = __le16_to_cpu(cp->key_count);
7384 if (key_count > max_key_count) {
7385 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7387 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7388 MGMT_STATUS_INVALID_PARAMS);
7391 expected_len = struct_size(cp, keys, key_count);
7392 if (expected_len != len) {
7393 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7395 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7396 MGMT_STATUS_INVALID_PARAMS);
7399 bt_dev_dbg(hdev, "key_count %u", key_count);
/* First pass: reject the whole request if any key is malformed. */
7401 for (i = 0; i < key_count; i++) {
7402 struct mgmt_ltk_info *key = &cp->keys[i];
7404 if (!ltk_is_valid(key))
7405 return mgmt_cmd_status(sk, hdev->id,
7406 MGMT_OP_LOAD_LONG_TERM_KEYS,
7407 MGMT_STATUS_INVALID_PARAMS);
7412 hci_smp_ltks_clear(hdev);
/* Second pass: install each key, skipping blocked ones. */
7414 for (i = 0; i < key_count; i++) {
7415 struct mgmt_ltk_info *key = &cp->keys[i];
7416 u8 type, authenticated;
7418 if (hci_is_blocked_key(hdev,
7419 HCI_BLOCKED_KEY_TYPE_LTK,
7421 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7426 switch (key->type) {
7427 case MGMT_LTK_UNAUTHENTICATED:
7428 authenticated = 0x00;
7429 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7431 case MGMT_LTK_AUTHENTICATED:
7432 authenticated = 0x01;
7433 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7435 case MGMT_LTK_P256_UNAUTH:
7436 authenticated = 0x00;
7437 type = SMP_LTK_P256;
7439 case MGMT_LTK_P256_AUTH:
7440 authenticated = 0x01;
7441 type = SMP_LTK_P256;
7443 case MGMT_LTK_P256_DEBUG:
7444 authenticated = 0x00;
7445 type = SMP_LTK_P256_DEBUG;
7451 hci_add_ltk(hdev, &key->addr.bdaddr,
7452 le_addr_type(key->addr.type), type, authenticated,
7453 key->val, key->enc_size, key->ediv, key->rand);
7456 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7459 hci_dev_unlock(hdev);
/* cmd_complete hook for a pending Get Conn Info command.
 * On success returns the cached rssi / tx_power / max_tx_power from the
 * hci_conn stored in cmd->user_data; on failure fills the reply with
 * the HCI_*_INVALID sentinels.  Drops the connection reference taken
 * when the command was queued.
 */
7464 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
7466 struct hci_conn *conn = cmd->user_data;
7467 struct mgmt_rp_get_conn_info rp;
7470 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
7472 if (status == MGMT_STATUS_SUCCESS) {
7473 rp.rssi = conn->rssi;
7474 rp.tx_power = conn->tx_power;
7475 rp.max_tx_power = conn->max_tx_power;
7477 rp.rssi = HCI_RSSI_INVALID;
7478 rp.tx_power = HCI_TX_POWER_INVALID;
7479 rp.max_tx_power = HCI_TX_POWER_INVALID;
7482 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
7483 status, &rp, sizeof(rp));
7485 hci_conn_drop(conn);
/* Completion callback for the Read RSSI / Read TX Power request pair
 * issued by get_conn_info().  Recovers the connection handle from the
 * last-sent command, looks up the pending GET_CONN_INFO entry for that
 * connection and finishes it via its cmd_complete hook.  See the inline
 * comment below for why a TX-power failure is reported as success.
 */
7491 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
7494 struct hci_cp_read_rssi *cp;
7495 struct mgmt_pending_cmd *cmd;
7496 struct hci_conn *conn;
7500 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
7504 /* Commands sent in request are either Read RSSI or Read Transmit Power
7505 * Level so we check which one was last sent to retrieve connection
7506 * handle. Both commands have handle as first parameter so it's safe to
7507 * cast data on the same command struct.
7509 * First command sent is always Read RSSI and we fail only if it fails.
7510 * In other case we simply override error to indicate success as we
7511 * already remembered if TX power value is actually valid.
7513 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
7515 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
7516 status = MGMT_STATUS_SUCCESS;
7518 status = mgmt_status(hci_status);
7522 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
7526 handle = __le16_to_cpu(cp->handle);
7527 conn = hci_conn_hash_lookup_handle(hdev, handle);
7529 bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
7534 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
7538 cmd->cmd_complete(cmd, status);
7539 mgmt_pending_remove(cmd);
7542 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Returns RSSI, TX power and max TX power for a connected peer.  Cached
 * values in the hci_conn are used when fresh enough; otherwise a
 * request of Read RSSI (+ Read TX Power / Read Max TX Power as needed)
 * is queued and the reply is deferred to conn_info_refresh_complete().
 * The cache validity window is randomized between
 * conn_info_min_age..conn_info_max_age so clients cannot lock onto the
 * refresh cadence.  Errors: INVALID_PARAMS, NOT_POWERED, NOT_CONNECTED,
 * BUSY when a query for the same connection is already pending.
 */
7545 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7548 struct mgmt_cp_get_conn_info *cp = data;
7549 struct mgmt_rp_get_conn_info rp;
7550 struct hci_conn *conn;
7551 unsigned long conn_info_age;
7554 bt_dev_dbg(hdev, "sock %p", sk);
7556 memset(&rp, 0, sizeof(rp));
7557 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7558 rp.addr.type = cp->addr.type;
7560 if (!bdaddr_type_is_valid(cp->addr.type))
7561 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7562 MGMT_STATUS_INVALID_PARAMS,
7567 if (!hdev_is_powered(hdev)) {
7568 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7569 MGMT_STATUS_NOT_POWERED, &rp,
7574 if (cp->addr.type == BDADDR_BREDR)
7575 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7578 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7580 if (!conn || conn->state != BT_CONNECTED) {
7581 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7582 MGMT_STATUS_NOT_CONNECTED, &rp,
7587 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
7588 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7589 MGMT_STATUS_BUSY, &rp, sizeof(rp));
7593 /* To avoid client trying to guess when to poll again for information we
7594 * calculate conn info age as random value between min/max set in hdev.
7596 conn_info_age = hdev->conn_info_min_age +
7597 prandom_u32_max(hdev->conn_info_max_age -
7598 hdev->conn_info_min_age);
7600 /* Query controller to refresh cached values if they are too old or were
7603 if (time_after(jiffies, conn->conn_info_timestamp +
7604 msecs_to_jiffies(conn_info_age)) ||
7605 !conn->conn_info_timestamp) {
7606 struct hci_request req;
7607 struct hci_cp_read_tx_power req_txp_cp;
7608 struct hci_cp_read_rssi req_rssi_cp;
7609 struct mgmt_pending_cmd *cmd;
7611 hci_req_init(&req, hdev);
7612 req_rssi_cp.handle = cpu_to_le16(conn->handle);
7613 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
7616 /* For LE links TX power does not change thus we don't need to
7617 * query for it once value is known.
7619 if (!bdaddr_type_is_le(cp->addr.type) ||
7620 conn->tx_power == HCI_TX_POWER_INVALID) {
7621 req_txp_cp.handle = cpu_to_le16(conn->handle);
7622 req_txp_cp.type = 0x00;
7623 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7624 sizeof(req_txp_cp), &req_txp_cp);
7627 /* Max TX power needs to be read only once per connection */
7628 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
7629 req_txp_cp.handle = cpu_to_le16(conn->handle);
7630 req_txp_cp.type = 0x01;
7631 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7632 sizeof(req_txp_cp), &req_txp_cp);
7635 err = hci_req_run(&req, conn_info_refresh_complete);
7639 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold a reference for the deferred completion path. */
7646 hci_conn_hold(conn);
7647 cmd->user_data = hci_conn_get(conn);
7648 cmd->cmd_complete = conn_info_cmd_complete;
7650 conn->conn_info_timestamp = jiffies;
7652 /* Cache is valid, just reply with values cached in hci_conn */
7653 rp.rssi = conn->rssi;
7654 rp.tx_power = conn->tx_power;
7655 rp.max_tx_power = conn->max_tx_power;
7657 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7658 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7662 hci_dev_unlock(hdev);
/* cmd_complete hook for a pending Get Clock Info command.
 * Builds the reply from the local clock (via a temporary hci_dev
 * reference looked up by cmd->index) plus, when a connection is
 * attached to the command, its piconet clock and accuracy; then drops
 * the connection reference.
 */
7666 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
7668 struct hci_conn *conn = cmd->user_data;
7669 struct mgmt_rp_get_clock_info rp;
7670 struct hci_dev *hdev;
7673 memset(&rp, 0, sizeof(rp));
7674 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
7679 hdev = hci_dev_get(cmd->index);
7681 rp.local_clock = cpu_to_le32(hdev->clock);
7686 rp.piconet_clock = cpu_to_le32(conn->clock);
7687 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7691 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7695 hci_conn_drop(conn);
/* Completion callback for the Read Clock request(s) queued by
 * get_clock_info().  Recovers the connection (when the last Read Clock
 * targeted a piconet clock, i.e. hci_cp->which != 0), then completes
 * and removes the matching pending GET_CLOCK_INFO command.
 */
7702 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7704 struct hci_cp_read_clock *hci_cp;
7705 struct mgmt_pending_cmd *cmd;
7706 struct hci_conn *conn;
7708 bt_dev_dbg(hdev, "status %u", status);
7712 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
7716 if (hci_cp->which) {
7717 u16 handle = __le16_to_cpu(hci_cp->handle);
7718 conn = hci_conn_hash_lookup_handle(hdev, handle);
7723 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
7727 cmd->cmd_complete(cmd, mgmt_status(status));
7728 mgmt_pending_remove(cmd);
7731 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CLOCK_INFO (BR/EDR only).
 *
 * Always queues a Read Clock for the local clock (which = 0x00); when a
 * non-ANY peer address is given and that peer is connected, also queues
 * a Read Clock for its piconet clock (which = 0x01) and pins the
 * connection on the pending command.  The reply is assembled later in
 * clock_info_cmd_complete() via get_clock_info_complete().
 */
7734 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7737 struct mgmt_cp_get_clock_info *cp = data;
7738 struct mgmt_rp_get_clock_info rp;
7739 struct hci_cp_read_clock hci_cp;
7740 struct mgmt_pending_cmd *cmd;
7741 struct hci_request req;
7742 struct hci_conn *conn;
7745 bt_dev_dbg(hdev, "sock %p", sk);
7747 memset(&rp, 0, sizeof(rp));
7748 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7749 rp.addr.type = cp->addr.type;
7751 if (cp->addr.type != BDADDR_BREDR)
7752 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7753 MGMT_STATUS_INVALID_PARAMS,
7758 if (!hdev_is_powered(hdev)) {
7759 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7760 MGMT_STATUS_NOT_POWERED, &rp,
7765 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7766 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7768 if (!conn || conn->state != BT_CONNECTED) {
7769 err = mgmt_cmd_complete(sk, hdev->id,
7770 MGMT_OP_GET_CLOCK_INFO,
7771 MGMT_STATUS_NOT_CONNECTED,
7779 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7785 cmd->cmd_complete = clock_info_cmd_complete;
7787 hci_req_init(&req, hdev);
/* Local clock: zeroed hci_cp means which = 0x00. */
7789 memset(&hci_cp, 0, sizeof(hci_cp));
7790 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
7793 hci_conn_hold(conn);
7794 cmd->user_data = hci_conn_get(conn);
7796 hci_cp.handle = cpu_to_le16(conn->handle);
7797 hci_cp.which = 0x01; /* Piconet clock */
7798 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
7801 err = hci_req_run(&req, get_clock_info_complete);
7803 mgmt_pending_remove(cmd);
7806 hci_dev_unlock(hdev);
/* Return whether an LE connection to addr (with matching dst_type) is
 * established and in BT_CONNECTED state.
 */
7810 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7812 struct hci_conn *conn;
7814 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7818 if (conn->dst_type != type)
7821 if (conn->state != BT_CONNECTED)
7827 /* This function requires the caller holds hdev->lock */
/* Create (or fetch) the conn_params entry for addr/addr_type and move
 * it onto the action list matching the requested auto_connect policy:
 * pend_le_conns for DIRECT/ALWAYS (when not already connected) and for
 * any policy with an explicit connect in flight, pend_le_reports for
 * REPORT.  No-op when the policy is unchanged.
 */
7828 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7829 u8 addr_type, u8 auto_connect)
7831 struct hci_conn_params *params;
7833 params = hci_conn_params_add(hdev, addr, addr_type);
7837 if (params->auto_connect == auto_connect)
7840 list_del_init(&params->action);
7842 switch (auto_connect) {
7843 case HCI_AUTO_CONN_DISABLED:
7844 case HCI_AUTO_CONN_LINK_LOSS:
7845 /* If auto connect is being disabled when we're trying to
7846 * connect to device, keep connecting.
7848 if (params->explicit_connect)
7849 list_add(&params->action, &hdev->pend_le_conns);
7851 case HCI_AUTO_CONN_REPORT:
7852 if (params->explicit_connect)
7853 list_add(&params->action, &hdev->pend_le_conns);
7855 list_add(&params->action, &hdev->pend_le_reports);
7857 case HCI_AUTO_CONN_DIRECT:
7858 case HCI_AUTO_CONN_ALWAYS:
7859 if (!is_connected(hdev, addr, addr_type))
7860 list_add(&params->action, &hdev->pend_le_conns);
7864 params->auto_connect = auto_connect;
7866 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7867 addr, addr_type, auto_connect);
/* Emit MGMT_EV_DEVICE_ADDED for the given address/type to everyone
 * except the originating socket sk (mgmt_event's skip argument).
 */
7872 static void device_added(struct sock *sk, struct hci_dev *hdev,
7873 bdaddr_t *bdaddr, u8 type, u8 action)
7875 struct mgmt_ev_device_added ev;
7877 bacpy(&ev.addr.bdaddr, bdaddr);
7878 ev.addr.type = type;
7881 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * action: 0x00 background scan report, 0x01 allow incoming / direct
 * connect, 0x02 auto-connect always.  For BR/EDR (only action 0x01
 * supported) the address goes on the accept list and page scanning is
 * updated.  For LE the identity address is validated, conn_params are
 * created/updated via hci_conn_params_set(), and background scanning is
 * refreshed.  Emits Device Added plus a Device Flags Changed event
 * carrying the entry's current flags.
 */
7884 static int add_device(struct sock *sk, struct hci_dev *hdev,
7885 void *data, u16 len)
7887 struct mgmt_cp_add_device *cp = data;
7888 u8 auto_conn, addr_type;
7889 struct hci_conn_params *params;
7891 u32 current_flags = 0;
7893 bt_dev_dbg(hdev, "sock %p", sk);
7895 if (!bdaddr_type_is_valid(cp->addr.type) ||
7896 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7897 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7898 MGMT_STATUS_INVALID_PARAMS,
7899 &cp->addr, sizeof(cp->addr));
7901 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7902 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7903 MGMT_STATUS_INVALID_PARAMS,
7904 &cp->addr, sizeof(cp->addr));
7908 if (cp->addr.type == BDADDR_BREDR) {
7909 /* Only incoming connections action is supported for now */
7910 if (cp->action != 0x01) {
7911 err = mgmt_cmd_complete(sk, hdev->id,
7913 MGMT_STATUS_INVALID_PARAMS,
7914 &cp->addr, sizeof(cp->addr));
7918 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7924 hci_req_update_scan(hdev);
7929 addr_type = le_addr_type(cp->addr.type);
7931 if (cp->action == 0x02)
7932 auto_conn = HCI_AUTO_CONN_ALWAYS;
7933 else if (cp->action == 0x01)
7934 auto_conn = HCI_AUTO_CONN_DIRECT;
7936 auto_conn = HCI_AUTO_CONN_REPORT;
7938 /* Kernel internally uses conn_params with resolvable private
7939 * address, but Add Device allows only identity addresses.
7940 * Make sure it is enforced before calling
7941 * hci_conn_params_lookup.
7943 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7944 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7945 MGMT_STATUS_INVALID_PARAMS,
7946 &cp->addr, sizeof(cp->addr));
7950 /* If the connection parameters don't exist for this device,
7951 * they will be created and configured with defaults.
7953 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7955 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7956 MGMT_STATUS_FAILED, &cp->addr,
7960 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7963 current_flags = params->current_flags;
7966 hci_update_background_scan(hdev);
7969 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7970 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7971 SUPPORTED_DEVICE_FLAGS(), current_flags);
7973 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7974 MGMT_STATUS_SUCCESS, &cp->addr,
7978 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_REMOVED for the given address/type to everyone
 * except the originating socket sk.
 */
7982 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7983 bdaddr_t *bdaddr, u8 type)
7985 struct mgmt_ev_device_removed ev;
7987 bacpy(&ev.addr.bdaddr, bdaddr);
7988 ev.addr.type = type;
7990 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_REMOVE_DEVICE.
 *
 * With a concrete address: for BR/EDR, drop it from the accept list and
 * update page scan; for LE, validate the identity address, look up its
 * conn_params (entries with DISABLED or EXPLICIT auto_connect are not
 * removable here), unlink and free them, and refresh background scan.
 * With BDADDR_ANY (addr.type must be 0): wipe the whole accept list and
 * every LE conn_params entry except DISABLED ones; explicit-connect
 * entries are demoted to HCI_AUTO_CONN_EXPLICIT instead of freed.
 * Emits Device Removed for each removed entry.
 */
7993 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7994 void *data, u16 len)
7996 struct mgmt_cp_remove_device *cp = data;
7999 bt_dev_dbg(hdev, "sock %p", sk);
8003 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8004 struct hci_conn_params *params;
8007 if (!bdaddr_type_is_valid(cp->addr.type)) {
8008 err = mgmt_cmd_complete(sk, hdev->id,
8009 MGMT_OP_REMOVE_DEVICE,
8010 MGMT_STATUS_INVALID_PARAMS,
8011 &cp->addr, sizeof(cp->addr));
8015 if (cp->addr.type == BDADDR_BREDR) {
8016 err = hci_bdaddr_list_del(&hdev->accept_list,
8020 err = mgmt_cmd_complete(sk, hdev->id,
8021 MGMT_OP_REMOVE_DEVICE,
8022 MGMT_STATUS_INVALID_PARAMS,
8028 hci_req_update_scan(hdev);
8030 device_removed(sk, hdev, &cp->addr.bdaddr,
8035 addr_type = le_addr_type(cp->addr.type);
8037 /* Kernel internally uses conn_params with resolvable private
8038 * address, but Remove Device allows only identity addresses.
8039 * Make sure it is enforced before calling
8040 * hci_conn_params_lookup.
8042 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8043 err = mgmt_cmd_complete(sk, hdev->id,
8044 MGMT_OP_REMOVE_DEVICE,
8045 MGMT_STATUS_INVALID_PARAMS,
8046 &cp->addr, sizeof(cp->addr));
8050 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8053 err = mgmt_cmd_complete(sk, hdev->id,
8054 MGMT_OP_REMOVE_DEVICE,
8055 MGMT_STATUS_INVALID_PARAMS,
8056 &cp->addr, sizeof(cp->addr));
8060 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
8061 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
8062 err = mgmt_cmd_complete(sk, hdev->id,
8063 MGMT_OP_REMOVE_DEVICE,
8064 MGMT_STATUS_INVALID_PARAMS,
8065 &cp->addr, sizeof(cp->addr));
8069 list_del(&params->action);
8070 list_del(&params->list);
8072 hci_update_background_scan(hdev);
8074 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: remove everything. */
8076 struct hci_conn_params *p, *tmp;
8077 struct bdaddr_list *b, *btmp;
8079 if (cp->addr.type) {
8080 err = mgmt_cmd_complete(sk, hdev->id,
8081 MGMT_OP_REMOVE_DEVICE,
8082 MGMT_STATUS_INVALID_PARAMS,
8083 &cp->addr, sizeof(cp->addr));
8087 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
8088 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
8093 hci_req_update_scan(hdev);
8095 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
8096 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
8098 device_removed(sk, hdev, &p->addr, p->addr_type);
8099 if (p->explicit_connect) {
8100 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
8103 list_del(&p->action);
8108 bt_dev_dbg(hdev, "All LE connection parameters were removed");
8110 hci_update_background_scan(hdev);
8114 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
8115 MGMT_STATUS_SUCCESS, &cp->addr,
8118 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM.
 *
 * Replaces the stored LE connection parameters: after validating the
 * count/length of the variable-size list, clears disabled entries and
 * adds each parameter set whose address type and interval/latency/
 * timeout values pass validation (invalid entries are logged and
 * skipped rather than failing the whole request).
 */
8122 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
8125 struct mgmt_cp_load_conn_param *cp = data;
8126 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
8127 sizeof(struct mgmt_conn_param));
8128 u16 param_count, expected_len;
8131 if (!lmp_le_capable(hdev))
8132 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8133 MGMT_STATUS_NOT_SUPPORTED);
8135 param_count = __le16_to_cpu(cp->param_count);
8136 if (param_count > max_param_count) {
8137 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
8139 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8140 MGMT_STATUS_INVALID_PARAMS);
8143 expected_len = struct_size(cp, params, param_count);
8144 if (expected_len != len) {
8145 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
8147 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8148 MGMT_STATUS_INVALID_PARAMS);
8151 bt_dev_dbg(hdev, "param_count %u", param_count);
8155 hci_conn_params_clear_disabled(hdev);
8157 for (i = 0; i < param_count; i++) {
8158 struct mgmt_conn_param *param = &cp->params[i];
8159 struct hci_conn_params *hci_param;
8160 u16 min, max, latency, timeout;
8163 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
8166 if (param->addr.type == BDADDR_LE_PUBLIC) {
8167 addr_type = ADDR_LE_DEV_PUBLIC;
8168 } else if (param->addr.type == BDADDR_LE_RANDOM) {
8169 addr_type = ADDR_LE_DEV_RANDOM;
8171 bt_dev_err(hdev, "ignoring invalid connection parameters");
8175 min = le16_to_cpu(param->min_interval);
8176 max = le16_to_cpu(param->max_interval);
8177 latency = le16_to_cpu(param->latency);
8178 timeout = le16_to_cpu(param->timeout);
8180 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
8181 min, max, latency, timeout);
8183 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
8184 bt_dev_err(hdev, "ignoring invalid connection parameters");
8188 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
8191 bt_dev_err(hdev, "failed to add connection parameters");
8195 hci_param->conn_min_interval = min;
8196 hci_param->conn_max_interval = max;
8197 hci_param->conn_latency = latency;
8198 hci_param->supervision_timeout = timeout;
8201 hci_dev_unlock(hdev);
8203 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG.
 *
 * Toggles HCI_EXT_CONFIGURED while the adapter is powered off (REJECTED
 * when powered; requires the EXTERNAL_CONFIG quirk).  On a change it
 * sends New Configuration Options and, when the configured/unconfigured
 * state flips, tears down and re-registers the mgmt index accordingly,
 * scheduling power-on for a newly configured controller.
 */
8207 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8208 void *data, u16 len)
8210 struct mgmt_cp_set_external_config *cp = data;
8214 bt_dev_dbg(hdev, "sock %p", sk);
8216 if (hdev_is_powered(hdev))
8217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8218 MGMT_STATUS_REJECTED);
8220 if (cp->config != 0x00 && cp->config != 0x01)
8221 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8222 MGMT_STATUS_INVALID_PARAMS);
8224 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8225 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8226 MGMT_STATUS_NOT_SUPPORTED);
8231 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8233 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8235 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8242 err = new_options(hdev, sk);
8244 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8245 mgmt_index_removed(hdev);
8247 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8248 hci_dev_set_flag(hdev, HCI_CONFIG);
8249 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8251 queue_work(hdev->req_workqueue, &hdev->power_on);
8253 set_bit(HCI_RAW, &hdev->flags);
8254 mgmt_index_added(hdev);
8259 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS.
 *
 * Stores a new public address for an unpowered controller that provides
 * a set_bdaddr driver callback (REJECTED when powered, INVALID_PARAMS
 * for BDADDR_ANY, NOT_SUPPORTED without the callback).  When this
 * change makes an unconfigured controller configured, the mgmt index is
 * re-registered and power-on is scheduled.
 */
8263 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8264 void *data, u16 len)
8266 struct mgmt_cp_set_public_address *cp = data;
8270 bt_dev_dbg(hdev, "sock %p", sk);
8272 if (hdev_is_powered(hdev))
8273 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8274 MGMT_STATUS_REJECTED);
8276 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8277 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8278 MGMT_STATUS_INVALID_PARAMS);
8280 if (!hdev->set_bdaddr)
8281 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8282 MGMT_STATUS_NOT_SUPPORTED);
8286 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8287 bacpy(&hdev->public_addr, &cp->bdaddr);
8289 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8296 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8297 err = new_options(hdev, sk);
8299 if (is_configured(hdev)) {
8300 mgmt_index_removed(hdev);
8302 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8304 hci_dev_set_flag(hdev, HCI_CONFIG);
8305 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8307 queue_work(hdev->req_workqueue, &hdev->power_on);
8311 hci_dev_unlock(hdev);
/* Completion callback for the BR/EDR local OOB data read issued by
 * read_local_ssp_oob_req().
 *
 * Validates the skb payload against the legacy (P-192 hash+rand) or
 * extended (P-192 + P-256) reply struct, packs the values as EIR fields
 * (class of device, SSP hash/randomizer C192/R192 and, unless SC-only,
 * C256/R256) into a freshly allocated mgmt reply, completes the pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command and, on success, broadcasts
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED to sockets with the OOB-data flag.
 * NOTE(review): the h192/r192/h256/r256 pointer assignments are elided
 * in this excerpt — verify against the full source.
 */
8315 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
8316 u16 opcode, struct sk_buff *skb)
8318 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8319 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8320 u8 *h192, *r192, *h256, *r256;
8321 struct mgmt_pending_cmd *cmd;
8325 bt_dev_dbg(hdev, "status %u", status);
8327 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
8331 mgmt_cp = cmd->param;
8334 status = mgmt_status(status);
8341 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
8342 struct hci_rp_read_local_oob_data *rp;
8344 if (skb->len != sizeof(*rp)) {
8345 status = MGMT_STATUS_FAILED;
8348 status = MGMT_STATUS_SUCCESS;
8349 rp = (void *)skb->data;
/* EIR budget: CoD (5) + C192 (18) + R192 (18). */
8351 eir_len = 5 + 18 + 18;
8358 struct hci_rp_read_local_oob_ext_data *rp;
8360 if (skb->len != sizeof(*rp)) {
8361 status = MGMT_STATUS_FAILED;
8364 status = MGMT_STATUS_SUCCESS;
8365 rp = (void *)skb->data;
8367 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8368 eir_len = 5 + 18 + 18;
8372 eir_len = 5 + 18 + 18 + 18 + 18;
8382 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8389 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8390 hdev->dev_class, 3);
8393 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8394 EIR_SSP_HASH_C192, h192, 16);
8395 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8396 EIR_SSP_RAND_R192, r192, 16);
8400 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8401 EIR_SSP_HASH_C256, h256, 16);
8402 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8403 EIR_SSP_RAND_R256, r256, 16);
8407 mgmt_rp->type = mgmt_cp->type;
8408 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8410 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8411 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8412 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8413 if (err < 0 || status)
8416 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8418 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8419 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8420 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8423 mgmt_pending_remove(cmd);
/* Queue the HCI read of local SSP OOB data for a pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command: the extended (P-256-capable)
 * variant when BR/EDR secure connections are enabled, the legacy one
 * otherwise.  Completion is handled, skb included, by
 * read_local_oob_ext_data_complete().
 */
8426 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8427 struct mgmt_cp_read_local_oob_ext_data *cp)
8429 struct mgmt_pending_cmd *cmd;
8430 struct hci_request req;
8433 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8438 hci_req_init(&req, hdev);
8440 if (bredr_sc_enabled(hdev))
8441 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
8443 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
8445 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
8447 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * cp->type selects the transport: BIT(BDADDR_BREDR) defers to
 * read_local_ssp_oob_req() when SSP is enabled (reply comes via the
 * completion callback); the LE public|random combination is answered
 * synchronously with EIR-encoded address, role, optional SC
 * confirmation/random values (from smp_generate_oob()) and flags.
 * Retrieval is refused while LE privacy is active because the current
 * RPA cannot be reported.  On success the reply is also broadcast as
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED to interested sockets.
 */
8454 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8455 void *data, u16 data_len)
8457 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8458 struct mgmt_rp_read_local_oob_ext_data *rp;
8461 u8 status, flags, role, addr[7], hash[16], rand[16];
8464 bt_dev_dbg(hdev, "sock %p", sk);
8466 if (hdev_is_powered(hdev)) {
8468 case BIT(BDADDR_BREDR):
8469 status = mgmt_bredr_support(hdev);
8475 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8476 status = mgmt_le_support(hdev);
8480 eir_len = 9 + 3 + 18 + 18 + 3;
8483 status = MGMT_STATUS_INVALID_PARAMS;
8488 status = MGMT_STATUS_NOT_POWERED;
8492 rp_len = sizeof(*rp) + eir_len;
8493 rp = kmalloc(rp_len, GFP_ATOMIC);
8504 case BIT(BDADDR_BREDR):
8505 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
/* Asynchronous path: reply sent from the HCI completion. */
8506 err = read_local_ssp_oob_req(hdev, sk, cp);
8507 hci_dev_unlock(hdev);
8511 status = MGMT_STATUS_FAILED;
8514 eir_len = eir_append_data(rp->eir, eir_len,
8516 hdev->dev_class, 3);
8519 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8520 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8521 smp_generate_oob(hdev, hash, rand) < 0) {
8522 hci_dev_unlock(hdev);
8523 status = MGMT_STATUS_FAILED;
8527 /* This should return the active RPA, but since the RPA
8528 * is only programmed on demand, it is really hard to fill
8529 * this in at the moment. For now disallow retrieving
8530 * local out-of-band data when privacy is in use.
8532 * Returning the identity address will not help here since
8533 * pairing happens before the identity resolving key is
8534 * known and thus the connection establishment happens
8535 * based on the RPA and not the identity address.
8537 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8538 hci_dev_unlock(hdev);
8539 status = MGMT_STATUS_REJECTED;
8543 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8544 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8545 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8546 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8547 memcpy(addr, &hdev->static_addr, 6);
8550 memcpy(addr, &hdev->bdaddr, 6);
8554 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8555 addr, sizeof(addr));
8557 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8562 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8563 &role, sizeof(role));
8565 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8566 eir_len = eir_append_data(rp->eir, eir_len,
8568 hash, sizeof(hash));
8570 eir_len = eir_append_data(rp->eir, eir_len,
8572 rand, sizeof(rand));
8575 flags = mgmt_get_adv_discov_flags(hdev);
8577 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8578 flags |= LE_AD_NO_BREDR;
8580 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8581 &flags, sizeof(flags));
8585 hci_dev_unlock(hdev);
8587 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8589 status = MGMT_STATUS_SUCCESS;
8592 rp->type = cp->type;
8593 rp->eir_len = cpu_to_le16(eir_len);
8595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8596 status, rp, sizeof(*rp) + eir_len);
8597 if (err < 0 || status)
8600 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8601 rp, sizeof(*rp) + eir_len,
8602 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Return the bitmask of MGMT advertising flags this controller supports.
 * MGMT_ADV_FLAG_TX_POWER is only advertised when the controller reported
 * a valid advertising TX power or supports extended advertising (where
 * Set Adv Param always returns a valid value); the secondary PHY flags
 * depend on the LE 2M/Coded PHY feature bits.
 */
8610 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8614 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8615 flags |= MGMT_ADV_FLAG_DISCOV;
8616 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8617 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8618 flags |= MGMT_ADV_FLAG_APPEARANCE;
8619 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8620 flags |= MGMT_ADV_PARAM_DURATION;
8621 flags |= MGMT_ADV_PARAM_TIMEOUT;
8622 flags |= MGMT_ADV_PARAM_INTERVALS;
8623 flags |= MGMT_ADV_PARAM_TX_POWER;
8624 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8626 /* In extended adv TX_POWER returned from Set Adv Param
8627 * will be always valid.
8629 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
8630 ext_adv_capable(hdev))
8631 flags |= MGMT_ADV_FLAG_TX_POWER;
8633 if (ext_adv_capable(hdev)) {
8634 flags |= MGMT_ADV_FLAG_SEC_1M;
8635 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8636 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8638 if (hdev->le_features[1] & HCI_LE_PHY_2M)
8639 flags |= MGMT_ADV_FLAG_SEC_2M;
8641 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8642 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler: report supported advertising flags,
 * data-size limits, the max/current number of instances, and the list of
 * currently registered instance IDs (one byte each, appended after the
 * fixed reply — hence rp_len = sizeof(*rp) + adv_instance_cnt).
 * Rejected when the controller is not LE capable or when the experimental
 * LL privacy mode is enabled.
 */
8648 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8649 void *data, u16 data_len)
8651 struct mgmt_rp_read_adv_features *rp;
8654 struct adv_info *adv_instance;
8655 u32 supported_flags;
8658 bt_dev_dbg(hdev, "sock %p", sk);
8660 if (!lmp_le_capable(hdev))
8661 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8662 MGMT_STATUS_REJECTED);
8664 /* Enabling the experimental LL Privacy support disables support for
8667 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
8668 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8669 MGMT_STATUS_NOT_SUPPORTED);
8673 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8674 rp = kmalloc(rp_len, GFP_ATOMIC);
8676 hci_dev_unlock(hdev);
8680 supported_flags = get_supported_adv_flags(hdev);
8682 rp->supported_flags = cpu_to_le32(supported_flags);
8683 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
8684 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
8685 rp->max_instances = hdev->le_num_of_adv_sets;
8686 rp->num_instances = hdev->adv_instance_cnt;
/* Append one byte per registered instance with its instance ID. */
8688 instance = rp->instance;
8689 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8690 *instance = adv_instance->instance;
8694 hci_dev_unlock(hdev);
8696 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8697 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Compute how many bytes the local-name EIR field would occupy by doing a
 * dry-run append into a scratch buffer (short name + 3-byte field header).
 */
8704 static u8 calculate_name_len(struct hci_dev *hdev)
8706 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
8708 return append_local_name(hdev, buf, 0);
/* Return how much advertising/scan-response data userspace may supply:
 * start from the HCI maximum and subtract the space reserved for every
 * field the kernel appends itself based on adv_flags (flags field,
 * TX power, local name, appearance).
 */
8711 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8714 u8 max_len = HCI_MAX_AD_LENGTH;
8717 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8718 MGMT_ADV_FLAG_LIMITED_DISCOV |
8719 MGMT_ADV_FLAG_MANAGED_FLAGS))
8722 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8725 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8726 max_len -= calculate_name_len(hdev)
8728 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel manages the Flags AD field for this instance. */
8735 static bool flags_managed(u32 adv_flags)
8737 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8738 MGMT_ADV_FLAG_LIMITED_DISCOV |
8739 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel manages the TX Power AD field for this instance. */
8742 static bool tx_power_managed(u32 adv_flags)
8744 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel manages the Local Name AD field for this instance. */
8747 static bool name_managed(u32 adv_flags)
8749 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel manages the Appearance AD field for this instance. */
8752 static bool appearance_managed(u32 adv_flags)
8754 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising or scan-response TLV data: it must
 * fit within the space left after kernel-managed fields (tlv_data_max_len)
 * and must not itself contain any field that the kernel manages according
 * to adv_flags (Flags, TX Power, Local Name, Appearance).
 * NOTE(review): loop-body reject paths (returns) are elided in this
 * extract; verify against the full source.
 */
8757 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8758 u8 len, bool is_adv_data)
8763 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8768 /* Make sure that the data is correctly formatted. */
8769 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* Flags are never allowed in adv data; in scan-rsp data they are also
 * rejected whenever the kernel manages them.
 */
8775 if (data[i + 1] == EIR_FLAGS &&
8776 (!is_adv_data || flags_managed(adv_flags)))
8779 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8782 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8785 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8788 if (data[i + 1] == EIR_APPEARANCE &&
8789 appearance_managed(adv_flags))
8792 /* If the current field length would exceed the total data
8793 * length, then it's invalid.
8795 if (i + cur_len >= len)
/* Validate the advertising flags requested by userspace: every bit must
 * be supported by the controller, and at most one secondary-PHY flag may
 * be set ((phy_flags & -phy_flags) isolates the lowest set bit, so the
 * XOR is non-zero exactly when more than one SEC_* bit is set).
 */
8802 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8804 u32 supported_flags, phy_flags;
8806 /* The current implementation only supports a subset of the specified
8807 * flags. Also need to check mutual exclusiveness of sec flags.
8809 supported_flags = get_supported_adv_flags(hdev);
8810 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8811 if (adv_flags & ~supported_flags ||
8812 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
/* True while any command that mutates advertising state is still pending,
 * in which case new advertising commands must be rejected as busy.
 */
8818 static bool adv_busy(struct hci_dev *hdev)
8820 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8821 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8822 pending_find(MGMT_OP_SET_LE, hdev) ||
8823 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
8824 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
/* HCI request completion callback shared by Add Advertising and Add
 * Extended Advertising Data. On failure, every instance still marked
 * pending is torn down (cancelling the rotation timer if it was the
 * current one) and its removal is signalled to userspace; the pending
 * MGMT command is then answered with the mapped status.
 */
8827 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
8830 struct mgmt_pending_cmd *cmd;
8831 struct mgmt_cp_add_advertising *cp;
8832 struct mgmt_rp_add_advertising rp;
8833 struct adv_info *adv_instance, *n;
8836 bt_dev_dbg(hdev, "status %u", status);
/* Either opcode may have triggered this request; look for both. */
8840 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
8842 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
8844 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
8845 if (!adv_instance->pending)
8849 adv_instance->pending = false;
8853 instance = adv_instance->instance;
8855 if (hdev->cur_adv_instance == instance)
8856 cancel_adv_timeout(hdev);
8858 hci_remove_adv_instance(hdev, instance);
8859 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
8866 rp.instance = cp->instance;
8869 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8870 mgmt_status(status));
8872 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8873 mgmt_status(status), &rp, sizeof(rp));
8875 mgmt_pending_remove(cmd);
8878 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_ADVERTISING handler: validate the request (LE support, LL
 * privacy off, instance ID within range, length/flags/TLV sanity), register
 * or replace the advertising instance, and schedule it over HCI when the
 * device is powered and not in legacy HCI_ADVERTISING mode. The MGMT reply
 * is deferred to add_advertising_complete() when an HCI request is issued.
 * NOTE(review): this extract elides some source lines (braces, gotos,
 * status arguments); comments describe only the visible statements.
 */
8881 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8882 void *data, u16 data_len)
8884 struct mgmt_cp_add_advertising *cp = data;
8885 struct mgmt_rp_add_advertising rp;
8888 u16 timeout, duration;
8889 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
8890 u8 schedule_instance = 0;
8891 struct adv_info *next_instance;
8893 struct mgmt_pending_cmd *cmd;
8894 struct hci_request req;
8896 bt_dev_dbg(hdev, "sock %p", sk);
8898 status = mgmt_le_support(hdev);
8900 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8903 /* Enabling the experimental LL Privacy support disables support for
8906 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
8907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8908 MGMT_STATUS_NOT_SUPPORTED);
8910 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8911 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8912 MGMT_STATUS_INVALID_PARAMS);
/* The command carries adv data immediately followed by scan-rsp data;
 * the total length must match exactly.
 */
8914 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8915 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8916 MGMT_STATUS_INVALID_PARAMS);
8918 flags = __le32_to_cpu(cp->flags);
8919 timeout = __le16_to_cpu(cp->timeout);
8920 duration = __le16_to_cpu(cp->duration);
8922 if (!requested_adv_flags_are_valid(hdev, flags))
8923 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8924 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running clock, i.e. a powered controller. */
8928 if (timeout && !hdev_is_powered(hdev)) {
8929 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8930 MGMT_STATUS_REJECTED);
8934 if (adv_busy(hdev)) {
8935 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Both the adv data and the trailing scan-rsp data must be valid TLVs. */
8940 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8941 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8942 cp->scan_rsp_len, false)) {
8943 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8944 MGMT_STATUS_INVALID_PARAMS);
8948 err = hci_add_adv_instance(hdev, cp->instance, flags,
8949 cp->adv_data_len, cp->data,
8951 cp->data + cp->adv_data_len,
8953 HCI_ADV_TX_POWER_NO_PREFERENCE,
8954 hdev->le_adv_min_interval,
8955 hdev->le_adv_max_interval);
8957 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8958 MGMT_STATUS_FAILED);
8962 /* Only trigger an advertising added event if a new instance was
8965 if (hdev->adv_instance_cnt > prev_instance_cnt)
8966 mgmt_advertising_added(sk, hdev, cp->instance);
8968 if (hdev->cur_adv_instance == cp->instance) {
8969 /* If the currently advertised instance is being changed then
8970 * cancel the current advertising and schedule the next
8971 * instance. If there is only one instance then the overridden
8972 * advertising data will be visible right away.
8974 cancel_adv_timeout(hdev);
8976 next_instance = hci_get_next_instance(hdev, cp->instance);
8978 schedule_instance = next_instance->instance;
8979 } else if (!hdev->adv_instance_timeout) {
8980 /* Immediately advertise the new instance if no other
8981 * instance is currently being advertised.
8983 schedule_instance = cp->instance;
8986 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8987 * there is no instance to be advertised then we have no HCI
8988 * communication to make. Simply return.
8990 if (!hdev_is_powered(hdev) ||
8991 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8992 !schedule_instance) {
8993 rp.instance = cp->instance;
8994 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8995 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8999 /* We're good to go, update advertising data, parameters, and start
9002 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
9009 hci_req_init(&req, hdev);
9011 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
9014 err = hci_req_run(&req, add_advertising_complete);
9017 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9018 MGMT_STATUS_FAILED);
9019 mgmt_pending_remove(cmd);
9023 hci_dev_unlock(hdev);
/* HCI request completion callback for Add Extended Advertising Params.
 * On success replies with the instance's negotiated TX power plus the
 * remaining adv/scan-rsp space for the chosen flags; on failure the
 * instance is removed (signalling removal first if it had already been
 * advertising) and a status-only reply is sent.
 */
9028 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
9031 struct mgmt_pending_cmd *cmd;
9032 struct mgmt_cp_add_ext_adv_params *cp;
9033 struct mgmt_rp_add_ext_adv_params rp;
9034 struct adv_info *adv_instance;
9037 BT_DBG("%s", hdev->name);
9041 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
9046 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9050 rp.instance = cp->instance;
9051 rp.tx_power = adv_instance->tx_power;
9053 /* While we're at it, inform userspace of the available space for this
9054 * advertisement, given the flags that will be used.
9056 flags = __le32_to_cpu(cp->flags);
9057 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9058 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9061 /* If this advertisement was previously advertising and we
9062 * failed to update it, we signal that it has been removed and
9063 * delete its structure
9065 if (!adv_instance->pending)
9066 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
9068 hci_remove_adv_instance(hdev, cp->instance);
9070 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9071 mgmt_status(status));
9074 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9075 mgmt_status(status), &rp, sizeof(rp));
9080 mgmt_pending_remove(cmd);
9082 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: first half of the two-step extended
 * advertising registration (params now, data via Add Ext Adv Data later).
 * Creates an instance with the requested parameters (falling back to hdev
 * defaults for any parameter whose MGMT_ADV_PARAM_* flag is unset). On
 * ext-adv capable controllers the parameters are programmed immediately
 * and the reply is deferred to add_ext_adv_params_complete(); otherwise
 * the reply is sent synchronously with a "no preference" TX power.
 * NOTE(review): this extract elides some source lines (braces, gotos,
 * default TX-power expression); verify against the full source.
 */
9085 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
9086 void *data, u16 data_len)
9088 struct mgmt_cp_add_ext_adv_params *cp = data;
9089 struct mgmt_rp_add_ext_adv_params rp;
9090 struct mgmt_pending_cmd *cmd = NULL;
9091 struct adv_info *adv_instance;
9092 struct hci_request req;
9093 u32 flags, min_interval, max_interval;
9094 u16 timeout, duration;
9099 BT_DBG("%s", hdev->name);
9101 status = mgmt_le_support(hdev);
9103 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9106 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9108 MGMT_STATUS_INVALID_PARAMS);
9110 /* The purpose of breaking add_advertising into two separate MGMT calls
9111 * for params and data is to allow more parameters to be added to this
9112 * structure in the future. For this reason, we verify that we have the
9113 * bare minimum structure we know of when the interface was defined. Any
9114 * extra parameters we don't know about will be ignored in this request.
9116 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
9117 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9118 MGMT_STATUS_INVALID_PARAMS);
9120 flags = __le32_to_cpu(cp->flags);
9122 if (!requested_adv_flags_are_valid(hdev, flags))
9123 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9124 MGMT_STATUS_INVALID_PARAMS);
9128 /* In new interface, we require that we are powered to register */
9129 if (!hdev_is_powered(hdev)) {
9130 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9131 MGMT_STATUS_REJECTED);
9135 if (adv_busy(hdev)) {
9136 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9141 /* Parse defined parameters from request, use defaults otherwise */
9142 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
9143 __le16_to_cpu(cp->timeout) : 0;
9145 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
9146 __le16_to_cpu(cp->duration) :
9147 hdev->def_multi_adv_rotation_duration;
9149 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9150 __le32_to_cpu(cp->min_interval) :
9151 hdev->le_adv_min_interval;
9153 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
9154 __le32_to_cpu(cp->max_interval) :
9155 hdev->le_adv_max_interval;
9157 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
9159 HCI_ADV_TX_POWER_NO_PREFERENCE;
9161 /* Create advertising instance with no advertising or response data */
9162 err = hci_add_adv_instance(hdev, cp->instance, flags,
9163 0, NULL, 0, NULL, timeout, duration,
9164 tx_power, min_interval, max_interval);
9167 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9168 MGMT_STATUS_FAILED);
9172 /* Submit request for advertising params if ext adv available */
9173 if (ext_adv_capable(hdev)) {
9174 hci_req_init(&req, hdev);
9175 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9177 /* Updating parameters of an active instance will return a
9178 * Command Disallowed error, so we must first disable the
9179 * instance if it is active.
9181 if (!adv_instance->pending)
9182 __hci_req_disable_ext_adv_instance(&req, cp->instance);
9184 __hci_req_setup_ext_adv_instance(&req, cp->instance);
9186 err = hci_req_run(&req, add_ext_adv_params_complete);
9189 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
9190 hdev, data, data_len);
/* Clean up the just-created instance if the request could not start. */
9193 hci_remove_adv_instance(hdev, cp->instance);
9198 rp.instance = cp->instance;
9199 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9200 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9201 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9202 err = mgmt_cmd_complete(sk, hdev->id,
9203 MGMT_OP_ADD_EXT_ADV_PARAMS,
9204 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9208 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the two-step extended
 * advertising registration. Attaches adv/scan-rsp data to the instance
 * created by Add Ext Adv Params and either programs it immediately (ext
 * adv controllers) or schedules it through the software rotation used on
 * legacy controllers. On any validation failure the half-initialised
 * instance is removed again (clear_new_instance path).
 * NOTE(review): this extract elides some source lines (braces, labels,
 * goto targets); comments describe only the visible statements.
 */
9213 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9216 struct mgmt_cp_add_ext_adv_data *cp = data;
9217 struct mgmt_rp_add_ext_adv_data rp;
9218 u8 schedule_instance = 0;
9219 struct adv_info *next_instance;
9220 struct adv_info *adv_instance;
9222 struct mgmt_pending_cmd *cmd;
9223 struct hci_request req;
9225 BT_DBG("%s", hdev->name);
/* The instance must have been registered via Add Ext Adv Params first. */
9229 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9231 if (!adv_instance) {
9232 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9233 MGMT_STATUS_INVALID_PARAMS);
9237 /* In new interface, we require that we are powered to register */
9238 if (!hdev_is_powered(hdev)) {
9239 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9240 MGMT_STATUS_REJECTED);
9241 goto clear_new_instance;
9244 if (adv_busy(hdev)) {
9245 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9247 goto clear_new_instance;
9250 /* Validate new data */
9251 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9252 cp->adv_data_len, true) ||
9253 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9254 cp->adv_data_len, cp->scan_rsp_len, false)) {
9255 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9256 MGMT_STATUS_INVALID_PARAMS);
9257 goto clear_new_instance;
9260 /* Set the data in the advertising instance */
9261 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9262 cp->data, cp->scan_rsp_len,
9263 cp->data + cp->adv_data_len);
9265 /* We're good to go, update advertising data, parameters, and start
9269 hci_req_init(&req, hdev);
9271 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
9273 if (ext_adv_capable(hdev)) {
9274 __hci_req_update_adv_data(&req, cp->instance);
9275 __hci_req_update_scan_rsp_data(&req, cp->instance);
9276 __hci_req_enable_ext_advertising(&req, cp->instance);
9279 /* If using software rotation, determine next instance to use */
9281 if (hdev->cur_adv_instance == cp->instance) {
9282 /* If the currently advertised instance is being changed
9283 * then cancel the current advertising and schedule the
9284 * next instance. If there is only one instance then the
9285 * overridden advertising data will be visible right
9288 cancel_adv_timeout(hdev);
9290 next_instance = hci_get_next_instance(hdev,
9293 schedule_instance = next_instance->instance;
9294 } else if (!hdev->adv_instance_timeout) {
9295 /* Immediately advertise the new instance if no other
9296 * instance is currently being advertised.
9298 schedule_instance = cp->instance;
9301 /* If the HCI_ADVERTISING flag is set or there is no instance to
9302 * be advertised then we have no HCI communication to make.
9305 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9306 !schedule_instance) {
9307 if (adv_instance->pending) {
9308 mgmt_advertising_added(sk, hdev, cp->instance);
9309 adv_instance->pending = false;
9311 rp.instance = cp->instance;
9312 err = mgmt_cmd_complete(sk, hdev->id,
9313 MGMT_OP_ADD_EXT_ADV_DATA,
9314 MGMT_STATUS_SUCCESS, &rp,
9319 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
9323 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9327 goto clear_new_instance;
/* Reply comes from add_advertising_complete() once the request finishes. */
9331 err = hci_req_run(&req, add_advertising_complete);
9334 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9335 MGMT_STATUS_FAILED);
9336 mgmt_pending_remove(cmd);
9337 goto clear_new_instance;
9340 /* We were successful in updating data, so trigger advertising_added
9341 * event if this is an instance that wasn't previously advertising. If
9342 * a failure occurs in the requests we initiated, we will remove the
9343 * instance again in add_advertising_complete
9345 if (adv_instance->pending)
9346 mgmt_advertising_added(sk, hdev, cp->instance)
9351 hci_remove_adv_instance(hdev, cp->instance);
9354 hci_dev_unlock(hdev);
/* HCI request completion callback for Remove Advertising. The instance
 * is already gone by now, so the reply is always SUCCESS — a non-zero
 * status only means disabling advertising failed.
 */
9359 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
9362 struct mgmt_pending_cmd *cmd;
9363 struct mgmt_cp_remove_advertising *cp;
9364 struct mgmt_rp_remove_advertising rp;
9366 bt_dev_dbg(hdev, "status %u", status);
9370 /* A failure status here only means that we failed to disable
9371 * advertising. Otherwise, the advertising instance has been removed,
9372 * so report success.
9374 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
9379 rp.instance = cp->instance;
9381 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
9383 mgmt_pending_remove(cmd);
9386 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler: remove one advertising instance
 * (or all of them when cp->instance is 0), disabling it over HCI first
 * when necessary. The MGMT reply is deferred to
 * remove_advertising_complete() whenever an HCI request is issued.
 *
 * Fix: the LL-privacy rejection path replied with the wrong opcode
 * (MGMT_OP_SET_ADVERTISING); userspace matches command-status events by
 * opcode, so the caller of Remove Advertising would never see the reply.
 * Every other status reply in this handler already uses
 * MGMT_OP_REMOVE_ADVERTISING.
 *
 * NOTE(review): this extract elides some source lines (braces, gotos,
 * busy-status argument); only the opcode above was changed.
 */
9389 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9390 void *data, u16 data_len)
9392 struct mgmt_cp_remove_advertising *cp = data;
9393 struct mgmt_rp_remove_advertising rp;
9394 struct mgmt_pending_cmd *cmd;
9395 struct hci_request req;
9398 bt_dev_dbg(hdev, "sock %p", sk);
9400 /* Enabling the experimental LL Privacy support disables support for
9403 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9404 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9405 MGMT_STATUS_NOT_SUPPORTED);
/* A non-zero instance must actually exist; instance 0 means "all". */
9409 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9410 err = mgmt_cmd_status(sk, hdev->id,
9411 MGMT_OP_REMOVE_ADVERTISING,
9412 MGMT_STATUS_INVALID_PARAMS);
9416 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9417 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9418 pending_find(MGMT_OP_SET_LE, hdev)) {
9419 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9424 if (list_empty(&hdev->adv_instances)) {
9425 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9426 MGMT_STATUS_INVALID_PARAMS);
9430 hci_req_init(&req, hdev);
9432 /* If we use extended advertising, instance is disabled and removed */
9433 if (ext_adv_capable(hdev)) {
9434 __hci_req_disable_ext_adv_instance(&req, cp->instance);
9435 __hci_req_remove_ext_adv_instance(&req, cp->instance);
9438 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
9440 if (list_empty(&hdev->adv_instances))
9441 __hci_req_disable_advertising(&req);
9443 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
9444 * flag is set or the device isn't powered then we have no HCI
9445 * communication to make. Simply return.
9447 if (skb_queue_empty(&req.cmd_q) ||
9448 !hdev_is_powered(hdev) ||
9449 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
9450 hci_req_purge(&req);
9451 rp.instance = cp->instance;
9452 err = mgmt_cmd_complete(sk, hdev->id,
9453 MGMT_OP_REMOVE_ADVERTISING,
9454 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9458 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9465 err = hci_req_run(&req, remove_advertising_complete);
9467 mgmt_pending_remove(cmd);
9470 hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_SIZE_INFO handler: for a given instance ID and set of
 * advertising flags, report how many bytes of adv data and scan-rsp data
 * userspace may supply (after subtracting kernel-managed fields). Pure
 * query — no state is changed.
 */
9475 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9476 void *data, u16 data_len)
9478 struct mgmt_cp_get_adv_size_info *cp = data;
9479 struct mgmt_rp_get_adv_size_info rp;
9480 u32 flags, supported_flags;
9483 bt_dev_dbg(hdev, "sock %p", sk);
9485 if (!lmp_le_capable(hdev))
9486 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9487 MGMT_STATUS_REJECTED);
9489 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9490 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9491 MGMT_STATUS_INVALID_PARAMS);
9493 flags = __le32_to_cpu(cp->flags);
9495 /* The current implementation only supports a subset of the specified
9498 supported_flags = get_supported_adv_flags(hdev);
9499 if (flags & ~supported_flags)
9500 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9501 MGMT_STATUS_INVALID_PARAMS);
9503 rp.instance = cp->instance;
9504 rp.flags = cp->flags;
9505 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9506 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9508 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9509 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table of MGMT command handlers, indexed by opcode (entry 0 is
 * the unused "no command" slot). Each entry names the handler, the
 * expected parameter size, and optional HCI_MGMT_* flags (e.g. UNTRUSTED
 * for commands allowed on untrusted sockets, UNCONFIGURED for commands
 * valid on not-yet-configured controllers, HDEV_OPTIONAL for commands
 * that may target no specific controller).
 */
9514 static const struct hci_mgmt_handler mgmt_handlers[] = {
9515 { NULL }, /* 0x0000 (no command) */
9516 { read_version, MGMT_READ_VERSION_SIZE,
9518 HCI_MGMT_UNTRUSTED },
9519 { read_commands, MGMT_READ_COMMANDS_SIZE,
9521 HCI_MGMT_UNTRUSTED },
9522 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9524 HCI_MGMT_UNTRUSTED },
9525 { read_controller_info, MGMT_READ_INFO_SIZE,
9526 HCI_MGMT_UNTRUSTED },
9527 { set_powered, MGMT_SETTING_SIZE },
9528 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9529 { set_connectable, MGMT_SETTING_SIZE },
9530 { set_fast_connectable, MGMT_SETTING_SIZE },
9531 { set_bondable, MGMT_SETTING_SIZE },
9532 { set_link_security, MGMT_SETTING_SIZE },
9533 { set_ssp, MGMT_SETTING_SIZE },
9534 { set_hs, MGMT_SETTING_SIZE },
9535 { set_le, MGMT_SETTING_SIZE },
9536 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9537 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9538 { add_uuid, MGMT_ADD_UUID_SIZE },
9539 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9540 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9542 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9544 { disconnect, MGMT_DISCONNECT_SIZE },
9545 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9546 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9547 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9548 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9549 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9550 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9551 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9552 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9553 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9554 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9555 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9556 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9557 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9559 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9560 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9561 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9562 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9563 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9564 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9565 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9566 { set_advertising, MGMT_SETTING_SIZE },
9567 { set_bredr, MGMT_SETTING_SIZE },
9568 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9569 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9570 { set_secure_conn, MGMT_SETTING_SIZE },
9571 { set_debug_keys, MGMT_SETTING_SIZE },
9572 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9573 { load_irks, MGMT_LOAD_IRKS_SIZE,
9575 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9576 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9577 { add_device, MGMT_ADD_DEVICE_SIZE },
9578 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9579 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9581 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9583 HCI_MGMT_UNTRUSTED },
9584 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9585 HCI_MGMT_UNCONFIGURED |
9586 HCI_MGMT_UNTRUSTED },
9587 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9588 HCI_MGMT_UNCONFIGURED },
9589 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9590 HCI_MGMT_UNCONFIGURED },
9591 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9593 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9594 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9596 HCI_MGMT_UNTRUSTED },
9597 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9598 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9600 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9601 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9602 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9603 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9604 HCI_MGMT_UNTRUSTED },
9605 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9606 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9607 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9608 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9610 { set_wideband_speech, MGMT_SETTING_SIZE },
9611 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9612 HCI_MGMT_UNTRUSTED },
9613 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9614 HCI_MGMT_UNTRUSTED |
9615 HCI_MGMT_HDEV_OPTIONAL },
9616 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9618 HCI_MGMT_HDEV_OPTIONAL },
9619 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9620 HCI_MGMT_UNTRUSTED },
9621 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9623 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9624 HCI_MGMT_UNTRUSTED },
9625 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9627 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9628 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9629 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9630 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9632 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9633 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9635 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9637 { add_adv_patterns_monitor_rssi,
9638 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
/* Tizen vendor-specific MGMT command handlers (opcodes defined in
 * net/bluetooth/mgmt_tizen.h, included at the top of this file).
 */
9643 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
9644 { NULL }, /* 0x0000 (no command) */
9645 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
9646 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
9648 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
9650 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
9651 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
9652 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
9653 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
9654 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
9655 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
/* Announce a newly registered controller index to userspace. Raw-only
 * devices (HCI_QUIRK_RAW_DEVICE) are never exposed over MGMT. Depending
 * on configuration state, sends UNCONF_INDEX_ADDED or INDEX_ADDED, and
 * always an EXT_INDEX_ADDED event for listeners of the extended events.
 * NOTE(review): switch case labels for hdev->dev_type are elided in this
 * extract; verify against the full source.
 */
9659 void mgmt_index_added(struct hci_dev *hdev)
9661 struct mgmt_ev_ext_index ev;
9663 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9666 switch (hdev->dev_type) {
9668 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9669 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9670 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9673 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9674 HCI_MGMT_INDEX_EVENTS);
9687 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9688 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller index removal to userspace. All still-pending MGMT
 * commands for this controller are failed with INVALID_INDEX first, then
 * UNCONF_INDEX_REMOVED or INDEX_REMOVED is sent depending on the
 * configuration state, plus an EXT_INDEX_REMOVED event. Raw-only devices
 * are skipped, mirroring mgmt_index_added().
 */
9691 void mgmt_index_removed(struct hci_dev *hdev)
9693 struct mgmt_ev_ext_index ev;
9694 u8 status = MGMT_STATUS_INVALID_INDEX;
9696 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9699 switch (hdev->dev_type) {
9701 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9703 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9704 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9705 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9708 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9709 HCI_MGMT_INDEX_EVENTS);
9722 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9723 HCI_MGMT_EXT_INDEX_EVENTS);
9726 /* This function requires the caller holds hdev->lock */
/* Re-queue every LE connection parameter entry onto the appropriate
 * action list after power-on: DIRECT/ALWAYS entries go to pend_le_conns,
 * REPORT entries to pend_le_reports. Each entry is first detached
 * (list_del_init) to cover the AUTO_OFF case where it may still be
 * linked from before the "power off".
 */
9727 static void restart_le_actions(struct hci_dev *hdev)
9729 struct hci_conn_params *p;
9731 list_for_each_entry(p, &hdev->le_conn_params, list) {
9732 /* Needed for AUTO_OFF case where might not "really"
9733 * have been powered off.
9735 list_del_init(&p->action);
9737 switch (p->auto_connect) {
9738 case HCI_AUTO_CONN_DIRECT:
9739 case HCI_AUTO_CONN_ALWAYS:
9740 list_add(&p->action, &hdev->pend_le_conns);
9742 case HCI_AUTO_CONN_REPORT:
9743 list_add(&p->action, &hdev->pend_le_reports);
/* Called when a power-on attempt finishes. On success, restart the queued
 * LE auto-connect actions and the background scan. In all cases, answer
 * every pending Set Powered command and broadcast the new settings to the
 * socket(s) collected in the lookup match.
 */
9751 void mgmt_power_on(struct hci_dev *hdev, int err)
9753 struct cmd_lookup match = { NULL, hdev };
9755 bt_dev_dbg(hdev, "err %d", err);
9760 restart_le_actions(hdev);
9761 hci_update_background_scan(hdev);
9764 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9766 new_settings(hdev, match.sk);
9771 hci_dev_unlock(hdev);
/* Completion handler for powering a controller off.  Finishes pending
 * SET_POWERED commands, fails all other pending commands with either
 * INVALID_INDEX (if the hdev is being unregistered) or NOT_POWERED,
 * zeroes the advertised class of device, and broadcasts new settings.
 */
9774 void __mgmt_power_off(struct hci_dev *hdev)
9776 struct cmd_lookup match = { NULL, hdev };
9777 u8 status, zero_cod[] = { 0, 0, 0 };
9779 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9781 /* If the power off is because of hdev unregistration let
9782 * use the appropriate INVALID_INDEX status. Otherwise use
9783 * NOT_POWERED. We cover both scenarios here since later in
9784 * mgmt_index_removed() any hci_conn callbacks will have already
9785 * been triggered, potentially causing misleading DISCONNECTED
9788 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9789 status = MGMT_STATUS_INVALID_INDEX;
9791 status = MGMT_STATUS_NOT_POWERED;
/* opcode 0 == fail every remaining pending command */
9793 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Only emit CLASS_OF_DEV_CHANGED if the class was actually non-zero */
9795 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9796 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9797 zero_cod, sizeof(zero_cod),
9798 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9799 ext_info_changed(hdev, NULL);
9802 new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command.  -ERFKILL maps to the dedicated
 * RFKILLED status so userspace can distinguish rfkill from generic
 * failures.
 */
9808 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9810 struct mgmt_pending_cmd *cmd;
9813 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9817 if (err == -ERFKILL)
9818 status = MGMT_STATUS_RFKILLED;
9820 status = MGMT_STATUS_FAILED;
9822 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9824 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key so
 * userspace can persist it; store_hint tells userspace whether the key
 * should be stored permanently.
 */
9827 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9830 struct mgmt_ev_new_link_key ev;
9832 memset(&ev, 0, sizeof(ev));
9834 ev.store_hint = persistent;
9835 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9836 ev.key.addr.type = BDADDR_BREDR;
9837 ev.key.type = key->type;
9838 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9839 ev.key.pin_len = key->pin_len;
9841 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated flag onto the
 * corresponding MGMT_LTK_* wire value.  Falls back to UNAUTHENTICATED
 * for unknown types.
 */
9844 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9846 switch (ltk->type) {
9848 case SMP_LTK_RESPONDER:
9849 if (ltk->authenticated)
9850 return MGMT_LTK_AUTHENTICATED;
9851 return MGMT_LTK_UNAUTHENTICATED;
9853 if (ltk->authenticated)
9854 return MGMT_LTK_P256_AUTH;
9855 return MGMT_LTK_P256_UNAUTH;
9856 case SMP_LTK_P256_DEBUG:
9857 return MGMT_LTK_P256_DEBUG;
9860 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY so userspace can persist an LE LTK.
 * Keys for non-identity random addresses get store_hint = 0 since the
 * peer address will change and the key would be useless.
 */
9863 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9865 struct mgmt_ev_new_long_term_key ev;
9867 memset(&ev, 0, sizeof(ev));
9869 /* Devices using resolvable or non-resolvable random addresses
9870 * without providing an identity resolving key don't require
9871 * to store long term keys. Their addresses will change the
9874 * Only when a remote device provides an identity address
9875 * make sure the long term key is stored. If the remote
9876 * identity is known, the long term keys are internally
9877 * mapped to the identity address. So allow static random
9878 * and public addresses here.
/* Top two address bits 11 == static random (an identity address) */
9880 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9881 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9882 ev.store_hint = 0x00;
9884 ev.store_hint = persistent;
9886 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9887 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9888 ev.key.type = mgmt_ltk_type(key);
9889 ev.key.enc_size = key->enc_size;
9890 ev.key.ediv = key->ediv;
9891 ev.key.rand = key->rand;
9893 if (key->type == SMP_LTK)
9894 ev.key.initiator = 1;
9896 /* Make sure we copy only the significant bytes based on the
9897 * encryption key size, and set the rest of the value to zeroes.
9899 memcpy(ev.key.val, key->val, key->enc_size);
9900 memset(ev.key.val + key->enc_size, 0,
9901 sizeof(ev.key.val) - key->enc_size);
9903 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK carrying the identity resolving key together
 * with the current RPA, letting userspace map future resolvable
 * addresses back to this identity.
 */
9906 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9908 struct mgmt_ev_new_irk ev;
9910 memset(&ev, 0, sizeof(ev));
9912 ev.store_hint = persistent;
9914 bacpy(&ev.rpa, &irk->rpa);
9915 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9916 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9917 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9919 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key.  As with
 * LTKs, keys bound to non-identity random addresses are flagged as
 * not worth storing (store_hint = 0).
 */
9922 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9925 struct mgmt_ev_new_csrk ev;
9927 memset(&ev, 0, sizeof(ev));
9929 /* Devices using resolvable or non-resolvable random addresses
9930 * without providing an identity resolving key don't require
9931 * to store signature resolving keys. Their addresses will change
9932 * the next time around.
9934 * Only when a remote device provides an identity address
9935 * make sure the signature resolving key is stored. So allow
9936 * static random and public addresses here.
/* Top two address bits 11 == static random (an identity address) */
9938 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9939 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9940 ev.store_hint = 0x00;
9942 ev.store_hint = persistent;
9944 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9945 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9946 ev.key.type = csrk->type;
9947 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9949 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM announcing connection parameters a peer
 * requested, so userspace can persist them.  Parameters for
 * non-identity addresses are dropped (nothing stable to key them on).
 */
9952 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9953 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9954 u16 max_interval, u16 latency, u16 timeout)
9956 struct mgmt_ev_new_conn_param ev;
9958 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9961 memset(&ev, 0, sizeof(ev));
9962 bacpy(&ev.addr.bdaddr, bdaddr);
9963 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9964 ev.store_hint = store_hint;
/* Wire format is little-endian per the mgmt protocol */
9965 ev.min_interval = cpu_to_le16(min_interval);
9966 ev.max_interval = cpu_to_le16(max_interval);
9967 ev.latency = cpu_to_le16(latency);
9968 ev.timeout = cpu_to_le16(timeout);
9970 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED with EIR data describing the peer.
 * For LE connections the cached advertising data is used as-is;
 * otherwise the remote name and (if known) class of device are
 * appended as EIR fields.
 */
9973 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9974 u8 *name, u8 name_len)
9977 struct mgmt_ev_device_connected *ev = (void *) buf;
9981 bacpy(&ev->addr.bdaddr, &conn->dst);
9982 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9985 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9987 ev->flags = __cpu_to_le32(flags);
9989 /* We must ensure that the EIR Data fields are ordered and
9990 * unique. Keep it simple for now and avoid the problem by not
9991 * adding any BR/EDR data to the LE adv.
9993 if (conn->le_adv_data_len > 0) {
9994 memcpy(&ev->eir[eir_len],
9995 conn->le_adv_data, conn->le_adv_data_len);
9996 eir_len = conn->le_adv_data_len;
9999 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* All-zero class means "unknown" and is therefore omitted */
10002 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
10003 eir_len = eir_append_data(ev->eir, eir_len,
10005 conn->dev_class, 3);
10008 ev->eir_len = cpu_to_le16(eir_len);
10010 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
10011 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command with success and hand its socket back through *data so the
 * caller can attribute the DEVICE_DISCONNECTED event to it.
 */
10014 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
10016 struct sock **sk = data;
10018 cmd->cmd_complete(cmd, 0);
10023 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command — notify about the unpaired device, then complete and
 * remove the command.  data is the hci_dev.
 */
10026 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
10028 struct hci_dev *hdev = data;
10029 struct mgmt_cp_unpair_device *cp = cmd->param;
10031 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
10033 cmd->cmd_complete(cmd, 0);
10034 mgmt_pending_remove(cmd);
/* Return true when a SET_POWERED(off) command is currently pending,
 * i.e. the controller is in the middle of powering down (elided lines
 * presumably inspect cmd->param — TODO confirm against full source).
 */
10037 bool mgmt_powering_down(struct hci_dev *hdev)
10039 struct mgmt_pending_cmd *cmd;
10040 struct mgmt_mode *cp;
10042 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED and complete any pending
 * DISCONNECT commands.  If this was the last connection during a
 * power-down, the deferred power_off work is flushed immediately.
 */
10053 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
10054 u8 link_type, u8 addr_type, u8 reason,
10055 bool mgmt_connected)
10057 struct mgmt_ev_device_disconnected ev;
10058 struct sock *sk = NULL;
10060 /* The connection is still in hci_conn_hash so test for 1
10061 * instead of 0 to know if this is the last one.
10063 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10064 cancel_delayed_work(&hdev->power_off);
10065 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10068 if (!mgmt_connected)
10071 if (link_type != ACL_LINK && link_type != LE_LINK)
/* disconnect_rsp hands back the initiating socket via &sk */
10074 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
10076 bacpy(&ev.addr.bdaddr, bdaddr);
10077 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10078 ev.reason = reason;
10080 /* Report disconnects due to suspend */
10081 if (hdev->suspended)
10082 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
10084 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
10089 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A disconnect attempt failed: finish any pending UNPAIR_DEVICE
 * commands, then complete the matching pending DISCONNECT command
 * (same address and address type) with the translated HCI status.
 */
10093 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10094 u8 link_type, u8 addr_type, u8 status)
10096 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
10097 struct mgmt_cp_disconnect *cp;
10098 struct mgmt_pending_cmd *cmd;
10100 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10103 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
10109 if (bacmp(bdaddr, &cp->addr.bdaddr))
10112 if (cp->addr.type != bdaddr_type)
10115 cmd->cmd_complete(cmd, mgmt_status(status));
10116 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED for an outgoing connection attempt that
 * failed; also flushes deferred power-off work if this was the last
 * connection while powering down.
 */
10119 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10120 u8 addr_type, u8 status)
10122 struct mgmt_ev_connect_failed ev;
10124 /* The connection is still in hci_conn_hash so test for 1
10125 * instead of 0 to know if this is the last one.
10127 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10128 cancel_delayed_work(&hdev->power_off);
10129 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10132 bacpy(&ev.addr.bdaddr, bdaddr);
10133 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10134 ev.status = mgmt_status(status);
10136 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Ask userspace for a PIN code via MGMT_EV_PIN_CODE_REQUEST; the
 * 'secure' flag indicates a 16-digit PIN is required.
 */
10139 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
10141 struct mgmt_ev_pin_code_request ev;
10143 bacpy(&ev.addr.bdaddr, bdaddr);
10144 ev.addr.type = BDADDR_BREDR;
10145 ev.secure = secure;
10147 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the translated HCI
 * status once the controller has acknowledged it.
 */
10150 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10153 struct mgmt_pending_cmd *cmd;
10155 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
10159 cmd->cmd_complete(cmd, mgmt_status(status));
10160 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the translated
 * HCI status (mirror of mgmt_pin_code_reply_complete()).
 */
10163 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10166 struct mgmt_pending_cmd *cmd;
10168 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
10172 cmd->cmd_complete(cmd, mgmt_status(status));
10173 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a numeric-comparison pairing value via
 * MGMT_EV_USER_CONFIRM_REQUEST.  Returns the mgmt_event() result.
 */
10176 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10177 u8 link_type, u8 addr_type, u32 value,
10180 struct mgmt_ev_user_confirm_request ev;
10182 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10184 bacpy(&ev.addr.bdaddr, bdaddr);
10185 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10186 ev.confirm_hint = confirm_hint;
10187 ev.value = cpu_to_le32(value);
10189 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey via MGMT_EV_USER_PASSKEY_REQUEST.
 * Returns the mgmt_event() result.
 */
10193 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10194 u8 link_type, u8 addr_type)
10196 struct mgmt_ev_user_passkey_request ev;
10198 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10200 bacpy(&ev.addr.bdaddr, bdaddr);
10201 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10203 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey
 * (neg-)reply commands: find the pending command by opcode, complete
 * it with the translated status, and remove it.
 */
10207 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10208 u8 link_type, u8 addr_type, u8 status,
10211 struct mgmt_pending_cmd *cmd;
10213 cmd = pending_find(opcode, hdev);
10217 cmd->cmd_complete(cmd, mgmt_status(status));
10218 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY command. */
10223 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10224 u8 link_type, u8 addr_type, u8 status)
10226 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10227 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY command. */
10230 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10231 u8 link_type, u8 addr_type, u8 status)
10233 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10235 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY command. */
10238 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10239 u8 link_type, u8 addr_type, u8 status)
10241 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10242 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY command. */
10245 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10246 u8 link_type, u8 addr_type, u8 status)
10248 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10250 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Show the passkey (and digits-entered count) to userspace via
 * MGMT_EV_PASSKEY_NOTIFY during passkey-entry pairing.
 */
10253 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
10254 u8 link_type, u8 addr_type, u32 passkey,
10257 struct mgmt_ev_passkey_notify ev;
10259 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10261 bacpy(&ev.addr.bdaddr, bdaddr);
10262 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10263 ev.passkey = __cpu_to_le32(passkey);
10264 ev.entered = entered;
10266 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
10269 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
10271 struct mgmt_ev_auth_failed ev;
10272 struct mgmt_pending_cmd *cmd;
10273 u8 status = mgmt_status(hci_status);
10275 bacpy(&ev.addr.bdaddr, &conn->dst);
10276 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10277 ev.status = status;
10279 cmd = find_pairing(conn);
10281 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
10282 cmd ? cmd->sk : NULL);
10285 cmd->cmd_complete(cmd, status);
10286 mgmt_pending_remove(cmd);
/* Controller finished toggling authentication: on error, fail pending
 * SET_LINK_SECURITY commands; on success, sync HCI_LINK_SECURITY with
 * the HCI_AUTH flag, answer pending commands and broadcast settings
 * if the flag actually changed.
 */
10290 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
10292 struct cmd_lookup match = { NULL, hdev };
10296 u8 mgmt_err = mgmt_status(status);
10297 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
10298 cmd_status_rsp, &mgmt_err);
10302 if (test_bit(HCI_AUTH, &hdev->flags))
/* test_and_set/clear return the previous value, so 'changed' is
 * true only when the flag transitions */
10303 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10305 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10307 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10311 new_settings(hdev, match.sk);
10314 sock_put(match.sk);
/* Queue an HCI Write Extended Inquiry Response command that blanks
 * the EIR data, and clear the cached copy in hdev->eir.  No-op on
 * controllers without extended inquiry support.
 */
10317 static void clear_eir(struct hci_request *req)
10319 struct hci_dev *hdev = req->hdev;
10320 struct hci_cp_write_eir cp;
10322 if (!lmp_ext_inq_capable(hdev))
10325 memset(hdev->eir, 0, sizeof(hdev->eir));
10327 memset(&cp, 0, sizeof(cp));
10329 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Controller finished toggling Simple Secure Pairing.  On error the
 * SSP/HS flags are rolled back and pending SET_SSP commands failed;
 * on success the flags are synced, pending commands answered, and the
 * EIR (plus SSP debug mode if enabled) updated via an hci_request.
 */
10332 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
10334 struct cmd_lookup match = { NULL, hdev };
10335 struct hci_request req;
10336 bool changed = false;
10339 u8 mgmt_err = mgmt_status(status);
/* Roll back an optimistic enable that the controller rejected */
10341 if (enable && hci_dev_test_and_clear_flag(hdev,
10342 HCI_SSP_ENABLED)) {
10343 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
10344 new_settings(hdev, NULL);
10347 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
10353 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
10355 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
10357 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed requires SSP, so disabling SSP also clears HS */
10360 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
10363 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
10366 new_settings(hdev, match.sk);
10369 sock_put(match.sk);
10371 hci_req_init(&req, hdev);
10373 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10374 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
10375 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
10376 sizeof(enable), &enable);
10377 __hci_req_update_eir(&req);
10382 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending
 * command's socket into the cmd_lookup, taking a reference that the
 * caller must drop with sock_put().
 */
10385 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10387 struct cmd_lookup *match = data;
10389 if (match->sk == NULL) {
10390 match->sk = cmd->sk;
10391 sock_hold(match->sk);
/* Class-of-device write completed: find the socket of whichever
 * pending command (SET_DEV_CLASS / ADD_UUID / REMOVE_UUID) triggered
 * it, broadcast CLASS_OF_DEV_CHANGED to everyone else, and refresh
 * extended controller info.
 */
10395 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10398 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10400 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
10401 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
10402 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10405 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10406 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10407 ext_info_changed(hdev, NULL);
/* Drop the reference sk_lookup() took */
10411 sock_put(match.sk);
/* Local name write completed: cache the new name, and broadcast
 * LOCAL_NAME_CHANGED / extended info — except when the write was part
 * of the power-on sequence, in which case no events are emitted.
 */
10414 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10416 struct mgmt_cp_set_local_name ev;
10417 struct mgmt_pending_cmd *cmd;
10422 memset(&ev, 0, sizeof(ev));
10423 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10424 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10426 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10428 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10430 /* If this is a HCI command related to powering on the
10431 * HCI dev don't send any mgmt signals.
10433 if (pending_find(MGMT_OP_SET_POWERED, hdev))
/* Skip the requester's own socket — it gets a command response */
10437 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10438 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10439 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Linear search: true iff the 128-bit uuid appears in the uuids
 * array of uuid_count entries.
 */
10442 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10446 for (i = 0; i < uuid_count; i++) {
10447 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data structure-by-structure and return
 * whether any advertised UUID (16-, 32- or 128-bit, expanded against
 * the Bluetooth base UUID as needed) is present in the filter list.
 * Bounds-checks each field against the remaining buffer length.
 */
10454 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10458 while (parsed < eir_len) {
10459 u8 field_len = eir[0];
10463 if (field_len == 0)
/* Malformed field running past the buffer — stop parsing */
10466 if (eir_len - parsed < field_len + 1)
10470 case EIR_UUID16_ALL:
10471 case EIR_UUID16_SOME:
/* 16-bit UUIDs occupy bytes 12-13 of the base UUID (little-endian) */
10472 for (i = 0; i + 3 <= field_len; i += 2) {
10473 memcpy(uuid, bluetooth_base_uuid, 16);
10474 uuid[13] = eir[i + 3];
10475 uuid[12] = eir[i + 2];
10476 if (has_uuid(uuid, uuid_count, uuids))
10480 case EIR_UUID32_ALL:
10481 case EIR_UUID32_SOME:
10482 for (i = 0; i + 5 <= field_len; i += 4) {
10483 memcpy(uuid, bluetooth_base_uuid, 16);
10484 uuid[15] = eir[i + 5];
10485 uuid[14] = eir[i + 4];
10486 uuid[13] = eir[i + 3];
10487 uuid[12] = eir[i + 2];
10488 if (has_uuid(uuid, uuid_count, uuids))
10492 case EIR_UUID128_ALL:
10493 case EIR_UUID128_SOME:
10494 for (i = 0; i + 17 <= field_len; i += 16) {
10495 memcpy(uuid, eir + i + 2, 16);
10496 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length byte + payload) */
10502 parsed += field_len + 1;
10503 eir += field_len + 1;
/* Schedule an LE scan restart (used with the strict-duplicate-filter
 * quirk so RSSI updates keep flowing).  Does nothing when not
 * scanning, or when the current scan window is about to end anyway.
 */
10509 static void restart_le_scan(struct hci_dev *hdev)
10511 /* If controller is not scanning we are done. */
10512 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
10515 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
10516 hdev->discovery.scan_start +
10517 hdev->discovery.scan_duration))
10520 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
10521 DISCOV_LE_RESTART_DELAY);
/* Service-discovery filter: decide whether a found device passes the
 * configured RSSI threshold and UUID list.  With the strict-duplicate
 * quirk an LE scan restart is kicked off so future RSSI updates are
 * not suppressed by controller-side duplicate filtering.
 */
10524 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10525 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10527 /* If a RSSI threshold has been specified, and
10528 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10529 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10530 * is set, let it through for further processing, as we might need to
10531 * restart the scan.
10533 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10534 * the results are also dropped.
10536 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10537 (rssi == HCI_RSSI_INVALID ||
10538 (rssi < hdev->discovery.rssi &&
10539 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10542 if (hdev->discovery.uuid_count != 0) {
10543 /* If a list of UUIDs is provided in filter, results with no
10544 * matching UUID should be dropped.
/* A match in either the advertising data or scan response suffices */
10546 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10547 hdev->discovery.uuids) &&
10548 !eir_has_uuids(scan_rsp, scan_rsp_len,
10549 hdev->discovery.uuid_count,
10550 hdev->discovery.uuids))
10554 /* If duplicate filtering does not report RSSI changes, then restart
10555 * scanning to ensure updated result with updated RSSI values.
10557 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10558 restart_le_scan(hdev);
10560 /* Validate RSSI value against the RSSI threshold once more. */
10561 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10562 rssi < hdev->discovery.rssi)
/* Report a discovered device to userspace as MGMT_EV_DEVICE_FOUND,
 * after applying the active discovery filters (kernel-initiated
 * discovery, RSSI/UUID service filter, limited-discoverable check).
 * Builds the event's EIR from the advertising data, an optional
 * class-of-device field, and any scan response.
 */
10569 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10570 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10571 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10574 struct mgmt_ev_device_found *ev = (void *)buf;
10577 /* Don't send events for a non-kernel initiated discovery. With
10578 * LE one exception is if we have pend_le_reports > 0 in which
10579 * case we're doing passive scanning and want these events.
10581 if (!hci_discovery_active(hdev)) {
10582 if (link_type == ACL_LINK)
10584 if (link_type == LE_LINK &&
10585 list_empty(&hdev->pend_le_reports) &&
10586 !hci_is_adv_monitoring(hdev)) {
10591 if (hdev->discovery.result_filtering) {
10592 /* We are using service discovery */
10593 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10598 if (hdev->discovery.limited) {
10599 /* Check for limited discoverable bit */
/* BR/EDR: bit 5 of CoD byte 1 is "limited discoverable mode" */
10601 if (!(dev_class[1] & 0x20))
/* LE: the flag lives in the advertised Flags AD structure */
10604 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10605 if (!flags || !(flags[0] & LE_AD_LIMITED))
10610 /* Make sure that the buffer is big enough. The 5 extra bytes
10611 * are for the potential CoD field.
10613 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
10616 memset(buf, 0, sizeof(buf));
10618 /* In case of device discovery with BR/EDR devices (pre 1.2), the
10619 * RSSI value was reported as 0 when not available. This behavior
10620 * is kept when using device discovery. This is required for full
10621 * backwards compatibility with the API.
10623 * However when using service discovery, the value 127 will be
10624 * returned when the RSSI is not available.
10626 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10627 link_type == ACL_LINK)
10630 bacpy(&ev->addr.bdaddr, bdaddr);
10631 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10633 ev->flags = cpu_to_le32(flags);
10636 /* Copy EIR or advertising data into event */
10637 memcpy(ev->eir, eir, eir_len);
/* Append CoD only if the advertisement didn't already include one */
10639 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
10641 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
10644 if (scan_rsp_len > 0)
10645 /* Append scan response data to event */
10646 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
10648 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10649 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
10651 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a DEVICE_FOUND event whose EIR
 * contains just a Complete Local Name field.
 */
10654 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10655 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10657 struct mgmt_ev_device_found *ev;
/* +2 for the EIR field header (length byte + type byte) */
10658 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
10661 ev = (struct mgmt_ev_device_found *) buf;
10663 memset(buf, 0, sizeof(buf));
10665 bacpy(&ev->addr.bdaddr, bdaddr);
10666 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10669 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
10672 ev->eir_len = cpu_to_le16(eir_len);
10674 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Broadcast MGMT_EV_DISCOVERING when discovery starts or stops,
 * tagged with the active discovery type.
 */
10677 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10679 struct mgmt_ev_discovering ev;
10681 bt_dev_dbg(hdev, "discovering %u", discovering);
10683 memset(&ev, 0, sizeof(ev));
10684 ev.type = hdev->discovery.type;
10685 ev.discovering = discovering;
10687 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast MGMT_EV_CONTROLLER_SUSPEND with the suspend state the
 * controller entered.
 */
10690 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10692 struct mgmt_ev_controller_suspend ev;
10694 ev.suspend_state = state;
10695 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
/* Broadcast MGMT_EV_CONTROLLER_RESUME with the wake reason and, when
 * a remote device caused the wake, its address (zeroed otherwise).
 */
10698 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10701 struct mgmt_ev_controller_resume ev;
10703 ev.wake_reason = reason;
10705 bacpy(&ev.addr.bdaddr, bdaddr);
10706 ev.addr.type = addr_type;
10708 memset(&ev.addr, 0, sizeof(ev.addr));
10711 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Registration descriptor for the HCI control channel: binds the
 * mgmt command handler tables (including the Tizen-specific ones
 * from mgmt_tizen.h) and the per-hdev init hook.
 */
10714 static struct hci_mgmt_chan chan = {
10715 .channel = HCI_CHANNEL_CONTROL,
10716 .handler_count = ARRAY_SIZE(mgmt_handlers),
10717 .handlers = mgmt_handlers,
10719 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
10720 .tizen_handlers = tizen_mgmt_handlers,
10722 .hdev_init = mgmt_init_hdev,
/* Register the mgmt control channel with the HCI socket layer.
 * Returns 0 on success or a negative errno.
 */
10725 int mgmt_init(void)
10727 return hci_mgmt_chan_register(&chan);
/* Unregister the mgmt control channel on module teardown. */
10730 void mgmt_exit(void)
10732 hci_mgmt_chan_unregister(&chan);