2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
39 #include "hci_request.h"
41 #include "mgmt_util.h"
42 #include "mgmt_config.h"
47 #define MGMT_VERSION 1
48 #define MGMT_REVISION 22
50 static const u16 mgmt_commands[] = {
51 MGMT_OP_READ_INDEX_LIST,
54 MGMT_OP_SET_DISCOVERABLE,
55 MGMT_OP_SET_CONNECTABLE,
56 MGMT_OP_SET_FAST_CONNECTABLE,
58 MGMT_OP_SET_LINK_SECURITY,
62 MGMT_OP_SET_DEV_CLASS,
63 MGMT_OP_SET_LOCAL_NAME,
66 MGMT_OP_LOAD_LINK_KEYS,
67 MGMT_OP_LOAD_LONG_TERM_KEYS,
69 MGMT_OP_GET_CONNECTIONS,
70 MGMT_OP_PIN_CODE_REPLY,
71 MGMT_OP_PIN_CODE_NEG_REPLY,
72 MGMT_OP_SET_IO_CAPABILITY,
74 MGMT_OP_CANCEL_PAIR_DEVICE,
75 MGMT_OP_UNPAIR_DEVICE,
76 MGMT_OP_USER_CONFIRM_REPLY,
77 MGMT_OP_USER_CONFIRM_NEG_REPLY,
78 MGMT_OP_USER_PASSKEY_REPLY,
79 MGMT_OP_USER_PASSKEY_NEG_REPLY,
80 MGMT_OP_READ_LOCAL_OOB_DATA,
81 MGMT_OP_ADD_REMOTE_OOB_DATA,
82 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
83 MGMT_OP_START_DISCOVERY,
84 MGMT_OP_STOP_DISCOVERY,
87 MGMT_OP_UNBLOCK_DEVICE,
88 MGMT_OP_SET_DEVICE_ID,
89 MGMT_OP_SET_ADVERTISING,
91 MGMT_OP_SET_STATIC_ADDRESS,
92 MGMT_OP_SET_SCAN_PARAMS,
93 MGMT_OP_SET_SECURE_CONN,
94 MGMT_OP_SET_DEBUG_KEYS,
97 MGMT_OP_GET_CONN_INFO,
98 MGMT_OP_GET_CLOCK_INFO,
100 MGMT_OP_REMOVE_DEVICE,
101 MGMT_OP_LOAD_CONN_PARAM,
102 MGMT_OP_READ_UNCONF_INDEX_LIST,
103 MGMT_OP_READ_CONFIG_INFO,
104 MGMT_OP_SET_EXTERNAL_CONFIG,
105 MGMT_OP_SET_PUBLIC_ADDRESS,
106 MGMT_OP_START_SERVICE_DISCOVERY,
107 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
108 MGMT_OP_READ_EXT_INDEX_LIST,
109 MGMT_OP_READ_ADV_FEATURES,
110 MGMT_OP_ADD_ADVERTISING,
111 MGMT_OP_REMOVE_ADVERTISING,
112 MGMT_OP_GET_ADV_SIZE_INFO,
113 MGMT_OP_START_LIMITED_DISCOVERY,
114 MGMT_OP_READ_EXT_INFO,
115 MGMT_OP_SET_APPEARANCE,
116 MGMT_OP_GET_PHY_CONFIGURATION,
117 MGMT_OP_SET_PHY_CONFIGURATION,
118 MGMT_OP_SET_BLOCKED_KEYS,
119 MGMT_OP_SET_WIDEBAND_SPEECH,
120 MGMT_OP_READ_CONTROLLER_CAP,
121 MGMT_OP_READ_EXP_FEATURES_INFO,
122 MGMT_OP_SET_EXP_FEATURE,
123 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
124 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
125 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
126 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
127 MGMT_OP_GET_DEVICE_FLAGS,
128 MGMT_OP_SET_DEVICE_FLAGS,
129 MGMT_OP_READ_ADV_MONITOR_FEATURES,
130 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
131 MGMT_OP_REMOVE_ADV_MONITOR,
132 MGMT_OP_ADD_EXT_ADV_PARAMS,
133 MGMT_OP_ADD_EXT_ADV_DATA,
134 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
135 MGMT_OP_SET_MESH_RECEIVER,
136 MGMT_OP_MESH_READ_FEATURES,
138 MGMT_OP_MESH_SEND_CANCEL,
141 static const u16 mgmt_events[] = {
142 MGMT_EV_CONTROLLER_ERROR,
144 MGMT_EV_INDEX_REMOVED,
145 MGMT_EV_NEW_SETTINGS,
146 MGMT_EV_CLASS_OF_DEV_CHANGED,
147 MGMT_EV_LOCAL_NAME_CHANGED,
148 MGMT_EV_NEW_LINK_KEY,
149 MGMT_EV_NEW_LONG_TERM_KEY,
150 MGMT_EV_DEVICE_CONNECTED,
151 MGMT_EV_DEVICE_DISCONNECTED,
152 MGMT_EV_CONNECT_FAILED,
153 MGMT_EV_PIN_CODE_REQUEST,
154 MGMT_EV_USER_CONFIRM_REQUEST,
155 MGMT_EV_USER_PASSKEY_REQUEST,
157 MGMT_EV_DEVICE_FOUND,
159 MGMT_EV_DEVICE_BLOCKED,
160 MGMT_EV_DEVICE_UNBLOCKED,
161 MGMT_EV_DEVICE_UNPAIRED,
162 MGMT_EV_PASSKEY_NOTIFY,
165 MGMT_EV_DEVICE_ADDED,
166 MGMT_EV_DEVICE_REMOVED,
167 MGMT_EV_NEW_CONN_PARAM,
168 MGMT_EV_UNCONF_INDEX_ADDED,
169 MGMT_EV_UNCONF_INDEX_REMOVED,
170 MGMT_EV_NEW_CONFIG_OPTIONS,
171 MGMT_EV_EXT_INDEX_ADDED,
172 MGMT_EV_EXT_INDEX_REMOVED,
173 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
174 MGMT_EV_ADVERTISING_ADDED,
175 MGMT_EV_ADVERTISING_REMOVED,
176 MGMT_EV_EXT_INFO_CHANGED,
177 MGMT_EV_PHY_CONFIGURATION_CHANGED,
178 MGMT_EV_EXP_FEATURE_CHANGED,
179 MGMT_EV_DEVICE_FLAGS_CHANGED,
180 MGMT_EV_ADV_MONITOR_ADDED,
181 MGMT_EV_ADV_MONITOR_REMOVED,
182 MGMT_EV_CONTROLLER_SUSPEND,
183 MGMT_EV_CONTROLLER_RESUME,
184 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
185 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
188 static const u16 mgmt_untrusted_commands[] = {
189 MGMT_OP_READ_INDEX_LIST,
191 MGMT_OP_READ_UNCONF_INDEX_LIST,
192 MGMT_OP_READ_CONFIG_INFO,
193 MGMT_OP_READ_EXT_INDEX_LIST,
194 MGMT_OP_READ_EXT_INFO,
195 MGMT_OP_READ_CONTROLLER_CAP,
196 MGMT_OP_READ_EXP_FEATURES_INFO,
197 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
198 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
201 static const u16 mgmt_untrusted_events[] = {
203 MGMT_EV_INDEX_REMOVED,
204 MGMT_EV_NEW_SETTINGS,
205 MGMT_EV_CLASS_OF_DEV_CHANGED,
206 MGMT_EV_LOCAL_NAME_CHANGED,
207 MGMT_EV_UNCONF_INDEX_ADDED,
208 MGMT_EV_UNCONF_INDEX_REMOVED,
209 MGMT_EV_NEW_CONFIG_OPTIONS,
210 MGMT_EV_EXT_INDEX_ADDED,
211 MGMT_EV_EXT_INDEX_REMOVED,
212 MGMT_EV_EXT_INFO_CHANGED,
213 MGMT_EV_EXP_FEATURE_CHANGED,
216 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
218 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
219 "\x00\x00\x00\x00\x00\x00\x00\x00"
221 /* HCI to MGMT error code conversion table */
222 static const u8 mgmt_status_table[] = {
224 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
225 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
226 MGMT_STATUS_FAILED, /* Hardware Failure */
227 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
228 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
229 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
230 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
231 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
232 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
233 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
234 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
235 MGMT_STATUS_BUSY, /* Command Disallowed */
236 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
237 MGMT_STATUS_REJECTED, /* Rejected Security */
238 MGMT_STATUS_REJECTED, /* Rejected Personal */
239 MGMT_STATUS_TIMEOUT, /* Host Timeout */
240 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
241 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
242 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
243 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
244 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
245 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
246 MGMT_STATUS_BUSY, /* Repeated Attempts */
247 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
248 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
249 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
250 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
251 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
252 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
253 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
254 MGMT_STATUS_FAILED, /* Unspecified Error */
255 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
256 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
257 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
258 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
259 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
260 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
261 MGMT_STATUS_FAILED, /* Unit Link Key Used */
262 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
263 MGMT_STATUS_TIMEOUT, /* Instant Passed */
264 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
265 MGMT_STATUS_FAILED, /* Transaction Collision */
266 MGMT_STATUS_FAILED, /* Reserved for future use */
267 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
268 MGMT_STATUS_REJECTED, /* QoS Rejected */
269 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
270 MGMT_STATUS_REJECTED, /* Insufficient Security */
271 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
272 MGMT_STATUS_FAILED, /* Reserved for future use */
273 MGMT_STATUS_BUSY, /* Role Switch Pending */
274 MGMT_STATUS_FAILED, /* Reserved for future use */
275 MGMT_STATUS_FAILED, /* Slot Violation */
276 MGMT_STATUS_FAILED, /* Role Switch Failed */
277 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
278 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
279 MGMT_STATUS_BUSY, /* Host Busy Pairing */
280 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
281 MGMT_STATUS_BUSY, /* Controller Busy */
282 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
283 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
284 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
285 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
286 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
289 static u8 mgmt_errno_status(int err)
293 return MGMT_STATUS_SUCCESS;
295 return MGMT_STATUS_REJECTED;
297 return MGMT_STATUS_INVALID_PARAMS;
299 return MGMT_STATUS_NOT_SUPPORTED;
301 return MGMT_STATUS_BUSY;
303 return MGMT_STATUS_AUTH_FAILED;
305 return MGMT_STATUS_NO_RESOURCES;
307 return MGMT_STATUS_ALREADY_CONNECTED;
309 return MGMT_STATUS_DISCONNECTED;
312 return MGMT_STATUS_FAILED;
315 static u8 mgmt_status(int err)
318 return mgmt_errno_status(err);
320 if (err < ARRAY_SIZE(mgmt_status_table))
321 return mgmt_status_table[err];
323 return MGMT_STATUS_FAILED;
326 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
329 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
333 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
334 u16 len, int flag, struct sock *skip_sk)
336 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
340 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
341 struct sock *skip_sk)
343 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
344 HCI_SOCK_TRUSTED, skip_sk);
347 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
349 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
353 static u8 le_addr_type(u8 mgmt_addr_type)
355 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
356 return ADDR_LE_DEV_PUBLIC;
358 return ADDR_LE_DEV_RANDOM;
361 void mgmt_fill_version_info(void *ver)
363 struct mgmt_rp_read_version *rp = ver;
365 rp->version = MGMT_VERSION;
366 rp->revision = cpu_to_le16(MGMT_REVISION);
369 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
372 struct mgmt_rp_read_version rp;
374 bt_dev_dbg(hdev, "sock %p", sk);
376 mgmt_fill_version_info(&rp);
378 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
382 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
385 struct mgmt_rp_read_commands *rp;
386 u16 num_commands, num_events;
390 bt_dev_dbg(hdev, "sock %p", sk);
392 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
393 num_commands = ARRAY_SIZE(mgmt_commands);
394 num_events = ARRAY_SIZE(mgmt_events);
396 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
397 num_events = ARRAY_SIZE(mgmt_untrusted_events);
400 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
402 rp = kmalloc(rp_size, GFP_KERNEL);
406 rp->num_commands = cpu_to_le16(num_commands);
407 rp->num_events = cpu_to_le16(num_events);
409 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
410 __le16 *opcode = rp->opcodes;
412 for (i = 0; i < num_commands; i++, opcode++)
413 put_unaligned_le16(mgmt_commands[i], opcode);
415 for (i = 0; i < num_events; i++, opcode++)
416 put_unaligned_le16(mgmt_events[i], opcode);
418 __le16 *opcode = rp->opcodes;
420 for (i = 0; i < num_commands; i++, opcode++)
421 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
423 for (i = 0; i < num_events; i++, opcode++)
424 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
427 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
434 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
437 struct mgmt_rp_read_index_list *rp;
443 bt_dev_dbg(hdev, "sock %p", sk);
445 read_lock(&hci_dev_list_lock);
448 list_for_each_entry(d, &hci_dev_list, list) {
449 if (d->dev_type == HCI_PRIMARY &&
450 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
454 rp_len = sizeof(*rp) + (2 * count);
455 rp = kmalloc(rp_len, GFP_ATOMIC);
457 read_unlock(&hci_dev_list_lock);
462 list_for_each_entry(d, &hci_dev_list, list) {
463 if (hci_dev_test_flag(d, HCI_SETUP) ||
464 hci_dev_test_flag(d, HCI_CONFIG) ||
465 hci_dev_test_flag(d, HCI_USER_CHANNEL))
468 /* Devices marked as raw-only are neither configured
469 * nor unconfigured controllers.
471 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
474 if (d->dev_type == HCI_PRIMARY &&
475 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
476 rp->index[count++] = cpu_to_le16(d->id);
477 bt_dev_dbg(hdev, "Added hci%u", d->id);
481 rp->num_controllers = cpu_to_le16(count);
482 rp_len = sizeof(*rp) + (2 * count);
484 read_unlock(&hci_dev_list_lock);
486 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
494 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
495 void *data, u16 data_len)
497 struct mgmt_rp_read_unconf_index_list *rp;
503 bt_dev_dbg(hdev, "sock %p", sk);
505 read_lock(&hci_dev_list_lock);
508 list_for_each_entry(d, &hci_dev_list, list) {
509 if (d->dev_type == HCI_PRIMARY &&
510 hci_dev_test_flag(d, HCI_UNCONFIGURED))
514 rp_len = sizeof(*rp) + (2 * count);
515 rp = kmalloc(rp_len, GFP_ATOMIC);
517 read_unlock(&hci_dev_list_lock);
522 list_for_each_entry(d, &hci_dev_list, list) {
523 if (hci_dev_test_flag(d, HCI_SETUP) ||
524 hci_dev_test_flag(d, HCI_CONFIG) ||
525 hci_dev_test_flag(d, HCI_USER_CHANNEL))
528 /* Devices marked as raw-only are neither configured
529 * nor unconfigured controllers.
531 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
534 if (d->dev_type == HCI_PRIMARY &&
535 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
536 rp->index[count++] = cpu_to_le16(d->id);
537 bt_dev_dbg(hdev, "Added hci%u", d->id);
541 rp->num_controllers = cpu_to_le16(count);
542 rp_len = sizeof(*rp) + (2 * count);
544 read_unlock(&hci_dev_list_lock);
546 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
547 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
554 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
555 void *data, u16 data_len)
557 struct mgmt_rp_read_ext_index_list *rp;
562 bt_dev_dbg(hdev, "sock %p", sk);
564 read_lock(&hci_dev_list_lock);
567 list_for_each_entry(d, &hci_dev_list, list) {
568 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
572 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
574 read_unlock(&hci_dev_list_lock);
579 list_for_each_entry(d, &hci_dev_list, list) {
580 if (hci_dev_test_flag(d, HCI_SETUP) ||
581 hci_dev_test_flag(d, HCI_CONFIG) ||
582 hci_dev_test_flag(d, HCI_USER_CHANNEL))
585 /* Devices marked as raw-only are neither configured
586 * nor unconfigured controllers.
588 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
591 if (d->dev_type == HCI_PRIMARY) {
592 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
593 rp->entry[count].type = 0x01;
595 rp->entry[count].type = 0x00;
596 } else if (d->dev_type == HCI_AMP) {
597 rp->entry[count].type = 0x02;
602 rp->entry[count].bus = d->bus;
603 rp->entry[count++].index = cpu_to_le16(d->id);
604 bt_dev_dbg(hdev, "Added hci%u", d->id);
607 rp->num_controllers = cpu_to_le16(count);
609 read_unlock(&hci_dev_list_lock);
611 /* If this command is called at least once, then all the
612 * default index and unconfigured index events are disabled
613 * and from now on only extended index events are used.
615 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
616 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
617 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
619 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
620 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
621 struct_size(rp, entry, count));
628 static bool is_configured(struct hci_dev *hdev)
630 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
631 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
634 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
635 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
636 !bacmp(&hdev->public_addr, BDADDR_ANY))
642 static __le32 get_missing_options(struct hci_dev *hdev)
646 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
647 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
648 options |= MGMT_OPTION_EXTERNAL_CONFIG;
650 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
651 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
652 !bacmp(&hdev->public_addr, BDADDR_ANY))
653 options |= MGMT_OPTION_PUBLIC_ADDRESS;
655 return cpu_to_le32(options);
658 static int new_options(struct hci_dev *hdev, struct sock *skip)
660 __le32 options = get_missing_options(hdev);
662 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
663 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
666 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
668 __le32 options = get_missing_options(hdev);
670 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
674 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
675 void *data, u16 data_len)
677 struct mgmt_rp_read_config_info rp;
680 bt_dev_dbg(hdev, "sock %p", sk);
684 memset(&rp, 0, sizeof(rp));
685 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
687 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
688 options |= MGMT_OPTION_EXTERNAL_CONFIG;
690 if (hdev->set_bdaddr)
691 options |= MGMT_OPTION_PUBLIC_ADDRESS;
693 rp.supported_options = cpu_to_le32(options);
694 rp.missing_options = get_missing_options(hdev);
696 hci_dev_unlock(hdev);
698 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
702 static u32 get_supported_phys(struct hci_dev *hdev)
704 u32 supported_phys = 0;
706 if (lmp_bredr_capable(hdev)) {
707 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
709 if (hdev->features[0][0] & LMP_3SLOT)
710 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
712 if (hdev->features[0][0] & LMP_5SLOT)
713 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
715 if (lmp_edr_2m_capable(hdev)) {
716 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
718 if (lmp_edr_3slot_capable(hdev))
719 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
721 if (lmp_edr_5slot_capable(hdev))
722 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
724 if (lmp_edr_3m_capable(hdev)) {
725 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
727 if (lmp_edr_3slot_capable(hdev))
728 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
730 if (lmp_edr_5slot_capable(hdev))
731 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
736 if (lmp_le_capable(hdev)) {
737 supported_phys |= MGMT_PHY_LE_1M_TX;
738 supported_phys |= MGMT_PHY_LE_1M_RX;
740 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
741 supported_phys |= MGMT_PHY_LE_2M_TX;
742 supported_phys |= MGMT_PHY_LE_2M_RX;
745 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
746 supported_phys |= MGMT_PHY_LE_CODED_TX;
747 supported_phys |= MGMT_PHY_LE_CODED_RX;
751 return supported_phys;
754 static u32 get_selected_phys(struct hci_dev *hdev)
756 u32 selected_phys = 0;
758 if (lmp_bredr_capable(hdev)) {
759 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
761 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
762 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
764 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
765 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
767 if (lmp_edr_2m_capable(hdev)) {
768 if (!(hdev->pkt_type & HCI_2DH1))
769 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
771 if (lmp_edr_3slot_capable(hdev) &&
772 !(hdev->pkt_type & HCI_2DH3))
773 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
775 if (lmp_edr_5slot_capable(hdev) &&
776 !(hdev->pkt_type & HCI_2DH5))
777 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
779 if (lmp_edr_3m_capable(hdev)) {
780 if (!(hdev->pkt_type & HCI_3DH1))
781 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
783 if (lmp_edr_3slot_capable(hdev) &&
784 !(hdev->pkt_type & HCI_3DH3))
785 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
787 if (lmp_edr_5slot_capable(hdev) &&
788 !(hdev->pkt_type & HCI_3DH5))
789 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
794 if (lmp_le_capable(hdev)) {
795 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
796 selected_phys |= MGMT_PHY_LE_1M_TX;
798 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
799 selected_phys |= MGMT_PHY_LE_1M_RX;
801 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
802 selected_phys |= MGMT_PHY_LE_2M_TX;
804 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
805 selected_phys |= MGMT_PHY_LE_2M_RX;
807 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
808 selected_phys |= MGMT_PHY_LE_CODED_TX;
810 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
811 selected_phys |= MGMT_PHY_LE_CODED_RX;
814 return selected_phys;
817 static u32 get_configurable_phys(struct hci_dev *hdev)
819 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
820 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
823 static u32 get_supported_settings(struct hci_dev *hdev)
827 settings |= MGMT_SETTING_POWERED;
828 settings |= MGMT_SETTING_BONDABLE;
829 settings |= MGMT_SETTING_DEBUG_KEYS;
830 settings |= MGMT_SETTING_CONNECTABLE;
831 settings |= MGMT_SETTING_DISCOVERABLE;
833 if (lmp_bredr_capable(hdev)) {
834 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
835 settings |= MGMT_SETTING_FAST_CONNECTABLE;
836 settings |= MGMT_SETTING_BREDR;
837 settings |= MGMT_SETTING_LINK_SECURITY;
839 if (lmp_ssp_capable(hdev)) {
840 settings |= MGMT_SETTING_SSP;
841 if (IS_ENABLED(CONFIG_BT_HS))
842 settings |= MGMT_SETTING_HS;
845 if (lmp_sc_capable(hdev))
846 settings |= MGMT_SETTING_SECURE_CONN;
848 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
850 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
853 if (lmp_le_capable(hdev)) {
854 settings |= MGMT_SETTING_LE;
855 settings |= MGMT_SETTING_SECURE_CONN;
856 settings |= MGMT_SETTING_PRIVACY;
857 settings |= MGMT_SETTING_STATIC_ADDRESS;
858 settings |= MGMT_SETTING_ADVERTISING;
861 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
863 settings |= MGMT_SETTING_CONFIGURATION;
865 settings |= MGMT_SETTING_PHY_CONFIGURATION;
870 static u32 get_current_settings(struct hci_dev *hdev)
874 if (hdev_is_powered(hdev))
875 settings |= MGMT_SETTING_POWERED;
877 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
878 settings |= MGMT_SETTING_CONNECTABLE;
880 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
881 settings |= MGMT_SETTING_FAST_CONNECTABLE;
883 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
884 settings |= MGMT_SETTING_DISCOVERABLE;
886 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
887 settings |= MGMT_SETTING_BONDABLE;
889 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
890 settings |= MGMT_SETTING_BREDR;
892 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
893 settings |= MGMT_SETTING_LE;
895 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
896 settings |= MGMT_SETTING_LINK_SECURITY;
898 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
899 settings |= MGMT_SETTING_SSP;
901 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
902 settings |= MGMT_SETTING_HS;
904 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
905 settings |= MGMT_SETTING_ADVERTISING;
907 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
908 settings |= MGMT_SETTING_SECURE_CONN;
910 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
911 settings |= MGMT_SETTING_DEBUG_KEYS;
913 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
914 settings |= MGMT_SETTING_PRIVACY;
916 /* The current setting for static address has two purposes. The
917 * first is to indicate if the static address will be used and
918 * the second is to indicate if it is actually set.
920 * This means if the static address is not configured, this flag
921 * will never be set. If the address is configured, then if the
922 * address is actually used decides if the flag is set or not.
924 * For single mode LE only controllers and dual-mode controllers
925 * with BR/EDR disabled, the existence of the static address will
928 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
929 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
930 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
931 if (bacmp(&hdev->static_addr, BDADDR_ANY))
932 settings |= MGMT_SETTING_STATIC_ADDRESS;
935 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
936 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
941 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
943 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
946 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
948 struct mgmt_pending_cmd *cmd;
950 /* If there's a pending mgmt command the flags will not yet have
951 * their final values, so check for this first.
953 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
955 struct mgmt_mode *cp = cmd->param;
957 return LE_AD_GENERAL;
958 else if (cp->val == 0x02)
959 return LE_AD_LIMITED;
961 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
962 return LE_AD_LIMITED;
963 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
964 return LE_AD_GENERAL;
970 bool mgmt_get_connectable(struct hci_dev *hdev)
972 struct mgmt_pending_cmd *cmd;
974 /* If there's a pending mgmt command the flag will not yet have
975 * it's final value, so check for this first.
977 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
979 struct mgmt_mode *cp = cmd->param;
984 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
987 static int service_cache_sync(struct hci_dev *hdev, void *data)
989 hci_update_eir_sync(hdev);
990 hci_update_class_sync(hdev);
995 static void service_cache_off(struct work_struct *work)
997 struct hci_dev *hdev = container_of(work, struct hci_dev,
1000 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1003 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1006 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1008 /* The generation of a new RPA and programming it into the
1009 * controller happens in the hci_req_enable_advertising()
1012 if (ext_adv_capable(hdev))
1013 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1015 return hci_enable_advertising_sync(hdev);
1018 static void rpa_expired(struct work_struct *work)
1020 struct hci_dev *hdev = container_of(work, struct hci_dev,
1023 bt_dev_dbg(hdev, "");
1025 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1027 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1030 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1033 static void discov_off(struct work_struct *work)
1035 struct hci_dev *hdev = container_of(work, struct hci_dev,
1038 bt_dev_dbg(hdev, "");
1042 /* When discoverable timeout triggers, then just make sure
1043 * the limited discoverable flag is cleared. Even in the case
1044 * of a timeout triggered from general discoverable, it is
1045 * safe to unconditionally clear the flag.
1047 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1048 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1049 hdev->discov_timeout = 0;
1051 hci_update_discoverable(hdev);
1053 mgmt_new_settings(hdev);
1055 hci_dev_unlock(hdev);
1058 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1060 static void mesh_send_complete(struct hci_dev *hdev,
1061 struct mgmt_mesh_tx *mesh_tx, bool silent)
1063 u8 handle = mesh_tx->handle;
1066 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1067 sizeof(handle), NULL);
1069 mgmt_mesh_remove(mesh_tx);
1072 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1074 struct mgmt_mesh_tx *mesh_tx;
1076 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1077 hci_disable_advertising_sync(hdev);
1078 mesh_tx = mgmt_mesh_next(hdev, NULL);
1081 mesh_send_complete(hdev, mesh_tx, false);
1086 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1087 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1088 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1090 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1095 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1096 mesh_send_start_complete);
1099 mesh_send_complete(hdev, mesh_tx, false);
1101 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1104 static void mesh_send_done(struct work_struct *work)
1106 struct hci_dev *hdev = container_of(work, struct hci_dev,
1107 mesh_send_done.work);
1109 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1112 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1115 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1117 if (hci_dev_test_flag(hdev, HCI_MGMT))
1120 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1122 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1123 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1124 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1125 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1127 /* Non-mgmt controlled devices get this bit set
1128 * implicitly so that pairing works for them, however
1129 * for mgmt we require user-space to explicitly enable
1132 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1134 hci_dev_set_flag(hdev, HCI_MGMT);
1137 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1138 void *data, u16 data_len)
1140 struct mgmt_rp_read_info rp;
1142 bt_dev_dbg(hdev, "sock %p", sk);
1146 memset(&rp, 0, sizeof(rp));
1148 bacpy(&rp.bdaddr, &hdev->bdaddr);
1150 rp.version = hdev->hci_ver;
1151 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1153 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1154 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1156 memcpy(rp.dev_class, hdev->dev_class, 3);
1158 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1159 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1161 hci_dev_unlock(hdev);
1163 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1167 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1172 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1173 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1174 hdev->dev_class, 3);
1176 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1177 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1180 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1181 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1182 hdev->dev_name, name_len);
1184 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1185 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1186 hdev->short_name, name_len);
1191 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1192 void *data, u16 data_len)
1195 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1198 bt_dev_dbg(hdev, "sock %p", sk);
1200 memset(&buf, 0, sizeof(buf));
1204 bacpy(&rp->bdaddr, &hdev->bdaddr);
1206 rp->version = hdev->hci_ver;
1207 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1209 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1210 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1213 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1214 rp->eir_len = cpu_to_le16(eir_len);
1216 hci_dev_unlock(hdev);
1218 /* If this command is called at least once, then the events
1219 * for class of device and local name changes are disabled
1220 * and only the new extended controller information event
1223 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1224 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1225 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1227 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1228 sizeof(*rp) + eir_len);
1231 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1234 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1237 memset(buf, 0, sizeof(buf));
1239 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1240 ev->eir_len = cpu_to_le16(eir_len);
1242 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1243 sizeof(*ev) + eir_len,
1244 HCI_MGMT_EXT_INFO_EVENTS, skip);
1247 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1249 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1251 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1255 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1257 struct mgmt_ev_advertising_added ev;
1259 ev.instance = instance;
1261 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1264 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1267 struct mgmt_ev_advertising_removed ev;
1269 ev.instance = instance;
1271 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1274 static void cancel_adv_timeout(struct hci_dev *hdev)
1276 if (hdev->adv_instance_timeout) {
1277 hdev->adv_instance_timeout = 0;
1278 cancel_delayed_work(&hdev->adv_instance_expire);
1282 /* This function requires the caller holds hdev->lock */
1283 static void restart_le_actions(struct hci_dev *hdev)
1285 struct hci_conn_params *p;
1287 list_for_each_entry(p, &hdev->le_conn_params, list) {
1288 /* Needed for AUTO_OFF case where might not "really"
1289 * have been powered off.
1291 list_del_init(&p->action);
1293 switch (p->auto_connect) {
1294 case HCI_AUTO_CONN_DIRECT:
1295 case HCI_AUTO_CONN_ALWAYS:
1296 list_add(&p->action, &hdev->pend_le_conns);
1298 case HCI_AUTO_CONN_REPORT:
1299 list_add(&p->action, &hdev->pend_le_reports);
1307 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1309 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1311 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1312 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1315 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1317 struct mgmt_pending_cmd *cmd = data;
1318 struct mgmt_mode *cp;
1320 /* Make sure cmd still outstanding. */
1321 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1326 bt_dev_dbg(hdev, "err %d", err);
1331 restart_le_actions(hdev);
1332 hci_update_passive_scan(hdev);
1333 hci_dev_unlock(hdev);
1336 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1338 /* Only call new_setting for power on as power off is deferred
1339 * to hdev->power_off work which does call hci_dev_do_close.
1342 new_settings(hdev, cmd->sk);
1344 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1348 mgmt_pending_remove(cmd);
1351 static int set_powered_sync(struct hci_dev *hdev, void *data)
1353 struct mgmt_pending_cmd *cmd = data;
1354 struct mgmt_mode *cp = cmd->param;
1356 BT_DBG("%s", hdev->name);
1358 return hci_set_powered_sync(hdev, cp->val);
1361 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1364 struct mgmt_mode *cp = data;
1365 struct mgmt_pending_cmd *cmd;
1368 bt_dev_dbg(hdev, "sock %p", sk);
1370 if (cp->val != 0x00 && cp->val != 0x01)
1371 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1372 MGMT_STATUS_INVALID_PARAMS);
1376 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1377 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1382 if (!!cp->val == hdev_is_powered(hdev)) {
1383 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1387 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1393 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1394 mgmt_set_powered_complete);
1397 mgmt_pending_remove(cmd);
1400 hci_dev_unlock(hdev);
1404 int mgmt_new_settings(struct hci_dev *hdev)
1406 return new_settings(hdev, NULL);
1411 struct hci_dev *hdev;
1415 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1417 struct cmd_lookup *match = data;
1419 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1421 list_del(&cmd->list);
1423 if (match->sk == NULL) {
1424 match->sk = cmd->sk;
1425 sock_hold(match->sk);
1428 mgmt_pending_free(cmd);
1431 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1435 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1436 mgmt_pending_remove(cmd);
1439 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1441 if (cmd->cmd_complete) {
1444 cmd->cmd_complete(cmd, *status);
1445 mgmt_pending_remove(cmd);
1450 cmd_status_rsp(cmd, data);
1453 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1455 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1456 cmd->param, cmd->param_len);
1459 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1461 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1462 cmd->param, sizeof(struct mgmt_addr_info));
1465 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1467 if (!lmp_bredr_capable(hdev))
1468 return MGMT_STATUS_NOT_SUPPORTED;
1469 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1470 return MGMT_STATUS_REJECTED;
1472 return MGMT_STATUS_SUCCESS;
1475 static u8 mgmt_le_support(struct hci_dev *hdev)
1477 if (!lmp_le_capable(hdev))
1478 return MGMT_STATUS_NOT_SUPPORTED;
1479 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1480 return MGMT_STATUS_REJECTED;
1482 return MGMT_STATUS_SUCCESS;
1485 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1488 struct mgmt_pending_cmd *cmd = data;
1490 bt_dev_dbg(hdev, "err %d", err);
1492 /* Make sure cmd still outstanding. */
1493 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1499 u8 mgmt_err = mgmt_status(err);
1500 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1501 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1505 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1506 hdev->discov_timeout > 0) {
1507 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1508 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1511 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1512 new_settings(hdev, cmd->sk);
1515 mgmt_pending_remove(cmd);
1516 hci_dev_unlock(hdev);
1519 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1521 BT_DBG("%s", hdev->name);
1523 return hci_update_discoverable_sync(hdev);
1526 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1529 struct mgmt_cp_set_discoverable *cp = data;
1530 struct mgmt_pending_cmd *cmd;
1534 bt_dev_dbg(hdev, "sock %p", sk);
1536 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1537 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1538 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1539 MGMT_STATUS_REJECTED);
1541 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1542 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1543 MGMT_STATUS_INVALID_PARAMS);
1545 timeout = __le16_to_cpu(cp->timeout);
1547 /* Disabling discoverable requires that no timeout is set,
1548 * and enabling limited discoverable requires a timeout.
1550 if ((cp->val == 0x00 && timeout > 0) ||
1551 (cp->val == 0x02 && timeout == 0))
1552 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1553 MGMT_STATUS_INVALID_PARAMS);
1557 if (!hdev_is_powered(hdev) && timeout > 0) {
1558 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1559 MGMT_STATUS_NOT_POWERED);
1563 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1564 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1565 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1570 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1571 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1572 MGMT_STATUS_REJECTED);
1576 if (hdev->advertising_paused) {
1577 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1582 if (!hdev_is_powered(hdev)) {
1583 bool changed = false;
1585 /* Setting limited discoverable when powered off is
1586 * not a valid operation since it requires a timeout
1587 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1589 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1590 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1594 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1599 err = new_settings(hdev, sk);
1604 /* If the current mode is the same, then just update the timeout
1605 * value with the new value. And if only the timeout gets updated,
1606 * then no need for any HCI transactions.
1608 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1609 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1610 HCI_LIMITED_DISCOVERABLE)) {
1611 cancel_delayed_work(&hdev->discov_off);
1612 hdev->discov_timeout = timeout;
1614 if (cp->val && hdev->discov_timeout > 0) {
1615 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1616 queue_delayed_work(hdev->req_workqueue,
1617 &hdev->discov_off, to);
1620 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1624 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1630 /* Cancel any potential discoverable timeout that might be
1631 * still active and store new timeout value. The arming of
1632 * the timeout happens in the complete handler.
1634 cancel_delayed_work(&hdev->discov_off);
1635 hdev->discov_timeout = timeout;
1638 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1640 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1642 /* Limited discoverable mode */
1643 if (cp->val == 0x02)
1644 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1646 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1648 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1649 mgmt_set_discoverable_complete);
1652 mgmt_pending_remove(cmd);
1655 hci_dev_unlock(hdev);
1659 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1662 struct mgmt_pending_cmd *cmd = data;
1664 bt_dev_dbg(hdev, "err %d", err);
1666 /* Make sure cmd still outstanding. */
1667 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1673 u8 mgmt_err = mgmt_status(err);
1674 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1678 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1679 new_settings(hdev, cmd->sk);
1683 mgmt_pending_remove(cmd);
1685 hci_dev_unlock(hdev);
1688 static int set_connectable_update_settings(struct hci_dev *hdev,
1689 struct sock *sk, u8 val)
1691 bool changed = false;
1694 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1698 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1700 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1701 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1704 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1709 hci_update_scan(hdev);
1710 hci_update_passive_scan(hdev);
1711 return new_settings(hdev, sk);
1717 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1719 BT_DBG("%s", hdev->name);
1721 return hci_update_connectable_sync(hdev);
1724 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1727 struct mgmt_mode *cp = data;
1728 struct mgmt_pending_cmd *cmd;
1731 bt_dev_dbg(hdev, "sock %p", sk);
1733 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1734 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1735 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1736 MGMT_STATUS_REJECTED);
1738 if (cp->val != 0x00 && cp->val != 0x01)
1739 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1740 MGMT_STATUS_INVALID_PARAMS);
1744 if (!hdev_is_powered(hdev)) {
1745 err = set_connectable_update_settings(hdev, sk, cp->val);
1749 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1750 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1751 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1756 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1763 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1765 if (hdev->discov_timeout > 0)
1766 cancel_delayed_work(&hdev->discov_off);
1768 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1769 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1770 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1773 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1774 mgmt_set_connectable_complete);
1777 mgmt_pending_remove(cmd);
1780 hci_dev_unlock(hdev);
1784 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1787 struct mgmt_mode *cp = data;
1791 bt_dev_dbg(hdev, "sock %p", sk);
1793 if (cp->val != 0x00 && cp->val != 0x01)
1794 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1795 MGMT_STATUS_INVALID_PARAMS);
1800 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1802 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1804 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1809 /* In limited privacy mode the change of bondable mode
1810 * may affect the local advertising address.
1812 hci_update_discoverable(hdev);
1814 err = new_settings(hdev, sk);
1818 hci_dev_unlock(hdev);
1822 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1825 struct mgmt_mode *cp = data;
1826 struct mgmt_pending_cmd *cmd;
1830 bt_dev_dbg(hdev, "sock %p", sk);
1832 status = mgmt_bredr_support(hdev);
1834 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1837 if (cp->val != 0x00 && cp->val != 0x01)
1838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1839 MGMT_STATUS_INVALID_PARAMS);
1843 if (!hdev_is_powered(hdev)) {
1844 bool changed = false;
1846 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1847 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1851 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1856 err = new_settings(hdev, sk);
1861 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1862 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1869 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1870 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1874 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1880 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1882 mgmt_pending_remove(cmd);
1887 hci_dev_unlock(hdev);
1891 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1893 struct cmd_lookup match = { NULL, hdev };
1894 struct mgmt_pending_cmd *cmd = data;
1895 struct mgmt_mode *cp = cmd->param;
1896 u8 enable = cp->val;
1899 /* Make sure cmd still outstanding. */
1900 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1904 u8 mgmt_err = mgmt_status(err);
1906 if (enable && hci_dev_test_and_clear_flag(hdev,
1908 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1909 new_settings(hdev, NULL);
1912 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1918 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1920 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1923 changed = hci_dev_test_and_clear_flag(hdev,
1926 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1929 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1932 new_settings(hdev, match.sk);
1937 hci_update_eir_sync(hdev);
1940 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1942 struct mgmt_pending_cmd *cmd = data;
1943 struct mgmt_mode *cp = cmd->param;
1944 bool changed = false;
1948 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1950 err = hci_write_ssp_mode_sync(hdev, cp->val);
1952 if (!err && changed)
1953 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1958 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1960 struct mgmt_mode *cp = data;
1961 struct mgmt_pending_cmd *cmd;
1965 bt_dev_dbg(hdev, "sock %p", sk);
1967 status = mgmt_bredr_support(hdev);
1969 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1971 if (!lmp_ssp_capable(hdev))
1972 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1973 MGMT_STATUS_NOT_SUPPORTED);
1975 if (cp->val != 0x00 && cp->val != 0x01)
1976 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1977 MGMT_STATUS_INVALID_PARAMS);
1981 if (!hdev_is_powered(hdev)) {
1985 changed = !hci_dev_test_and_set_flag(hdev,
1988 changed = hci_dev_test_and_clear_flag(hdev,
1991 changed = hci_dev_test_and_clear_flag(hdev,
1994 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1997 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2002 err = new_settings(hdev, sk);
2007 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2008 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2013 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2014 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2018 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2022 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2026 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2027 MGMT_STATUS_FAILED);
2030 mgmt_pending_remove(cmd);
2034 hci_dev_unlock(hdev);
2038 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2040 struct mgmt_mode *cp = data;
2045 bt_dev_dbg(hdev, "sock %p", sk);
2047 if (!IS_ENABLED(CONFIG_BT_HS))
2048 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2049 MGMT_STATUS_NOT_SUPPORTED);
2051 status = mgmt_bredr_support(hdev);
2053 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2055 if (!lmp_ssp_capable(hdev))
2056 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2057 MGMT_STATUS_NOT_SUPPORTED);
2059 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2060 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2061 MGMT_STATUS_REJECTED);
2063 if (cp->val != 0x00 && cp->val != 0x01)
2064 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2065 MGMT_STATUS_INVALID_PARAMS);
2069 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2070 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2076 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2078 if (hdev_is_powered(hdev)) {
2079 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2080 MGMT_STATUS_REJECTED);
2084 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2087 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2092 err = new_settings(hdev, sk);
2095 hci_dev_unlock(hdev);
2099 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2101 struct cmd_lookup match = { NULL, hdev };
2102 u8 status = mgmt_status(err);
2104 bt_dev_dbg(hdev, "err %d", err);
2107 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2112 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2114 new_settings(hdev, match.sk);
2120 static int set_le_sync(struct hci_dev *hdev, void *data)
2122 struct mgmt_pending_cmd *cmd = data;
2123 struct mgmt_mode *cp = cmd->param;
2128 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2130 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2131 hci_disable_advertising_sync(hdev);
2133 if (ext_adv_capable(hdev))
2134 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2136 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2139 err = hci_write_le_host_supported_sync(hdev, val, 0);
2141 /* Make sure the controller has a good default for
2142 * advertising data. Restrict the update to when LE
2143 * has actually been enabled. During power on, the
2144 * update in powered_update_hci will take care of it.
2146 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2147 if (ext_adv_capable(hdev)) {
2150 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2152 hci_update_scan_rsp_data_sync(hdev, 0x00);
2154 hci_update_adv_data_sync(hdev, 0x00);
2155 hci_update_scan_rsp_data_sync(hdev, 0x00);
2158 hci_update_passive_scan(hdev);
2164 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2166 struct mgmt_pending_cmd *cmd = data;
2167 u8 status = mgmt_status(err);
2168 struct sock *sk = cmd->sk;
2171 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2172 cmd_status_rsp, &status);
2176 mgmt_pending_remove(cmd);
2177 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2180 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2182 struct mgmt_pending_cmd *cmd = data;
2183 struct mgmt_cp_set_mesh *cp = cmd->param;
2184 size_t len = cmd->param_len;
2186 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2189 hci_dev_set_flag(hdev, HCI_MESH);
2191 hci_dev_clear_flag(hdev, HCI_MESH);
2195 /* If filters don't fit, forward all adv pkts */
2196 if (len <= sizeof(hdev->mesh_ad_types))
2197 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2199 hci_update_passive_scan_sync(hdev);
2203 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2205 struct mgmt_cp_set_mesh *cp = data;
2206 struct mgmt_pending_cmd *cmd;
2209 bt_dev_dbg(hdev, "sock %p", sk);
2211 if (!lmp_le_capable(hdev) ||
2212 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2213 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2214 MGMT_STATUS_NOT_SUPPORTED);
2216 if (cp->enable != 0x00 && cp->enable != 0x01)
2217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2218 MGMT_STATUS_INVALID_PARAMS);
2222 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2226 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2230 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2231 MGMT_STATUS_FAILED);
2234 mgmt_pending_remove(cmd);
2237 hci_dev_unlock(hdev);
2241 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2243 struct mgmt_mesh_tx *mesh_tx = data;
2244 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2245 unsigned long mesh_send_interval;
2246 u8 mgmt_err = mgmt_status(err);
2248 /* Report any errors here, but don't report completion */
2251 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2252 /* Send Complete Error Code for handle */
2253 mesh_send_complete(hdev, mesh_tx, false);
2257 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2258 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2259 mesh_send_interval);
2262 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2264 struct mgmt_mesh_tx *mesh_tx = data;
2265 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2266 struct adv_info *adv, *next_instance;
2267 u8 instance = hdev->le_num_of_adv_sets + 1;
2268 u16 timeout, duration;
2271 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2272 return MGMT_STATUS_BUSY;
2275 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2276 adv = hci_add_adv_instance(hdev, instance, 0,
2277 send->adv_data_len, send->adv_data,
2280 HCI_ADV_TX_POWER_NO_PREFERENCE,
2281 hdev->le_adv_min_interval,
2282 hdev->le_adv_max_interval,
2286 mesh_tx->instance = instance;
2290 if (hdev->cur_adv_instance == instance) {
2291 /* If the currently advertised instance is being changed then
2292 * cancel the current advertising and schedule the next
2293 * instance. If there is only one instance then the overridden
2294 * advertising data will be visible right away.
2296 cancel_adv_timeout(hdev);
2298 next_instance = hci_get_next_instance(hdev, instance);
2300 instance = next_instance->instance;
2303 } else if (hdev->adv_instance_timeout) {
2304 /* Immediately advertise the new instance if no other, or
2305 * let it go naturally from queue if ADV is already happening
2311 return hci_schedule_adv_instance_sync(hdev, instance, true);
2316 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2318 struct mgmt_rp_mesh_read_features *rp = data;
2320 if (rp->used_handles >= rp->max_handles)
2323 rp->handles[rp->used_handles++] = mesh_tx->handle;
2326 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2327 void *data, u16 len)
2329 struct mgmt_rp_mesh_read_features rp;
2331 if (!lmp_le_capable(hdev) ||
2332 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2333 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2334 MGMT_STATUS_NOT_SUPPORTED);
2336 memset(&rp, 0, sizeof(rp));
2337 rp.index = cpu_to_le16(hdev->id);
2338 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2339 rp.max_handles = MESH_HANDLES_MAX;
2344 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2346 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2347 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2349 hci_dev_unlock(hdev);
2353 static int send_cancel(struct hci_dev *hdev, void *data)
2355 struct mgmt_pending_cmd *cmd = data;
2356 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2357 struct mgmt_mesh_tx *mesh_tx;
2359 if (!cancel->handle) {
2361 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2364 mesh_send_complete(hdev, mesh_tx, false);
2367 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2369 if (mesh_tx && mesh_tx->sk == cmd->sk)
2370 mesh_send_complete(hdev, mesh_tx, false);
2373 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2375 mgmt_pending_free(cmd);
2380 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2381 void *data, u16 len)
2383 struct mgmt_pending_cmd *cmd;
2386 if (!lmp_le_capable(hdev) ||
2387 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2388 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2389 MGMT_STATUS_NOT_SUPPORTED);
2391 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2392 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2393 MGMT_STATUS_REJECTED);
2396 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2400 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2403 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2404 MGMT_STATUS_FAILED);
2407 mgmt_pending_free(cmd);
2410 hci_dev_unlock(hdev);
2414 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2416 struct mgmt_mesh_tx *mesh_tx;
2417 struct mgmt_cp_mesh_send *send = data;
2418 struct mgmt_rp_mesh_read_features rp;
2422 if (!lmp_le_capable(hdev) ||
2423 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2424 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2425 MGMT_STATUS_NOT_SUPPORTED);
2426 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2427 len <= MGMT_MESH_SEND_SIZE ||
2428 len > (MGMT_MESH_SEND_SIZE + 31))
2429 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2430 MGMT_STATUS_REJECTED);
2434 memset(&rp, 0, sizeof(rp));
2435 rp.max_handles = MESH_HANDLES_MAX;
2437 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2439 if (rp.max_handles <= rp.used_handles) {
2440 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2445 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2446 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2451 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2452 mesh_send_start_complete);
2455 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2456 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2457 MGMT_STATUS_FAILED);
2461 mgmt_mesh_remove(mesh_tx);
2464 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2466 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2467 &mesh_tx->handle, 1);
2471 hci_dev_unlock(hdev);
2475 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2477 struct mgmt_mode *cp = data;
2478 struct mgmt_pending_cmd *cmd;
2482 bt_dev_dbg(hdev, "sock %p", sk);
2484 if (!lmp_le_capable(hdev))
2485 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2486 MGMT_STATUS_NOT_SUPPORTED);
2488 if (cp->val != 0x00 && cp->val != 0x01)
2489 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2490 MGMT_STATUS_INVALID_PARAMS);
2492 /* Bluetooth single mode LE only controllers or dual-mode
2493 * controllers configured as LE only devices, do not allow
2494 * switching LE off. These have either LE enabled explicitly
2495 * or BR/EDR has been previously switched off.
2497 * When trying to enable an already enabled LE, then gracefully
2498 * send a positive response. Trying to disable it however will
2499 * result into rejection.
2501 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2502 if (cp->val == 0x01)
2503 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2505 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2506 MGMT_STATUS_REJECTED);
2512 enabled = lmp_host_le_capable(hdev);
2514 if (!hdev_is_powered(hdev) || val == enabled) {
2515 bool changed = false;
2517 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2518 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2522 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2523 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2527 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2532 err = new_settings(hdev, sk);
2537 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2538 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2539 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2544 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2548 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2552 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2553 MGMT_STATUS_FAILED);
2556 mgmt_pending_remove(cmd);
2560 hci_dev_unlock(hdev);
2564 /* This is a helper function to test for pending mgmt commands that can
2565 * cause CoD or EIR HCI commands. We can only allow one such pending
2566 * mgmt command at a time since otherwise we cannot easily track what
2567 * the current values are, will be, and based on that calculate if a new
2568 * HCI command needs to be sent and if yes with what value.
2570 static bool pending_eir_or_class(struct hci_dev *hdev)
2572 struct mgmt_pending_cmd *cmd;
2574 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2575 switch (cmd->opcode) {
2576 case MGMT_OP_ADD_UUID:
2577 case MGMT_OP_REMOVE_UUID:
2578 case MGMT_OP_SET_DEV_CLASS:
2579 case MGMT_OP_SET_POWERED:
2587 static const u8 bluetooth_base_uuid[] = {
2588 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2589 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2592 static u8 get_uuid_size(const u8 *uuid)
2596 if (memcmp(uuid, bluetooth_base_uuid, 12))
2599 val = get_unaligned_le32(&uuid[12]);
2606 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2608 struct mgmt_pending_cmd *cmd = data;
2610 bt_dev_dbg(hdev, "err %d", err);
2612 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2613 mgmt_status(err), hdev->dev_class, 3);
2615 mgmt_pending_free(cmd);
2618 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2622 err = hci_update_class_sync(hdev);
2626 return hci_update_eir_sync(hdev);
2629 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2631 struct mgmt_cp_add_uuid *cp = data;
2632 struct mgmt_pending_cmd *cmd;
2633 struct bt_uuid *uuid;
2636 bt_dev_dbg(hdev, "sock %p", sk);
2640 if (pending_eir_or_class(hdev)) {
2641 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2646 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2652 memcpy(uuid->uuid, cp->uuid, 16);
2653 uuid->svc_hint = cp->svc_hint;
2654 uuid->size = get_uuid_size(cp->uuid);
2656 list_add_tail(&uuid->list, &hdev->uuids);
2658 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2664 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2666 mgmt_pending_free(cmd);
2671 hci_dev_unlock(hdev);
2675 static bool enable_service_cache(struct hci_dev *hdev)
2677 if (!hdev_is_powered(hdev))
2680 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2681 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2689 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2693 err = hci_update_class_sync(hdev);
2697 return hci_update_eir_sync(hdev);
2700 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2703 struct mgmt_cp_remove_uuid *cp = data;
2704 struct mgmt_pending_cmd *cmd;
2705 struct bt_uuid *match, *tmp;
2706 static const u8 bt_uuid_any[] = {
2707 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2711 bt_dev_dbg(hdev, "sock %p", sk);
2715 if (pending_eir_or_class(hdev)) {
2716 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2721 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2722 hci_uuids_clear(hdev);
2724 if (enable_service_cache(hdev)) {
2725 err = mgmt_cmd_complete(sk, hdev->id,
2726 MGMT_OP_REMOVE_UUID,
2727 0, hdev->dev_class, 3);
2736 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2737 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2740 list_del(&match->list);
2746 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2747 MGMT_STATUS_INVALID_PARAMS);
2752 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2758 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2759 mgmt_class_complete);
2761 mgmt_pending_free(cmd);
2764 hci_dev_unlock(hdev);
2768 static int set_class_sync(struct hci_dev *hdev, void *data)
2772 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2773 cancel_delayed_work_sync(&hdev->service_cache);
2774 err = hci_update_eir_sync(hdev);
2780 return hci_update_class_sync(hdev);
2783 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2786 struct mgmt_cp_set_dev_class *cp = data;
2787 struct mgmt_pending_cmd *cmd;
2790 bt_dev_dbg(hdev, "sock %p", sk);
2792 if (!lmp_bredr_capable(hdev))
2793 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2794 MGMT_STATUS_NOT_SUPPORTED);
2798 if (pending_eir_or_class(hdev)) {
2799 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2804 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2805 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2806 MGMT_STATUS_INVALID_PARAMS);
2810 hdev->major_class = cp->major;
2811 hdev->minor_class = cp->minor;
2813 if (!hdev_is_powered(hdev)) {
2814 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2815 hdev->dev_class, 3);
2819 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2825 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2826 mgmt_class_complete);
2828 mgmt_pending_free(cmd);
2831 hci_dev_unlock(hdev);
2835 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2838 struct mgmt_cp_load_link_keys *cp = data;
2839 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2840 sizeof(struct mgmt_link_key_info));
2841 u16 key_count, expected_len;
2845 bt_dev_dbg(hdev, "sock %p", sk);
2847 if (!lmp_bredr_capable(hdev))
2848 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2849 MGMT_STATUS_NOT_SUPPORTED);
2851 key_count = __le16_to_cpu(cp->key_count);
2852 if (key_count > max_key_count) {
2853 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2855 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2856 MGMT_STATUS_INVALID_PARAMS);
2859 expected_len = struct_size(cp, keys, key_count);
2860 if (expected_len != len) {
2861 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2864 MGMT_STATUS_INVALID_PARAMS);
2867 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2868 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2869 MGMT_STATUS_INVALID_PARAMS);
2871 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2874 for (i = 0; i < key_count; i++) {
2875 struct mgmt_link_key_info *key = &cp->keys[i];
2877 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2878 return mgmt_cmd_status(sk, hdev->id,
2879 MGMT_OP_LOAD_LINK_KEYS,
2880 MGMT_STATUS_INVALID_PARAMS);
2885 hci_link_keys_clear(hdev);
2888 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2890 changed = hci_dev_test_and_clear_flag(hdev,
2891 HCI_KEEP_DEBUG_KEYS);
2894 new_settings(hdev, NULL);
2896 for (i = 0; i < key_count; i++) {
2897 struct mgmt_link_key_info *key = &cp->keys[i];
2899 if (hci_is_blocked_key(hdev,
2900 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2902 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2907 /* Always ignore debug keys and require a new pairing if
2908 * the user wants to use them.
2910 if (key->type == HCI_LK_DEBUG_COMBINATION)
2913 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2914 key->type, key->pin_len, NULL);
2917 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2919 hci_dev_unlock(hdev);
2924 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2925 u8 addr_type, struct sock *skip_sk)
2927 struct mgmt_ev_device_unpaired ev;
2929 bacpy(&ev.addr.bdaddr, bdaddr);
2930 ev.addr.type = addr_type;
2932 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2936 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2938 struct mgmt_pending_cmd *cmd = data;
2939 struct mgmt_cp_unpair_device *cp = cmd->param;
2942 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2944 cmd->cmd_complete(cmd, err);
2945 mgmt_pending_free(cmd);
2948 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2950 struct mgmt_pending_cmd *cmd = data;
2951 struct mgmt_cp_unpair_device *cp = cmd->param;
2952 struct hci_conn *conn;
2954 if (cp->addr.type == BDADDR_BREDR)
2955 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2958 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2959 le_addr_type(cp->addr.type));
2964 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2967 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2970 struct mgmt_cp_unpair_device *cp = data;
2971 struct mgmt_rp_unpair_device rp;
2972 struct hci_conn_params *params;
2973 struct mgmt_pending_cmd *cmd;
2974 struct hci_conn *conn;
2978 memset(&rp, 0, sizeof(rp));
2979 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2980 rp.addr.type = cp->addr.type;
2982 if (!bdaddr_type_is_valid(cp->addr.type))
2983 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2984 MGMT_STATUS_INVALID_PARAMS,
2987 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2988 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2989 MGMT_STATUS_INVALID_PARAMS,
2994 if (!hdev_is_powered(hdev)) {
2995 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2996 MGMT_STATUS_NOT_POWERED, &rp,
3001 if (cp->addr.type == BDADDR_BREDR) {
3002 /* If disconnection is requested, then look up the
3003 * connection. If the remote device is connected, it
3004 * will be later used to terminate the link.
3006 * Setting it to NULL explicitly will cause no
3007 * termination of the link.
3010 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3015 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3017 err = mgmt_cmd_complete(sk, hdev->id,
3018 MGMT_OP_UNPAIR_DEVICE,
3019 MGMT_STATUS_NOT_PAIRED, &rp,
3027 /* LE address type */
3028 addr_type = le_addr_type(cp->addr.type);
3030 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3031 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3033 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3034 MGMT_STATUS_NOT_PAIRED, &rp,
3039 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3041 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3046 /* Defer clearing up the connection parameters until closing to
3047 * give a chance of keeping them if a repairing happens.
3049 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3051 /* Disable auto-connection parameters if present */
3052 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3054 if (params->explicit_connect)
3055 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3057 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3060 /* If disconnection is not requested, then clear the connection
3061 * variable so that the link is not terminated.
3063 if (!cp->disconnect)
3067 /* If the connection variable is set, then termination of the
3068 * link is requested.
3071 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3073 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3077 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3084 cmd->cmd_complete = addr_cmd_complete;
3086 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3087 unpair_device_complete);
3089 mgmt_pending_free(cmd);
3092 hci_dev_unlock(hdev);
3096 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3099 struct mgmt_cp_disconnect *cp = data;
3100 struct mgmt_rp_disconnect rp;
3101 struct mgmt_pending_cmd *cmd;
3102 struct hci_conn *conn;
3105 bt_dev_dbg(hdev, "sock %p", sk);
3107 memset(&rp, 0, sizeof(rp));
3108 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3109 rp.addr.type = cp->addr.type;
3111 if (!bdaddr_type_is_valid(cp->addr.type))
3112 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3113 MGMT_STATUS_INVALID_PARAMS,
3118 if (!test_bit(HCI_UP, &hdev->flags)) {
3119 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3120 MGMT_STATUS_NOT_POWERED, &rp,
3125 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3126 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3127 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3131 if (cp->addr.type == BDADDR_BREDR)
3132 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3135 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3136 le_addr_type(cp->addr.type));
3138 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3139 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3140 MGMT_STATUS_NOT_CONNECTED, &rp,
3145 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3151 cmd->cmd_complete = generic_cmd_complete;
3153 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3155 mgmt_pending_remove(cmd);
3158 hci_dev_unlock(hdev);
3162 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3164 switch (link_type) {
3166 switch (addr_type) {
3167 case ADDR_LE_DEV_PUBLIC:
3168 return BDADDR_LE_PUBLIC;
3171 /* Fallback to LE Random address type */
3172 return BDADDR_LE_RANDOM;
3176 /* Fallback to BR/EDR type */
3177 return BDADDR_BREDR;
3181 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3184 struct mgmt_rp_get_connections *rp;
3189 bt_dev_dbg(hdev, "sock %p", sk);
3193 if (!hdev_is_powered(hdev)) {
3194 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3195 MGMT_STATUS_NOT_POWERED);
3200 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3201 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3205 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3212 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3213 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3215 bacpy(&rp->addr[i].bdaddr, &c->dst);
3216 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3217 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3222 rp->conn_count = cpu_to_le16(i);
3224 /* Recalculate length in case of filtered SCO connections, etc */
3225 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3226 struct_size(rp, addr, i));
3231 hci_dev_unlock(hdev);
3235 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3236 struct mgmt_cp_pin_code_neg_reply *cp)
3238 struct mgmt_pending_cmd *cmd;
3241 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3246 cmd->cmd_complete = addr_cmd_complete;
3248 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3249 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3251 mgmt_pending_remove(cmd);
3256 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3259 struct hci_conn *conn;
3260 struct mgmt_cp_pin_code_reply *cp = data;
3261 struct hci_cp_pin_code_reply reply;
3262 struct mgmt_pending_cmd *cmd;
3265 bt_dev_dbg(hdev, "sock %p", sk);
3269 if (!hdev_is_powered(hdev)) {
3270 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3271 MGMT_STATUS_NOT_POWERED);
3275 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3277 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3278 MGMT_STATUS_NOT_CONNECTED);
3282 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3283 struct mgmt_cp_pin_code_neg_reply ncp;
3285 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3287 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3289 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3291 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3292 MGMT_STATUS_INVALID_PARAMS);
3297 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3303 cmd->cmd_complete = addr_cmd_complete;
3305 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3306 reply.pin_len = cp->pin_len;
3307 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3309 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3311 mgmt_pending_remove(cmd);
3314 hci_dev_unlock(hdev);
3318 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3321 struct mgmt_cp_set_io_capability *cp = data;
3323 bt_dev_dbg(hdev, "sock %p", sk);
3325 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3326 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3327 MGMT_STATUS_INVALID_PARAMS);
3331 hdev->io_capability = cp->io_capability;
3333 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3335 hci_dev_unlock(hdev);
3337 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3341 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3343 struct hci_dev *hdev = conn->hdev;
3344 struct mgmt_pending_cmd *cmd;
3346 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3347 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3350 if (cmd->user_data != conn)
3359 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3361 struct mgmt_rp_pair_device rp;
3362 struct hci_conn *conn = cmd->user_data;
3365 bacpy(&rp.addr.bdaddr, &conn->dst);
3366 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3368 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3369 status, &rp, sizeof(rp));
3371 /* So we don't get further callbacks for this connection */
3372 conn->connect_cfm_cb = NULL;
3373 conn->security_cfm_cb = NULL;
3374 conn->disconn_cfm_cb = NULL;
3376 hci_conn_drop(conn);
3378 /* The device is paired so there is no need to remove
3379 * its connection parameters anymore.
3381 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3388 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3390 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3391 struct mgmt_pending_cmd *cmd;
3393 cmd = find_pairing(conn);
3395 cmd->cmd_complete(cmd, status);
3396 mgmt_pending_remove(cmd);
3400 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3402 struct mgmt_pending_cmd *cmd;
3404 BT_DBG("status %u", status);
3406 cmd = find_pairing(conn);
3408 BT_DBG("Unable to find a pending command");
3412 cmd->cmd_complete(cmd, mgmt_status(status));
3413 mgmt_pending_remove(cmd);
3416 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3418 struct mgmt_pending_cmd *cmd;
3420 BT_DBG("status %u", status);
3425 cmd = find_pairing(conn);
3427 BT_DBG("Unable to find a pending command");
3431 cmd->cmd_complete(cmd, mgmt_status(status));
3432 mgmt_pending_remove(cmd);
3435 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3438 struct mgmt_cp_pair_device *cp = data;
3439 struct mgmt_rp_pair_device rp;
3440 struct mgmt_pending_cmd *cmd;
3441 u8 sec_level, auth_type;
3442 struct hci_conn *conn;
3445 bt_dev_dbg(hdev, "sock %p", sk);
3447 memset(&rp, 0, sizeof(rp));
3448 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3449 rp.addr.type = cp->addr.type;
3451 if (!bdaddr_type_is_valid(cp->addr.type))
3452 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3453 MGMT_STATUS_INVALID_PARAMS,
3456 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3457 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3458 MGMT_STATUS_INVALID_PARAMS,
3463 if (!hdev_is_powered(hdev)) {
3464 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3465 MGMT_STATUS_NOT_POWERED, &rp,
3470 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3471 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3472 MGMT_STATUS_ALREADY_PAIRED, &rp,
3477 sec_level = BT_SECURITY_MEDIUM;
3478 auth_type = HCI_AT_DEDICATED_BONDING;
3480 if (cp->addr.type == BDADDR_BREDR) {
3481 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3482 auth_type, CONN_REASON_PAIR_DEVICE);
3484 u8 addr_type = le_addr_type(cp->addr.type);
3485 struct hci_conn_params *p;
3487 /* When pairing a new device, it is expected to remember
3488 * this device for future connections. Adding the connection
3489 * parameter information ahead of time allows tracking
3490 * of the peripheral preferred values and will speed up any
3491 * further connection establishment.
3493 * If connection parameters already exist, then they
3494 * will be kept and this function does nothing.
3496 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3498 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3499 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3501 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3502 sec_level, HCI_LE_CONN_TIMEOUT,
3503 CONN_REASON_PAIR_DEVICE);
3509 if (PTR_ERR(conn) == -EBUSY)
3510 status = MGMT_STATUS_BUSY;
3511 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3512 status = MGMT_STATUS_NOT_SUPPORTED;
3513 else if (PTR_ERR(conn) == -ECONNREFUSED)
3514 status = MGMT_STATUS_REJECTED;
3516 status = MGMT_STATUS_CONNECT_FAILED;
3518 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3519 status, &rp, sizeof(rp));
3523 if (conn->connect_cfm_cb) {
3524 hci_conn_drop(conn);
3525 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3526 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3530 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3533 hci_conn_drop(conn);
3537 cmd->cmd_complete = pairing_complete;
3539 /* For LE, just connecting isn't a proof that the pairing finished */
3540 if (cp->addr.type == BDADDR_BREDR) {
3541 conn->connect_cfm_cb = pairing_complete_cb;
3542 conn->security_cfm_cb = pairing_complete_cb;
3543 conn->disconn_cfm_cb = pairing_complete_cb;
3545 conn->connect_cfm_cb = le_pairing_complete_cb;
3546 conn->security_cfm_cb = le_pairing_complete_cb;
3547 conn->disconn_cfm_cb = le_pairing_complete_cb;
3550 conn->io_capability = cp->io_cap;
3551 cmd->user_data = hci_conn_get(conn);
3553 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3554 hci_conn_security(conn, sec_level, auth_type, true)) {
3555 cmd->cmd_complete(cmd, 0);
3556 mgmt_pending_remove(cmd);
3562 hci_dev_unlock(hdev);
3566 static int abort_conn_sync(struct hci_dev *hdev, void *data)
3568 struct hci_conn *conn;
3569 u16 handle = PTR_ERR(data);
3571 conn = hci_conn_hash_lookup_handle(hdev, handle);
3575 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
3578 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3581 struct mgmt_addr_info *addr = data;
3582 struct mgmt_pending_cmd *cmd;
3583 struct hci_conn *conn;
3586 bt_dev_dbg(hdev, "sock %p", sk);
3590 if (!hdev_is_powered(hdev)) {
3591 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3592 MGMT_STATUS_NOT_POWERED);
3596 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3599 MGMT_STATUS_INVALID_PARAMS);
3603 conn = cmd->user_data;
3605 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3606 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3607 MGMT_STATUS_INVALID_PARAMS);
3611 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3612 mgmt_pending_remove(cmd);
3614 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3615 addr, sizeof(*addr));
3617 /* Since user doesn't want to proceed with the connection, abort any
3618 * ongoing pairing and then terminate the link if it was created
3619 * because of the pair device action.
3621 if (addr->type == BDADDR_BREDR)
3622 hci_remove_link_key(hdev, &addr->bdaddr);
3624 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3625 le_addr_type(addr->type));
3627 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3628 hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
3632 hci_dev_unlock(hdev);
3636 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3637 struct mgmt_addr_info *addr, u16 mgmt_op,
3638 u16 hci_op, __le32 passkey)
3640 struct mgmt_pending_cmd *cmd;
3641 struct hci_conn *conn;
3646 if (!hdev_is_powered(hdev)) {
3647 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3648 MGMT_STATUS_NOT_POWERED, addr,
3653 if (addr->type == BDADDR_BREDR)
3654 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3656 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3657 le_addr_type(addr->type));
3660 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3661 MGMT_STATUS_NOT_CONNECTED, addr,
3666 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3667 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3669 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3670 MGMT_STATUS_SUCCESS, addr,
3673 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3674 MGMT_STATUS_FAILED, addr,
3680 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3686 cmd->cmd_complete = addr_cmd_complete;
3688 /* Continue with pairing via HCI */
3689 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3690 struct hci_cp_user_passkey_reply cp;
3692 bacpy(&cp.bdaddr, &addr->bdaddr);
3693 cp.passkey = passkey;
3694 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3696 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3700 mgmt_pending_remove(cmd);
3703 hci_dev_unlock(hdev);
3707 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3708 void *data, u16 len)
3710 struct mgmt_cp_pin_code_neg_reply *cp = data;
3712 bt_dev_dbg(hdev, "sock %p", sk);
3714 return user_pairing_resp(sk, hdev, &cp->addr,
3715 MGMT_OP_PIN_CODE_NEG_REPLY,
3716 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3719 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3722 struct mgmt_cp_user_confirm_reply *cp = data;
3724 bt_dev_dbg(hdev, "sock %p", sk);
3726 if (len != sizeof(*cp))
3727 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3728 MGMT_STATUS_INVALID_PARAMS);
3730 return user_pairing_resp(sk, hdev, &cp->addr,
3731 MGMT_OP_USER_CONFIRM_REPLY,
3732 HCI_OP_USER_CONFIRM_REPLY, 0);
3735 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3736 void *data, u16 len)
3738 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3740 bt_dev_dbg(hdev, "sock %p", sk);
3742 return user_pairing_resp(sk, hdev, &cp->addr,
3743 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3744 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3747 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3750 struct mgmt_cp_user_passkey_reply *cp = data;
3752 bt_dev_dbg(hdev, "sock %p", sk);
3754 return user_pairing_resp(sk, hdev, &cp->addr,
3755 MGMT_OP_USER_PASSKEY_REPLY,
3756 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3759 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3760 void *data, u16 len)
3762 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3764 bt_dev_dbg(hdev, "sock %p", sk);
3766 return user_pairing_resp(sk, hdev, &cp->addr,
3767 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3768 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3771 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3773 struct adv_info *adv_instance;
3775 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3779 /* stop if current instance doesn't need to be changed */
3780 if (!(adv_instance->flags & flags))
3783 cancel_adv_timeout(hdev);
3785 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3789 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3794 static int name_changed_sync(struct hci_dev *hdev, void *data)
3796 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3799 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3801 struct mgmt_pending_cmd *cmd = data;
3802 struct mgmt_cp_set_local_name *cp = cmd->param;
3803 u8 status = mgmt_status(err);
3805 bt_dev_dbg(hdev, "err %d", err);
3807 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3811 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3814 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3817 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3818 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3821 mgmt_pending_remove(cmd);
3824 static int set_name_sync(struct hci_dev *hdev, void *data)
3826 if (lmp_bredr_capable(hdev)) {
3827 hci_update_name_sync(hdev);
3828 hci_update_eir_sync(hdev);
3831 /* The name is stored in the scan response data and so
3832 * no need to update the advertising data here.
3834 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3835 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3840 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3843 struct mgmt_cp_set_local_name *cp = data;
3844 struct mgmt_pending_cmd *cmd;
3847 bt_dev_dbg(hdev, "sock %p", sk);
3851 /* If the old values are the same as the new ones just return a
3852 * direct command complete event.
3854 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3855 !memcmp(hdev->short_name, cp->short_name,
3856 sizeof(hdev->short_name))) {
3857 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3862 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3864 if (!hdev_is_powered(hdev)) {
3865 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3867 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3872 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3873 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3874 ext_info_changed(hdev, sk);
3879 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3883 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3887 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3888 MGMT_STATUS_FAILED);
3891 mgmt_pending_remove(cmd);
3896 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3899 hci_dev_unlock(hdev);
3903 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3905 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3908 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3911 struct mgmt_cp_set_appearance *cp = data;
3915 bt_dev_dbg(hdev, "sock %p", sk);
3917 if (!lmp_le_capable(hdev))
3918 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3919 MGMT_STATUS_NOT_SUPPORTED);
3921 appearance = le16_to_cpu(cp->appearance);
3925 if (hdev->appearance != appearance) {
3926 hdev->appearance = appearance;
3928 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3929 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3932 ext_info_changed(hdev, sk);
3935 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3938 hci_dev_unlock(hdev);
3943 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3944 void *data, u16 len)
3946 struct mgmt_rp_get_phy_configuration rp;
3948 bt_dev_dbg(hdev, "sock %p", sk);
3952 memset(&rp, 0, sizeof(rp));
3954 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3955 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3956 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3958 hci_dev_unlock(hdev);
3960 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3964 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3966 struct mgmt_ev_phy_configuration_changed ev;
3968 memset(&ev, 0, sizeof(ev));
3970 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3972 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3976 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3978 struct mgmt_pending_cmd *cmd = data;
3979 struct sk_buff *skb = cmd->skb;
3980 u8 status = mgmt_status(err);
3982 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3987 status = MGMT_STATUS_FAILED;
3988 else if (IS_ERR(skb))
3989 status = mgmt_status(PTR_ERR(skb));
3991 status = mgmt_status(skb->data[0]);
3994 bt_dev_dbg(hdev, "status %d", status);
3997 mgmt_cmd_status(cmd->sk, hdev->id,
3998 MGMT_OP_SET_PHY_CONFIGURATION, status);
4000 mgmt_cmd_complete(cmd->sk, hdev->id,
4001 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4004 mgmt_phy_configuration_changed(hdev, cmd->sk);
4007 if (skb && !IS_ERR(skb))
4010 mgmt_pending_remove(cmd);
4013 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4015 struct mgmt_pending_cmd *cmd = data;
4016 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4017 struct hci_cp_le_set_default_phy cp_phy;
4018 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4020 memset(&cp_phy, 0, sizeof(cp_phy));
4022 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4023 cp_phy.all_phys |= 0x01;
4025 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4026 cp_phy.all_phys |= 0x02;
4028 if (selected_phys & MGMT_PHY_LE_1M_TX)
4029 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4031 if (selected_phys & MGMT_PHY_LE_2M_TX)
4032 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4034 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4035 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4037 if (selected_phys & MGMT_PHY_LE_1M_RX)
4038 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4040 if (selected_phys & MGMT_PHY_LE_2M_RX)
4041 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4043 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4044 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4046 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4047 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4052 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4053 void *data, u16 len)
4055 struct mgmt_cp_set_phy_configuration *cp = data;
4056 struct mgmt_pending_cmd *cmd;
4057 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4058 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4059 bool changed = false;
4062 bt_dev_dbg(hdev, "sock %p", sk);
4064 configurable_phys = get_configurable_phys(hdev);
4065 supported_phys = get_supported_phys(hdev);
4066 selected_phys = __le32_to_cpu(cp->selected_phys);
4068 if (selected_phys & ~supported_phys)
4069 return mgmt_cmd_status(sk, hdev->id,
4070 MGMT_OP_SET_PHY_CONFIGURATION,
4071 MGMT_STATUS_INVALID_PARAMS);
4073 unconfigure_phys = supported_phys & ~configurable_phys;
4075 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4076 return mgmt_cmd_status(sk, hdev->id,
4077 MGMT_OP_SET_PHY_CONFIGURATION,
4078 MGMT_STATUS_INVALID_PARAMS);
4080 if (selected_phys == get_selected_phys(hdev))
4081 return mgmt_cmd_complete(sk, hdev->id,
4082 MGMT_OP_SET_PHY_CONFIGURATION,
4087 if (!hdev_is_powered(hdev)) {
4088 err = mgmt_cmd_status(sk, hdev->id,
4089 MGMT_OP_SET_PHY_CONFIGURATION,
4090 MGMT_STATUS_REJECTED);
4094 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4095 err = mgmt_cmd_status(sk, hdev->id,
4096 MGMT_OP_SET_PHY_CONFIGURATION,
4101 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4102 pkt_type |= (HCI_DH3 | HCI_DM3);
4104 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4106 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4107 pkt_type |= (HCI_DH5 | HCI_DM5);
4109 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4111 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4112 pkt_type &= ~HCI_2DH1;
4114 pkt_type |= HCI_2DH1;
4116 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4117 pkt_type &= ~HCI_2DH3;
4119 pkt_type |= HCI_2DH3;
4121 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4122 pkt_type &= ~HCI_2DH5;
4124 pkt_type |= HCI_2DH5;
4126 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4127 pkt_type &= ~HCI_3DH1;
4129 pkt_type |= HCI_3DH1;
4131 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4132 pkt_type &= ~HCI_3DH3;
4134 pkt_type |= HCI_3DH3;
4136 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4137 pkt_type &= ~HCI_3DH5;
4139 pkt_type |= HCI_3DH5;
4141 if (pkt_type != hdev->pkt_type) {
4142 hdev->pkt_type = pkt_type;
4146 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4147 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4149 mgmt_phy_configuration_changed(hdev, sk);
4151 err = mgmt_cmd_complete(sk, hdev->id,
4152 MGMT_OP_SET_PHY_CONFIGURATION,
4158 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4163 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4164 set_default_phy_complete);
4167 err = mgmt_cmd_status(sk, hdev->id,
4168 MGMT_OP_SET_PHY_CONFIGURATION,
4169 MGMT_STATUS_FAILED);
4172 mgmt_pending_remove(cmd);
4176 hci_dev_unlock(hdev);
4181 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4184 int err = MGMT_STATUS_SUCCESS;
4185 struct mgmt_cp_set_blocked_keys *keys = data;
4186 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4187 sizeof(struct mgmt_blocked_key_info));
4188 u16 key_count, expected_len;
4191 bt_dev_dbg(hdev, "sock %p", sk);
4193 key_count = __le16_to_cpu(keys->key_count);
4194 if (key_count > max_key_count) {
4195 bt_dev_err(hdev, "too big key_count value %u", key_count);
4196 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4197 MGMT_STATUS_INVALID_PARAMS);
4200 expected_len = struct_size(keys, keys, key_count);
4201 if (expected_len != len) {
4202 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4204 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4205 MGMT_STATUS_INVALID_PARAMS);
4210 hci_blocked_keys_clear(hdev);
4212 for (i = 0; i < key_count; ++i) {
4213 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4216 err = MGMT_STATUS_NO_RESOURCES;
4220 b->type = keys->keys[i].type;
4221 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4222 list_add_rcu(&b->list, &hdev->blocked_keys);
4224 hci_dev_unlock(hdev);
4226 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4230 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4231 void *data, u16 len)
4233 struct mgmt_mode *cp = data;
4235 bool changed = false;
4237 bt_dev_dbg(hdev, "sock %p", sk);
4239 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4240 return mgmt_cmd_status(sk, hdev->id,
4241 MGMT_OP_SET_WIDEBAND_SPEECH,
4242 MGMT_STATUS_NOT_SUPPORTED);
4244 if (cp->val != 0x00 && cp->val != 0x01)
4245 return mgmt_cmd_status(sk, hdev->id,
4246 MGMT_OP_SET_WIDEBAND_SPEECH,
4247 MGMT_STATUS_INVALID_PARAMS);
4251 if (hdev_is_powered(hdev) &&
4252 !!cp->val != hci_dev_test_flag(hdev,
4253 HCI_WIDEBAND_SPEECH_ENABLED)) {
4254 err = mgmt_cmd_status(sk, hdev->id,
4255 MGMT_OP_SET_WIDEBAND_SPEECH,
4256 MGMT_STATUS_REJECTED);
4261 changed = !hci_dev_test_and_set_flag(hdev,
4262 HCI_WIDEBAND_SPEECH_ENABLED);
4264 changed = hci_dev_test_and_clear_flag(hdev,
4265 HCI_WIDEBAND_SPEECH_ENABLED);
4267 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4272 err = new_settings(hdev, sk);
4275 hci_dev_unlock(hdev);
4279 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4280 void *data, u16 data_len)
4283 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4286 u8 tx_power_range[2];
4288 bt_dev_dbg(hdev, "sock %p", sk);
4290 memset(&buf, 0, sizeof(buf));
4294 /* When the Read Simple Pairing Options command is supported, then
4295 * the remote public key validation is supported.
4297 * Alternatively, when Microsoft extensions are available, they can
4298 * indicate support for public key validation as well.
4300 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4301 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4303 flags |= 0x02; /* Remote public key validation (LE) */
4305 /* When the Read Encryption Key Size command is supported, then the
4306 * encryption key size is enforced.
4308 if (hdev->commands[20] & 0x10)
4309 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4311 flags |= 0x08; /* Encryption key size enforcement (LE) */
4313 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4316 /* When the Read Simple Pairing Options command is supported, then
4317 * also max encryption key size information is provided.
4319 if (hdev->commands[41] & 0x08)
4320 cap_len = eir_append_le16(rp->cap, cap_len,
4321 MGMT_CAP_MAX_ENC_KEY_SIZE,
4322 hdev->max_enc_key_size);
4324 cap_len = eir_append_le16(rp->cap, cap_len,
4325 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4326 SMP_MAX_ENC_KEY_SIZE);
4328 /* Append the min/max LE tx power parameters if we were able to fetch
4329 * it from the controller
4331 if (hdev->commands[38] & 0x80) {
4332 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4333 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4334 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4338 rp->cap_len = cpu_to_le16(cap_len);
4340 hci_dev_unlock(hdev);
4342 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4343 rp, sizeof(*rp) + cap_len);
4346 #ifdef CONFIG_BT_FEATURE_DEBUG
4347 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4348 static const u8 debug_uuid[16] = {
4349 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4350 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4354 /* 330859bc-7506-492d-9370-9a6f0614037f */
4355 static const u8 quality_report_uuid[16] = {
4356 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4357 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4360 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4361 static const u8 offload_codecs_uuid[16] = {
4362 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4363 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4366 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4367 static const u8 le_simultaneous_roles_uuid[16] = {
4368 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4369 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4372 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4373 static const u8 rpa_resolution_uuid[16] = {
4374 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4375 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4378 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4379 static const u8 iso_socket_uuid[16] = {
4380 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4381 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4384 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4385 static const u8 mgmt_mesh_uuid[16] = {
4386 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4387 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4390 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4391 void *data, u16 data_len)
4393 struct mgmt_rp_read_exp_features_info *rp;
4399 bt_dev_dbg(hdev, "sock %p", sk);
4401 /* Enough space for 7 features */
4402 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4403 rp = kzalloc(len, GFP_KERNEL);
4407 #ifdef CONFIG_BT_FEATURE_DEBUG
4409 flags = bt_dbg_get() ? BIT(0) : 0;
4411 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4412 rp->features[idx].flags = cpu_to_le32(flags);
4417 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4418 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4423 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4424 rp->features[idx].flags = cpu_to_le32(flags);
4428 if (hdev && ll_privacy_capable(hdev)) {
4429 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4430 flags = BIT(0) | BIT(1);
4434 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4435 rp->features[idx].flags = cpu_to_le32(flags);
4439 if (hdev && (aosp_has_quality_report(hdev) ||
4440 hdev->set_quality_report)) {
4441 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4446 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4447 rp->features[idx].flags = cpu_to_le32(flags);
4451 if (hdev && hdev->get_data_path_id) {
4452 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4457 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4458 rp->features[idx].flags = cpu_to_le32(flags);
4462 if (IS_ENABLED(CONFIG_BT_LE)) {
4463 flags = iso_enabled() ? BIT(0) : 0;
4464 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4465 rp->features[idx].flags = cpu_to_le32(flags);
4469 if (hdev && lmp_le_capable(hdev)) {
4470 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4475 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4476 rp->features[idx].flags = cpu_to_le32(flags);
4480 rp->feature_count = cpu_to_le16(idx);
4482 /* After reading the experimental features information, enable
4483 * the events to update client on any future change.
4485 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4487 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4488 MGMT_OP_READ_EXP_FEATURES_INFO,
4489 0, rp, sizeof(*rp) + (20 * idx));
4495 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4498 struct mgmt_ev_exp_feature_changed ev;
4500 memset(&ev, 0, sizeof(ev));
4501 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4502 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4504 // Do we need to be atomic with the conn_flags?
4505 if (enabled && privacy_mode_capable(hdev))
4506 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4508 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4510 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4512 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4516 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4517 bool enabled, struct sock *skip)
4519 struct mgmt_ev_exp_feature_changed ev;
4521 memset(&ev, 0, sizeof(ev));
4522 memcpy(ev.uuid, uuid, 16);
4523 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4525 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4527 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4530 #define EXP_FEAT(_uuid, _set_func) \
4533 .set_func = _set_func, \
4536 /* The zero key uuid is special. Multiple exp features are set through it. */
4537 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4538 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4540 struct mgmt_rp_set_exp_feature rp;
4542 memset(rp.uuid, 0, 16);
4543 rp.flags = cpu_to_le32(0);
4545 #ifdef CONFIG_BT_FEATURE_DEBUG
4547 bool changed = bt_dbg_get();
4552 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4556 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4559 changed = hci_dev_test_and_clear_flag(hdev,
4560 HCI_ENABLE_LL_PRIVACY);
4562 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4566 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4568 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4569 MGMT_OP_SET_EXP_FEATURE, 0,
4573 #ifdef CONFIG_BT_FEATURE_DEBUG
4574 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4575 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4577 struct mgmt_rp_set_exp_feature rp;
4582 /* Command requires to use the non-controller index */
4584 return mgmt_cmd_status(sk, hdev->id,
4585 MGMT_OP_SET_EXP_FEATURE,
4586 MGMT_STATUS_INVALID_INDEX);
4588 /* Parameters are limited to a single octet */
4589 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4590 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4591 MGMT_OP_SET_EXP_FEATURE,
4592 MGMT_STATUS_INVALID_PARAMS);
4594 /* Only boolean on/off is supported */
4595 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4596 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4597 MGMT_OP_SET_EXP_FEATURE,
4598 MGMT_STATUS_INVALID_PARAMS);
4600 val = !!cp->param[0];
4601 changed = val ? !bt_dbg_get() : bt_dbg_get();
4604 memcpy(rp.uuid, debug_uuid, 16);
4605 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4607 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4609 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4610 MGMT_OP_SET_EXP_FEATURE, 0,
4614 exp_feature_changed(hdev, debug_uuid, val, sk);
4620 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4621 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4623 struct mgmt_rp_set_exp_feature rp;
4627 /* Command requires to use the controller index */
4629 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4630 MGMT_OP_SET_EXP_FEATURE,
4631 MGMT_STATUS_INVALID_INDEX);
4633 /* Changes can only be made when controller is powered down */
4634 if (hdev_is_powered(hdev))
4635 return mgmt_cmd_status(sk, hdev->id,
4636 MGMT_OP_SET_EXP_FEATURE,
4637 MGMT_STATUS_REJECTED);
4639 /* Parameters are limited to a single octet */
4640 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4641 return mgmt_cmd_status(sk, hdev->id,
4642 MGMT_OP_SET_EXP_FEATURE,
4643 MGMT_STATUS_INVALID_PARAMS);
4645 /* Only boolean on/off is supported */
4646 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4647 return mgmt_cmd_status(sk, hdev->id,
4648 MGMT_OP_SET_EXP_FEATURE,
4649 MGMT_STATUS_INVALID_PARAMS);
4651 val = !!cp->param[0];
4654 changed = !hci_dev_test_and_set_flag(hdev,
4655 HCI_MESH_EXPERIMENTAL);
4657 hci_dev_clear_flag(hdev, HCI_MESH);
4658 changed = hci_dev_test_and_clear_flag(hdev,
4659 HCI_MESH_EXPERIMENTAL);
4662 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4663 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4665 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4667 err = mgmt_cmd_complete(sk, hdev->id,
4668 MGMT_OP_SET_EXP_FEATURE, 0,
4672 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4677 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4678 struct mgmt_cp_set_exp_feature *cp,
4681 struct mgmt_rp_set_exp_feature rp;
4686 /* Command requires to use the controller index */
4688 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4689 MGMT_OP_SET_EXP_FEATURE,
4690 MGMT_STATUS_INVALID_INDEX);
4692 /* Changes can only be made when controller is powered down */
4693 if (hdev_is_powered(hdev))
4694 return mgmt_cmd_status(sk, hdev->id,
4695 MGMT_OP_SET_EXP_FEATURE,
4696 MGMT_STATUS_REJECTED);
4698 /* Parameters are limited to a single octet */
4699 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4700 return mgmt_cmd_status(sk, hdev->id,
4701 MGMT_OP_SET_EXP_FEATURE,
4702 MGMT_STATUS_INVALID_PARAMS);
4704 /* Only boolean on/off is supported */
4705 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4706 return mgmt_cmd_status(sk, hdev->id,
4707 MGMT_OP_SET_EXP_FEATURE,
4708 MGMT_STATUS_INVALID_PARAMS);
4710 val = !!cp->param[0];
4713 changed = !hci_dev_test_and_set_flag(hdev,
4714 HCI_ENABLE_LL_PRIVACY);
4715 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4717 /* Enable LL privacy + supported settings changed */
4718 flags = BIT(0) | BIT(1);
4720 changed = hci_dev_test_and_clear_flag(hdev,
4721 HCI_ENABLE_LL_PRIVACY);
4723 /* Disable LL privacy + supported settings changed */
4727 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4728 rp.flags = cpu_to_le32(flags);
4730 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4732 err = mgmt_cmd_complete(sk, hdev->id,
4733 MGMT_OP_SET_EXP_FEATURE, 0,
4737 exp_ll_privacy_feature_changed(val, hdev, sk);
4742 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4743 struct mgmt_cp_set_exp_feature *cp,
4746 struct mgmt_rp_set_exp_feature rp;
4750 /* Command requires to use a valid controller index */
4752 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4753 MGMT_OP_SET_EXP_FEATURE,
4754 MGMT_STATUS_INVALID_INDEX);
4756 /* Parameters are limited to a single octet */
4757 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4758 return mgmt_cmd_status(sk, hdev->id,
4759 MGMT_OP_SET_EXP_FEATURE,
4760 MGMT_STATUS_INVALID_PARAMS);
4762 /* Only boolean on/off is supported */
4763 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4764 return mgmt_cmd_status(sk, hdev->id,
4765 MGMT_OP_SET_EXP_FEATURE,
4766 MGMT_STATUS_INVALID_PARAMS);
4768 hci_req_sync_lock(hdev);
4770 val = !!cp->param[0];
4771 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4773 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4774 err = mgmt_cmd_status(sk, hdev->id,
4775 MGMT_OP_SET_EXP_FEATURE,
4776 MGMT_STATUS_NOT_SUPPORTED);
4777 goto unlock_quality_report;
4781 if (hdev->set_quality_report)
4782 err = hdev->set_quality_report(hdev, val);
4784 err = aosp_set_quality_report(hdev, val);
4787 err = mgmt_cmd_status(sk, hdev->id,
4788 MGMT_OP_SET_EXP_FEATURE,
4789 MGMT_STATUS_FAILED);
4790 goto unlock_quality_report;
4794 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4796 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4799 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4801 memcpy(rp.uuid, quality_report_uuid, 16);
4802 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4803 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4805 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4809 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4811 unlock_quality_report:
4812 hci_req_sync_unlock(hdev);
4816 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4817 struct mgmt_cp_set_exp_feature *cp,
4822 struct mgmt_rp_set_exp_feature rp;
4824 /* Command requires to use a valid controller index */
4826 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4827 MGMT_OP_SET_EXP_FEATURE,
4828 MGMT_STATUS_INVALID_INDEX);
4830 /* Parameters are limited to a single octet */
4831 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4832 return mgmt_cmd_status(sk, hdev->id,
4833 MGMT_OP_SET_EXP_FEATURE,
4834 MGMT_STATUS_INVALID_PARAMS);
4836 /* Only boolean on/off is supported */
4837 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4838 return mgmt_cmd_status(sk, hdev->id,
4839 MGMT_OP_SET_EXP_FEATURE,
4840 MGMT_STATUS_INVALID_PARAMS);
4842 val = !!cp->param[0];
4843 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4845 if (!hdev->get_data_path_id) {
4846 return mgmt_cmd_status(sk, hdev->id,
4847 MGMT_OP_SET_EXP_FEATURE,
4848 MGMT_STATUS_NOT_SUPPORTED);
4853 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4855 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4858 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4861 memcpy(rp.uuid, offload_codecs_uuid, 16);
4862 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4863 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4864 err = mgmt_cmd_complete(sk, hdev->id,
4865 MGMT_OP_SET_EXP_FEATURE, 0,
4869 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4874 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4875 struct mgmt_cp_set_exp_feature *cp,
4880 struct mgmt_rp_set_exp_feature rp;
4882 /* Command requires to use a valid controller index */
4884 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4885 MGMT_OP_SET_EXP_FEATURE,
4886 MGMT_STATUS_INVALID_INDEX);
4888 /* Parameters are limited to a single octet */
4889 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4890 return mgmt_cmd_status(sk, hdev->id,
4891 MGMT_OP_SET_EXP_FEATURE,
4892 MGMT_STATUS_INVALID_PARAMS);
4894 /* Only boolean on/off is supported */
4895 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4896 return mgmt_cmd_status(sk, hdev->id,
4897 MGMT_OP_SET_EXP_FEATURE,
4898 MGMT_STATUS_INVALID_PARAMS);
4900 val = !!cp->param[0];
4901 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4903 if (!hci_dev_le_state_simultaneous(hdev)) {
4904 return mgmt_cmd_status(sk, hdev->id,
4905 MGMT_OP_SET_EXP_FEATURE,
4906 MGMT_STATUS_NOT_SUPPORTED);
4911 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4913 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4916 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4919 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4920 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4921 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4922 err = mgmt_cmd_complete(sk, hdev->id,
4923 MGMT_OP_SET_EXP_FEATURE, 0,
4927 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4933 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4934 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4936 struct mgmt_rp_set_exp_feature rp;
4937 bool val, changed = false;
4940 /* Command requires to use the non-controller index */
4942 return mgmt_cmd_status(sk, hdev->id,
4943 MGMT_OP_SET_EXP_FEATURE,
4944 MGMT_STATUS_INVALID_INDEX);
4946 /* Parameters are limited to a single octet */
4947 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4948 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4949 MGMT_OP_SET_EXP_FEATURE,
4950 MGMT_STATUS_INVALID_PARAMS);
4952 /* Only boolean on/off is supported */
4953 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4954 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4955 MGMT_OP_SET_EXP_FEATURE,
4956 MGMT_STATUS_INVALID_PARAMS);
4958 val = cp->param[0] ? true : false;
4967 memcpy(rp.uuid, iso_socket_uuid, 16);
4968 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4970 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4972 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4973 MGMT_OP_SET_EXP_FEATURE, 0,
4977 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4983 static const struct mgmt_exp_feature {
4985 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4986 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4987 } exp_features[] = {
4988 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4989 #ifdef CONFIG_BT_FEATURE_DEBUG
4990 EXP_FEAT(debug_uuid, set_debug_func),
4992 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4993 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4994 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4995 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4996 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4998 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5001 /* end with a null feature */
5002 EXP_FEAT(NULL, NULL)
5005 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5006 void *data, u16 data_len)
5008 struct mgmt_cp_set_exp_feature *cp = data;
5011 bt_dev_dbg(hdev, "sock %p", sk);
5013 for (i = 0; exp_features[i].uuid; i++) {
5014 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5015 return exp_features[i].set_func(sk, hdev, cp, data_len);
5018 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5019 MGMT_OP_SET_EXP_FEATURE,
5020 MGMT_STATUS_NOT_SUPPORTED);
5023 static u32 get_params_flags(struct hci_dev *hdev,
5024 struct hci_conn_params *params)
5026 u32 flags = hdev->conn_flags;
5028 /* Devices using RPAs can only be programmed in the acceptlist if
5029 * LL Privacy has been enable otherwise they cannot mark
5030 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5032 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5033 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type))
5034 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5039 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5042 struct mgmt_cp_get_device_flags *cp = data;
5043 struct mgmt_rp_get_device_flags rp;
5044 struct bdaddr_list_with_flags *br_params;
5045 struct hci_conn_params *params;
5046 u32 supported_flags;
5047 u32 current_flags = 0;
5048 u8 status = MGMT_STATUS_INVALID_PARAMS;
5050 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5051 &cp->addr.bdaddr, cp->addr.type);
5055 supported_flags = hdev->conn_flags;
5057 memset(&rp, 0, sizeof(rp));
5059 if (cp->addr.type == BDADDR_BREDR) {
5060 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5066 current_flags = br_params->flags;
5068 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5069 le_addr_type(cp->addr.type));
5073 supported_flags = get_params_flags(hdev, params);
5074 current_flags = params->flags;
5077 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5078 rp.addr.type = cp->addr.type;
5079 rp.supported_flags = cpu_to_le32(supported_flags);
5080 rp.current_flags = cpu_to_le32(current_flags);
5082 status = MGMT_STATUS_SUCCESS;
5085 hci_dev_unlock(hdev);
5087 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5091 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5092 bdaddr_t *bdaddr, u8 bdaddr_type,
5093 u32 supported_flags, u32 current_flags)
5095 struct mgmt_ev_device_flags_changed ev;
5097 bacpy(&ev.addr.bdaddr, bdaddr);
5098 ev.addr.type = bdaddr_type;
5099 ev.supported_flags = cpu_to_le32(supported_flags);
5100 ev.current_flags = cpu_to_le32(current_flags);
5102 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5105 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5108 struct mgmt_cp_set_device_flags *cp = data;
5109 struct bdaddr_list_with_flags *br_params;
5110 struct hci_conn_params *params;
5111 u8 status = MGMT_STATUS_INVALID_PARAMS;
5112 u32 supported_flags;
5113 u32 current_flags = __le32_to_cpu(cp->current_flags);
5115 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5116 &cp->addr.bdaddr, cp->addr.type, current_flags);
5118 // We should take hci_dev_lock() early, I think.. conn_flags can change
5119 supported_flags = hdev->conn_flags;
5121 if ((supported_flags | current_flags) != supported_flags) {
5122 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5123 current_flags, supported_flags);
5129 if (cp->addr.type == BDADDR_BREDR) {
5130 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5135 br_params->flags = current_flags;
5136 status = MGMT_STATUS_SUCCESS;
5138 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5139 &cp->addr.bdaddr, cp->addr.type);
5145 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5146 le_addr_type(cp->addr.type));
5148 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5149 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5153 supported_flags = get_params_flags(hdev, params);
5155 if ((supported_flags | current_flags) != supported_flags) {
5156 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5157 current_flags, supported_flags);
5161 params->flags = current_flags;
5162 status = MGMT_STATUS_SUCCESS;
5164 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5167 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5168 hci_update_passive_scan(hdev);
5171 hci_dev_unlock(hdev);
5174 if (status == MGMT_STATUS_SUCCESS)
5175 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5176 supported_flags, current_flags);
5178 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5179 &cp->addr, sizeof(cp->addr));
5182 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5185 struct mgmt_ev_adv_monitor_added ev;
5187 ev.monitor_handle = cpu_to_le16(handle);
5189 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5192 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5194 struct mgmt_ev_adv_monitor_removed ev;
5195 struct mgmt_pending_cmd *cmd;
5196 struct sock *sk_skip = NULL;
5197 struct mgmt_cp_remove_adv_monitor *cp;
5199 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5203 if (cp->monitor_handle)
5207 ev.monitor_handle = cpu_to_le16(handle);
5209 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5212 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5213 void *data, u16 len)
5215 struct adv_monitor *monitor = NULL;
5216 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5219 __u32 supported = 0;
5221 __u16 num_handles = 0;
5222 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5224 BT_DBG("request for %s", hdev->name);
5228 if (msft_monitor_supported(hdev))
5229 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5231 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5232 handles[num_handles++] = monitor->handle;
5234 hci_dev_unlock(hdev);
5236 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5237 rp = kmalloc(rp_size, GFP_KERNEL);
5241 /* All supported features are currently enabled */
5242 enabled = supported;
5244 rp->supported_features = cpu_to_le32(supported);
5245 rp->enabled_features = cpu_to_le32(enabled);
5246 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5247 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5248 rp->num_handles = cpu_to_le16(num_handles);
5250 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5252 err = mgmt_cmd_complete(sk, hdev->id,
5253 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5254 MGMT_STATUS_SUCCESS, rp, rp_size);
5261 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5262 void *data, int status)
5264 struct mgmt_rp_add_adv_patterns_monitor rp;
5265 struct mgmt_pending_cmd *cmd = data;
5266 struct adv_monitor *monitor = cmd->user_data;
5270 rp.monitor_handle = cpu_to_le16(monitor->handle);
5273 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5274 hdev->adv_monitors_cnt++;
5275 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5276 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5277 hci_update_passive_scan(hdev);
5280 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5281 mgmt_status(status), &rp, sizeof(rp));
5282 mgmt_pending_remove(cmd);
5284 hci_dev_unlock(hdev);
5285 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5286 rp.monitor_handle, status);
5289 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5291 struct mgmt_pending_cmd *cmd = data;
5292 struct adv_monitor *monitor = cmd->user_data;
5294 return hci_add_adv_monitor(hdev, monitor);
5297 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5298 struct adv_monitor *m, u8 status,
5299 void *data, u16 len, u16 op)
5301 struct mgmt_pending_cmd *cmd;
5309 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5310 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5311 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5312 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5313 status = MGMT_STATUS_BUSY;
5317 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5319 status = MGMT_STATUS_NO_RESOURCES;
5324 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5325 mgmt_add_adv_patterns_monitor_complete);
5328 status = MGMT_STATUS_NO_RESOURCES;
5330 status = MGMT_STATUS_FAILED;
5335 hci_dev_unlock(hdev);
5340 hci_free_adv_monitor(hdev, m);
5341 hci_dev_unlock(hdev);
5342 return mgmt_cmd_status(sk, hdev->id, op, status);
5345 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5346 struct mgmt_adv_rssi_thresholds *rssi)
5349 m->rssi.low_threshold = rssi->low_threshold;
5350 m->rssi.low_threshold_timeout =
5351 __le16_to_cpu(rssi->low_threshold_timeout);
5352 m->rssi.high_threshold = rssi->high_threshold;
5353 m->rssi.high_threshold_timeout =
5354 __le16_to_cpu(rssi->high_threshold_timeout);
5355 m->rssi.sampling_period = rssi->sampling_period;
5357 /* Default values. These numbers are the least constricting
5358 * parameters for MSFT API to work, so it behaves as if there
5359 * are no rssi parameter to consider. May need to be changed
5360 * if other API are to be supported.
5362 m->rssi.low_threshold = -127;
5363 m->rssi.low_threshold_timeout = 60;
5364 m->rssi.high_threshold = -127;
5365 m->rssi.high_threshold_timeout = 0;
5366 m->rssi.sampling_period = 0;
5370 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5371 struct mgmt_adv_pattern *patterns)
5373 u8 offset = 0, length = 0;
5374 struct adv_pattern *p = NULL;
5377 for (i = 0; i < pattern_count; i++) {
5378 offset = patterns[i].offset;
5379 length = patterns[i].length;
5380 if (offset >= HCI_MAX_AD_LENGTH ||
5381 length > HCI_MAX_AD_LENGTH ||
5382 (offset + length) > HCI_MAX_AD_LENGTH)
5383 return MGMT_STATUS_INVALID_PARAMS;
5385 p = kmalloc(sizeof(*p), GFP_KERNEL);
5387 return MGMT_STATUS_NO_RESOURCES;
5389 p->ad_type = patterns[i].ad_type;
5390 p->offset = patterns[i].offset;
5391 p->length = patterns[i].length;
5392 memcpy(p->value, patterns[i].value, p->length);
5394 INIT_LIST_HEAD(&p->list);
5395 list_add(&p->list, &m->patterns);
5398 return MGMT_STATUS_SUCCESS;
5401 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5402 void *data, u16 len)
5404 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5405 struct adv_monitor *m = NULL;
5406 u8 status = MGMT_STATUS_SUCCESS;
5407 size_t expected_size = sizeof(*cp);
5409 BT_DBG("request for %s", hdev->name);
5411 if (len <= sizeof(*cp)) {
5412 status = MGMT_STATUS_INVALID_PARAMS;
5416 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5417 if (len != expected_size) {
5418 status = MGMT_STATUS_INVALID_PARAMS;
5422 m = kzalloc(sizeof(*m), GFP_KERNEL);
5424 status = MGMT_STATUS_NO_RESOURCES;
5428 INIT_LIST_HEAD(&m->patterns);
5430 parse_adv_monitor_rssi(m, NULL);
5431 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5434 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5435 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5438 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5439 void *data, u16 len)
5441 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5442 struct adv_monitor *m = NULL;
5443 u8 status = MGMT_STATUS_SUCCESS;
5444 size_t expected_size = sizeof(*cp);
5446 BT_DBG("request for %s", hdev->name);
5448 if (len <= sizeof(*cp)) {
5449 status = MGMT_STATUS_INVALID_PARAMS;
5453 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5454 if (len != expected_size) {
5455 status = MGMT_STATUS_INVALID_PARAMS;
5459 m = kzalloc(sizeof(*m), GFP_KERNEL);
5461 status = MGMT_STATUS_NO_RESOURCES;
5465 INIT_LIST_HEAD(&m->patterns);
5467 parse_adv_monitor_rssi(m, &cp->rssi);
5468 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5471 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5472 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5475 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5476 void *data, int status)
5478 struct mgmt_rp_remove_adv_monitor rp;
5479 struct mgmt_pending_cmd *cmd = data;
5480 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5484 rp.monitor_handle = cp->monitor_handle;
5487 hci_update_passive_scan(hdev);
5489 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5490 mgmt_status(status), &rp, sizeof(rp));
5491 mgmt_pending_remove(cmd);
5493 hci_dev_unlock(hdev);
5494 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5495 rp.monitor_handle, status);
5498 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5500 struct mgmt_pending_cmd *cmd = data;
5501 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5502 u16 handle = __le16_to_cpu(cp->monitor_handle);
5505 return hci_remove_all_adv_monitor(hdev);
5507 return hci_remove_single_adv_monitor(hdev, handle);
5510 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5511 void *data, u16 len)
5513 struct mgmt_pending_cmd *cmd;
5518 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5519 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5520 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5521 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5522 status = MGMT_STATUS_BUSY;
5526 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5528 status = MGMT_STATUS_NO_RESOURCES;
5532 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5533 mgmt_remove_adv_monitor_complete);
5536 mgmt_pending_remove(cmd);
5539 status = MGMT_STATUS_NO_RESOURCES;
5541 status = MGMT_STATUS_FAILED;
5546 hci_dev_unlock(hdev);
5551 hci_dev_unlock(hdev);
5552 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5556 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5558 struct mgmt_rp_read_local_oob_data mgmt_rp;
5559 size_t rp_size = sizeof(mgmt_rp);
5560 struct mgmt_pending_cmd *cmd = data;
5561 struct sk_buff *skb = cmd->skb;
5562 u8 status = mgmt_status(err);
5566 status = MGMT_STATUS_FAILED;
5567 else if (IS_ERR(skb))
5568 status = mgmt_status(PTR_ERR(skb));
5570 status = mgmt_status(skb->data[0]);
5573 bt_dev_dbg(hdev, "status %d", status);
5576 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5580 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5582 if (!bredr_sc_enabled(hdev)) {
5583 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5585 if (skb->len < sizeof(*rp)) {
5586 mgmt_cmd_status(cmd->sk, hdev->id,
5587 MGMT_OP_READ_LOCAL_OOB_DATA,
5588 MGMT_STATUS_FAILED);
5592 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5593 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5595 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5597 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5599 if (skb->len < sizeof(*rp)) {
5600 mgmt_cmd_status(cmd->sk, hdev->id,
5601 MGMT_OP_READ_LOCAL_OOB_DATA,
5602 MGMT_STATUS_FAILED);
5606 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5607 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5609 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5610 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5613 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5614 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5617 if (skb && !IS_ERR(skb))
5620 mgmt_pending_free(cmd);
5623 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5625 struct mgmt_pending_cmd *cmd = data;
5627 if (bredr_sc_enabled(hdev))
5628 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5630 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5632 if (IS_ERR(cmd->skb))
5633 return PTR_ERR(cmd->skb);
5638 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5639 void *data, u16 data_len)
5641 struct mgmt_pending_cmd *cmd;
5644 bt_dev_dbg(hdev, "sock %p", sk);
5648 if (!hdev_is_powered(hdev)) {
5649 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5650 MGMT_STATUS_NOT_POWERED);
5654 if (!lmp_ssp_capable(hdev)) {
5655 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5656 MGMT_STATUS_NOT_SUPPORTED);
5660 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5664 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5665 read_local_oob_data_complete);
5668 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5669 MGMT_STATUS_FAILED);
5672 mgmt_pending_free(cmd);
5676 hci_dev_unlock(hdev);
5680 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5681 void *data, u16 len)
5683 struct mgmt_addr_info *addr = data;
5686 bt_dev_dbg(hdev, "sock %p", sk);
5688 if (!bdaddr_type_is_valid(addr->type))
5689 return mgmt_cmd_complete(sk, hdev->id,
5690 MGMT_OP_ADD_REMOTE_OOB_DATA,
5691 MGMT_STATUS_INVALID_PARAMS,
5692 addr, sizeof(*addr));
5696 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5697 struct mgmt_cp_add_remote_oob_data *cp = data;
5700 if (cp->addr.type != BDADDR_BREDR) {
5701 err = mgmt_cmd_complete(sk, hdev->id,
5702 MGMT_OP_ADD_REMOTE_OOB_DATA,
5703 MGMT_STATUS_INVALID_PARAMS,
5704 &cp->addr, sizeof(cp->addr));
5708 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5709 cp->addr.type, cp->hash,
5710 cp->rand, NULL, NULL);
5712 status = MGMT_STATUS_FAILED;
5714 status = MGMT_STATUS_SUCCESS;
5716 err = mgmt_cmd_complete(sk, hdev->id,
5717 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5718 &cp->addr, sizeof(cp->addr));
5719 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5720 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5721 u8 *rand192, *hash192, *rand256, *hash256;
5724 if (bdaddr_type_is_le(cp->addr.type)) {
5725 /* Enforce zero-valued 192-bit parameters as
5726 * long as legacy SMP OOB isn't implemented.
5728 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5729 memcmp(cp->hash192, ZERO_KEY, 16)) {
5730 err = mgmt_cmd_complete(sk, hdev->id,
5731 MGMT_OP_ADD_REMOTE_OOB_DATA,
5732 MGMT_STATUS_INVALID_PARAMS,
5733 addr, sizeof(*addr));
5740 /* In case one of the P-192 values is set to zero,
5741 * then just disable OOB data for P-192.
5743 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5744 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5748 rand192 = cp->rand192;
5749 hash192 = cp->hash192;
5753 /* In case one of the P-256 values is set to zero, then just
5754 * disable OOB data for P-256.
5756 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5757 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5761 rand256 = cp->rand256;
5762 hash256 = cp->hash256;
5765 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5766 cp->addr.type, hash192, rand192,
5769 status = MGMT_STATUS_FAILED;
5771 status = MGMT_STATUS_SUCCESS;
5773 err = mgmt_cmd_complete(sk, hdev->id,
5774 MGMT_OP_ADD_REMOTE_OOB_DATA,
5775 status, &cp->addr, sizeof(cp->addr));
5777 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5779 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5780 MGMT_STATUS_INVALID_PARAMS);
5784 hci_dev_unlock(hdev);
5788 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5789 void *data, u16 len)
5791 struct mgmt_cp_remove_remote_oob_data *cp = data;
5795 bt_dev_dbg(hdev, "sock %p", sk);
5797 if (cp->addr.type != BDADDR_BREDR)
5798 return mgmt_cmd_complete(sk, hdev->id,
5799 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5800 MGMT_STATUS_INVALID_PARAMS,
5801 &cp->addr, sizeof(cp->addr));
5805 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5806 hci_remote_oob_data_clear(hdev);
5807 status = MGMT_STATUS_SUCCESS;
5811 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5813 status = MGMT_STATUS_INVALID_PARAMS;
5815 status = MGMT_STATUS_SUCCESS;
5818 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5819 status, &cp->addr, sizeof(cp->addr));
5821 hci_dev_unlock(hdev);
5825 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5827 struct mgmt_pending_cmd *cmd;
5829 bt_dev_dbg(hdev, "status %u", status);
5833 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5835 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5838 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5841 cmd->cmd_complete(cmd, mgmt_status(status));
5842 mgmt_pending_remove(cmd);
5845 hci_dev_unlock(hdev);
5848 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5849 uint8_t *mgmt_status)
5852 case DISCOV_TYPE_LE:
5853 *mgmt_status = mgmt_le_support(hdev);
5857 case DISCOV_TYPE_INTERLEAVED:
5858 *mgmt_status = mgmt_le_support(hdev);
5862 case DISCOV_TYPE_BREDR:
5863 *mgmt_status = mgmt_bredr_support(hdev);
5868 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5875 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5877 struct mgmt_pending_cmd *cmd = data;
5879 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5880 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5881 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5884 bt_dev_dbg(hdev, "err %d", err);
5886 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5888 mgmt_pending_remove(cmd);
5890 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5894 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5896 return hci_start_discovery_sync(hdev);
5899 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5900 u16 op, void *data, u16 len)
5902 struct mgmt_cp_start_discovery *cp = data;
5903 struct mgmt_pending_cmd *cmd;
5907 bt_dev_dbg(hdev, "sock %p", sk);
5911 if (!hdev_is_powered(hdev)) {
5912 err = mgmt_cmd_complete(sk, hdev->id, op,
5913 MGMT_STATUS_NOT_POWERED,
5914 &cp->type, sizeof(cp->type));
5918 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5919 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5920 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5921 &cp->type, sizeof(cp->type));
5925 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5926 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5927 &cp->type, sizeof(cp->type));
5931 /* Can't start discovery when it is paused */
5932 if (hdev->discovery_paused) {
5933 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5934 &cp->type, sizeof(cp->type));
5938 /* Clear the discovery filter first to free any previously
5939 * allocated memory for the UUID list.
5941 hci_discovery_filter_clear(hdev);
5943 hdev->discovery.type = cp->type;
5944 hdev->discovery.report_invalid_rssi = false;
5945 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5946 hdev->discovery.limited = true;
5948 hdev->discovery.limited = false;
5950 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5956 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5957 start_discovery_complete);
5959 mgmt_pending_remove(cmd);
5963 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5966 hci_dev_unlock(hdev);
5970 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5971 void *data, u16 len)
5973 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5977 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5978 void *data, u16 len)
5980 return start_discovery_internal(sk, hdev,
5981 MGMT_OP_START_LIMITED_DISCOVERY,
5985 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5986 void *data, u16 len)
5988 struct mgmt_cp_start_service_discovery *cp = data;
5989 struct mgmt_pending_cmd *cmd;
5990 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5991 u16 uuid_count, expected_len;
5995 bt_dev_dbg(hdev, "sock %p", sk);
5999 if (!hdev_is_powered(hdev)) {
6000 err = mgmt_cmd_complete(sk, hdev->id,
6001 MGMT_OP_START_SERVICE_DISCOVERY,
6002 MGMT_STATUS_NOT_POWERED,
6003 &cp->type, sizeof(cp->type));
6007 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6008 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6009 err = mgmt_cmd_complete(sk, hdev->id,
6010 MGMT_OP_START_SERVICE_DISCOVERY,
6011 MGMT_STATUS_BUSY, &cp->type,
6016 if (hdev->discovery_paused) {
6017 err = mgmt_cmd_complete(sk, hdev->id,
6018 MGMT_OP_START_SERVICE_DISCOVERY,
6019 MGMT_STATUS_BUSY, &cp->type,
6024 uuid_count = __le16_to_cpu(cp->uuid_count);
6025 if (uuid_count > max_uuid_count) {
6026 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6028 err = mgmt_cmd_complete(sk, hdev->id,
6029 MGMT_OP_START_SERVICE_DISCOVERY,
6030 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6035 expected_len = sizeof(*cp) + uuid_count * 16;
6036 if (expected_len != len) {
6037 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6039 err = mgmt_cmd_complete(sk, hdev->id,
6040 MGMT_OP_START_SERVICE_DISCOVERY,
6041 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6046 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6047 err = mgmt_cmd_complete(sk, hdev->id,
6048 MGMT_OP_START_SERVICE_DISCOVERY,
6049 status, &cp->type, sizeof(cp->type));
6053 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6060 /* Clear the discovery filter first to free any previously
6061 * allocated memory for the UUID list.
6063 hci_discovery_filter_clear(hdev);
6065 hdev->discovery.result_filtering = true;
6066 hdev->discovery.type = cp->type;
6067 hdev->discovery.rssi = cp->rssi;
6068 hdev->discovery.uuid_count = uuid_count;
6070 if (uuid_count > 0) {
6071 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6073 if (!hdev->discovery.uuids) {
6074 err = mgmt_cmd_complete(sk, hdev->id,
6075 MGMT_OP_START_SERVICE_DISCOVERY,
6077 &cp->type, sizeof(cp->type));
6078 mgmt_pending_remove(cmd);
6083 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6084 start_discovery_complete);
6086 mgmt_pending_remove(cmd);
6090 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6093 hci_dev_unlock(hdev);
6097 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6099 struct mgmt_pending_cmd *cmd;
6101 bt_dev_dbg(hdev, "status %u", status);
6105 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6107 cmd->cmd_complete(cmd, mgmt_status(status));
6108 mgmt_pending_remove(cmd);
6111 hci_dev_unlock(hdev);
6114 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6116 struct mgmt_pending_cmd *cmd = data;
6118 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6121 bt_dev_dbg(hdev, "err %d", err);
6123 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6125 mgmt_pending_remove(cmd);
6128 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6131 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6133 return hci_stop_discovery_sync(hdev);
6136 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6139 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6140 struct mgmt_pending_cmd *cmd;
6143 bt_dev_dbg(hdev, "sock %p", sk);
6147 if (!hci_discovery_active(hdev)) {
6148 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6149 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6150 sizeof(mgmt_cp->type));
6154 if (hdev->discovery.type != mgmt_cp->type) {
6155 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6156 MGMT_STATUS_INVALID_PARAMS,
6157 &mgmt_cp->type, sizeof(mgmt_cp->type));
6161 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6167 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6168 stop_discovery_complete);
6170 mgmt_pending_remove(cmd);
6174 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6177 hci_dev_unlock(hdev);
6181 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6184 struct mgmt_cp_confirm_name *cp = data;
6185 struct inquiry_entry *e;
6188 bt_dev_dbg(hdev, "sock %p", sk);
6192 if (!hci_discovery_active(hdev)) {
6193 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6194 MGMT_STATUS_FAILED, &cp->addr,
6199 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6201 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6202 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6207 if (cp->name_known) {
6208 e->name_state = NAME_KNOWN;
6211 e->name_state = NAME_NEEDED;
6212 hci_inquiry_cache_update_resolve(hdev, e);
6215 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6216 &cp->addr, sizeof(cp->addr));
6219 hci_dev_unlock(hdev);
6223 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6226 struct mgmt_cp_block_device *cp = data;
6230 bt_dev_dbg(hdev, "sock %p", sk);
6232 if (!bdaddr_type_is_valid(cp->addr.type))
6233 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6234 MGMT_STATUS_INVALID_PARAMS,
6235 &cp->addr, sizeof(cp->addr));
6239 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6242 status = MGMT_STATUS_FAILED;
6246 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6248 status = MGMT_STATUS_SUCCESS;
6251 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6252 &cp->addr, sizeof(cp->addr));
6254 hci_dev_unlock(hdev);
6259 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6262 struct mgmt_cp_unblock_device *cp = data;
6266 bt_dev_dbg(hdev, "sock %p", sk);
6268 if (!bdaddr_type_is_valid(cp->addr.type))
6269 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6270 MGMT_STATUS_INVALID_PARAMS,
6271 &cp->addr, sizeof(cp->addr));
6275 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6278 status = MGMT_STATUS_INVALID_PARAMS;
6282 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6284 status = MGMT_STATUS_SUCCESS;
6287 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6288 &cp->addr, sizeof(cp->addr));
6290 hci_dev_unlock(hdev);
6295 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6297 return hci_update_eir_sync(hdev);
6300 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6303 struct mgmt_cp_set_device_id *cp = data;
6307 bt_dev_dbg(hdev, "sock %p", sk);
6309 source = __le16_to_cpu(cp->source);
6311 if (source > 0x0002)
6312 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6313 MGMT_STATUS_INVALID_PARAMS);
6317 hdev->devid_source = source;
6318 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6319 hdev->devid_product = __le16_to_cpu(cp->product);
6320 hdev->devid_version = __le16_to_cpu(cp->version);
6322 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6325 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6327 hci_dev_unlock(hdev);
6332 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6335 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6337 bt_dev_dbg(hdev, "status %d", err);
6340 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6342 struct cmd_lookup match = { NULL, hdev };
6344 struct adv_info *adv_instance;
6345 u8 status = mgmt_status(err);
6348 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6349 cmd_status_rsp, &status);
6353 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6354 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6356 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6358 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6361 new_settings(hdev, match.sk);
6366 /* If "Set Advertising" was just disabled and instance advertising was
6367 * set up earlier, then re-enable multi-instance advertising.
6369 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6370 list_empty(&hdev->adv_instances))
6373 instance = hdev->cur_adv_instance;
6375 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6376 struct adv_info, list);
6380 instance = adv_instance->instance;
6383 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6385 enable_advertising_instance(hdev, err);
6388 static int set_adv_sync(struct hci_dev *hdev, void *data)
6390 struct mgmt_pending_cmd *cmd = data;
6391 struct mgmt_mode *cp = cmd->param;
6394 if (cp->val == 0x02)
6395 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6397 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6399 cancel_adv_timeout(hdev);
6402 /* Switch to instance "0" for the Set Advertising setting.
6403 * We cannot use update_[adv|scan_rsp]_data() here as the
6404 * HCI_ADVERTISING flag is not yet set.
6406 hdev->cur_adv_instance = 0x00;
6408 if (ext_adv_capable(hdev)) {
6409 hci_start_ext_adv_sync(hdev, 0x00);
6411 hci_update_adv_data_sync(hdev, 0x00);
6412 hci_update_scan_rsp_data_sync(hdev, 0x00);
6413 hci_enable_advertising_sync(hdev);
6416 hci_disable_advertising_sync(hdev);
6422 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6425 struct mgmt_mode *cp = data;
6426 struct mgmt_pending_cmd *cmd;
6430 bt_dev_dbg(hdev, "sock %p", sk);
6432 status = mgmt_le_support(hdev);
6434 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6437 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6438 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6439 MGMT_STATUS_INVALID_PARAMS);
6441 if (hdev->advertising_paused)
6442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6449 /* The following conditions are ones which mean that we should
6450 * not do any HCI communication but directly send a mgmt
6451 * response to user space (after toggling the flag if
6454 if (!hdev_is_powered(hdev) ||
6455 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6456 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6457 hci_dev_test_flag(hdev, HCI_MESH) ||
6458 hci_conn_num(hdev, LE_LINK) > 0 ||
6459 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6460 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6464 hdev->cur_adv_instance = 0x00;
6465 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6466 if (cp->val == 0x02)
6467 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6469 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6471 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6472 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6475 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6480 err = new_settings(hdev, sk);
6485 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6486 pending_find(MGMT_OP_SET_LE, hdev)) {
6487 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6492 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6496 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6497 set_advertising_complete);
6500 mgmt_pending_remove(cmd);
6503 hci_dev_unlock(hdev);
6507 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6508 void *data, u16 len)
6510 struct mgmt_cp_set_static_address *cp = data;
6513 bt_dev_dbg(hdev, "sock %p", sk);
6515 if (!lmp_le_capable(hdev))
6516 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6517 MGMT_STATUS_NOT_SUPPORTED);
6519 if (hdev_is_powered(hdev))
6520 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6521 MGMT_STATUS_REJECTED);
6523 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6524 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6525 return mgmt_cmd_status(sk, hdev->id,
6526 MGMT_OP_SET_STATIC_ADDRESS,
6527 MGMT_STATUS_INVALID_PARAMS);
6529 /* Two most significant bits shall be set */
6530 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6531 return mgmt_cmd_status(sk, hdev->id,
6532 MGMT_OP_SET_STATIC_ADDRESS,
6533 MGMT_STATUS_INVALID_PARAMS);
6538 bacpy(&hdev->static_addr, &cp->bdaddr);
6540 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6544 err = new_settings(hdev, sk);
6547 hci_dev_unlock(hdev);
6551 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6552 void *data, u16 len)
6554 struct mgmt_cp_set_scan_params *cp = data;
6555 __u16 interval, window;
6558 bt_dev_dbg(hdev, "sock %p", sk);
6560 if (!lmp_le_capable(hdev))
6561 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6562 MGMT_STATUS_NOT_SUPPORTED);
6564 interval = __le16_to_cpu(cp->interval);
6566 if (interval < 0x0004 || interval > 0x4000)
6567 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6568 MGMT_STATUS_INVALID_PARAMS);
6570 window = __le16_to_cpu(cp->window);
6572 if (window < 0x0004 || window > 0x4000)
6573 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6574 MGMT_STATUS_INVALID_PARAMS);
6576 if (window > interval)
6577 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6578 MGMT_STATUS_INVALID_PARAMS);
6582 hdev->le_scan_interval = interval;
6583 hdev->le_scan_window = window;
6585 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6588 /* If background scan is running, restart it so new parameters are
6591 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6592 hdev->discovery.state == DISCOVERY_STOPPED)
6593 hci_update_passive_scan(hdev);
6595 hci_dev_unlock(hdev);
6600 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6602 struct mgmt_pending_cmd *cmd = data;
6604 bt_dev_dbg(hdev, "err %d", err);
6607 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6610 struct mgmt_mode *cp = cmd->param;
6613 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6615 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6617 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6618 new_settings(hdev, cmd->sk);
6621 mgmt_pending_free(cmd);
6624 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6626 struct mgmt_pending_cmd *cmd = data;
6627 struct mgmt_mode *cp = cmd->param;
6629 return hci_write_fast_connectable_sync(hdev, cp->val);
6632 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6633 void *data, u16 len)
6635 struct mgmt_mode *cp = data;
6636 struct mgmt_pending_cmd *cmd;
6639 bt_dev_dbg(hdev, "sock %p", sk);
6641 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6642 hdev->hci_ver < BLUETOOTH_VER_1_2)
6643 return mgmt_cmd_status(sk, hdev->id,
6644 MGMT_OP_SET_FAST_CONNECTABLE,
6645 MGMT_STATUS_NOT_SUPPORTED);
6647 if (cp->val != 0x00 && cp->val != 0x01)
6648 return mgmt_cmd_status(sk, hdev->id,
6649 MGMT_OP_SET_FAST_CONNECTABLE,
6650 MGMT_STATUS_INVALID_PARAMS);
6654 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6655 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6659 if (!hdev_is_powered(hdev)) {
6660 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6661 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6662 new_settings(hdev, sk);
6666 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6671 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6672 fast_connectable_complete);
6675 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6676 MGMT_STATUS_FAILED);
6679 mgmt_pending_free(cmd);
6683 hci_dev_unlock(hdev);
6688 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6690 struct mgmt_pending_cmd *cmd = data;
6692 bt_dev_dbg(hdev, "err %d", err);
6695 u8 mgmt_err = mgmt_status(err);
6697 /* We need to restore the flag if related HCI commands
6700 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6702 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6704 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6705 new_settings(hdev, cmd->sk);
6708 mgmt_pending_free(cmd);
6711 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6715 status = hci_write_fast_connectable_sync(hdev, false);
6718 status = hci_update_scan_sync(hdev);
6720 /* Since only the advertising data flags will change, there
6721 * is no need to update the scan response data.
6724 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6729 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6731 struct mgmt_mode *cp = data;
6732 struct mgmt_pending_cmd *cmd;
6735 bt_dev_dbg(hdev, "sock %p", sk);
6737 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6738 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6739 MGMT_STATUS_NOT_SUPPORTED);
6741 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6742 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6743 MGMT_STATUS_REJECTED);
6745 if (cp->val != 0x00 && cp->val != 0x01)
6746 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6747 MGMT_STATUS_INVALID_PARAMS);
6751 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6752 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6756 if (!hdev_is_powered(hdev)) {
6758 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6759 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6760 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6761 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6762 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6765 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6767 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6771 err = new_settings(hdev, sk);
6775 /* Reject disabling when powered on */
6777 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6778 MGMT_STATUS_REJECTED);
6781 /* When configuring a dual-mode controller to operate
6782 * with LE only and using a static address, then switching
6783 * BR/EDR back on is not allowed.
6785 * Dual-mode controllers shall operate with the public
6786 * address as its identity address for BR/EDR and LE. So
6787 * reject the attempt to create an invalid configuration.
6789 * The same restrictions applies when secure connections
6790 * has been enabled. For BR/EDR this is a controller feature
6791 * while for LE it is a host stack feature. This means that
6792 * switching BR/EDR back on when secure connections has been
6793 * enabled is not a supported transaction.
6795 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6796 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6797 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6798 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6799 MGMT_STATUS_REJECTED);
6804 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6808 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6809 set_bredr_complete);
6812 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6813 MGMT_STATUS_FAILED);
6815 mgmt_pending_free(cmd);
6820 /* We need to flip the bit already here so that
6821 * hci_req_update_adv_data generates the correct flags.
6823 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6826 hci_dev_unlock(hdev);
6830 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6832 struct mgmt_pending_cmd *cmd = data;
6833 struct mgmt_mode *cp;
6835 bt_dev_dbg(hdev, "err %d", err);
6838 u8 mgmt_err = mgmt_status(err);
6840 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6848 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6849 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6852 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6853 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6856 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6857 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6861 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6862 new_settings(hdev, cmd->sk);
6865 mgmt_pending_free(cmd);
6868 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6870 struct mgmt_pending_cmd *cmd = data;
6871 struct mgmt_mode *cp = cmd->param;
6874 /* Force write of val */
6875 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6877 return hci_write_sc_support_sync(hdev, val);
6880 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6881 void *data, u16 len)
6883 struct mgmt_mode *cp = data;
6884 struct mgmt_pending_cmd *cmd;
6888 bt_dev_dbg(hdev, "sock %p", sk);
6890 if (!lmp_sc_capable(hdev) &&
6891 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6892 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6893 MGMT_STATUS_NOT_SUPPORTED);
6895 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6896 lmp_sc_capable(hdev) &&
6897 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6898 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6899 MGMT_STATUS_REJECTED);
6901 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6902 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6903 MGMT_STATUS_INVALID_PARAMS);
6907 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6908 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6912 changed = !hci_dev_test_and_set_flag(hdev,
6914 if (cp->val == 0x02)
6915 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6917 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6919 changed = hci_dev_test_and_clear_flag(hdev,
6921 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6924 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6929 err = new_settings(hdev, sk);
6936 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6937 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6938 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6942 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6946 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6947 set_secure_conn_complete);
6950 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6951 MGMT_STATUS_FAILED);
6953 mgmt_pending_free(cmd);
6957 hci_dev_unlock(hdev);
6961 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6962 void *data, u16 len)
6964 struct mgmt_mode *cp = data;
6965 bool changed, use_changed;
6968 bt_dev_dbg(hdev, "sock %p", sk);
6970 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6971 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6972 MGMT_STATUS_INVALID_PARAMS);
6977 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6979 changed = hci_dev_test_and_clear_flag(hdev,
6980 HCI_KEEP_DEBUG_KEYS);
6982 if (cp->val == 0x02)
6983 use_changed = !hci_dev_test_and_set_flag(hdev,
6984 HCI_USE_DEBUG_KEYS);
6986 use_changed = hci_dev_test_and_clear_flag(hdev,
6987 HCI_USE_DEBUG_KEYS);
6989 if (hdev_is_powered(hdev) && use_changed &&
6990 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6991 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6992 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6993 sizeof(mode), &mode);
6996 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7001 err = new_settings(hdev, sk);
7004 hci_dev_unlock(hdev);
7008 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7011 struct mgmt_cp_set_privacy *cp = cp_data;
7015 bt_dev_dbg(hdev, "sock %p", sk);
7017 if (!lmp_le_capable(hdev))
7018 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7019 MGMT_STATUS_NOT_SUPPORTED);
7021 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7022 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7023 MGMT_STATUS_INVALID_PARAMS);
7025 if (hdev_is_powered(hdev))
7026 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7027 MGMT_STATUS_REJECTED);
7031 /* If user space supports this command it is also expected to
7032 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7034 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7037 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7038 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7039 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7040 hci_adv_instances_set_rpa_expired(hdev, true);
7041 if (cp->privacy == 0x02)
7042 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7044 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7046 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7047 memset(hdev->irk, 0, sizeof(hdev->irk));
7048 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7049 hci_adv_instances_set_rpa_expired(hdev, false);
7050 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7053 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7058 err = new_settings(hdev, sk);
7061 hci_dev_unlock(hdev);
7065 static bool irk_is_valid(struct mgmt_irk_info *irk)
7067 switch (irk->addr.type) {
7068 case BDADDR_LE_PUBLIC:
7071 case BDADDR_LE_RANDOM:
7072 /* Two most significant bits shall be set */
7073 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7081 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7084 struct mgmt_cp_load_irks *cp = cp_data;
7085 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7086 sizeof(struct mgmt_irk_info));
7087 u16 irk_count, expected_len;
7090 bt_dev_dbg(hdev, "sock %p", sk);
7092 if (!lmp_le_capable(hdev))
7093 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7094 MGMT_STATUS_NOT_SUPPORTED);
7096 irk_count = __le16_to_cpu(cp->irk_count);
7097 if (irk_count > max_irk_count) {
7098 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7100 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7101 MGMT_STATUS_INVALID_PARAMS);
7104 expected_len = struct_size(cp, irks, irk_count);
7105 if (expected_len != len) {
7106 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7108 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7109 MGMT_STATUS_INVALID_PARAMS);
7112 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7114 for (i = 0; i < irk_count; i++) {
7115 struct mgmt_irk_info *key = &cp->irks[i];
7117 if (!irk_is_valid(key))
7118 return mgmt_cmd_status(sk, hdev->id,
7120 MGMT_STATUS_INVALID_PARAMS);
7125 hci_smp_irks_clear(hdev);
7127 for (i = 0; i < irk_count; i++) {
7128 struct mgmt_irk_info *irk = &cp->irks[i];
7130 if (hci_is_blocked_key(hdev,
7131 HCI_BLOCKED_KEY_TYPE_IRK,
7133 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7138 hci_add_irk(hdev, &irk->addr.bdaddr,
7139 le_addr_type(irk->addr.type), irk->val,
7143 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7145 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7147 hci_dev_unlock(hdev);
7153 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
7154 void *data, u16 len)
7156 struct mgmt_cp_set_advertising_params *cp = data;
7161 BT_DBG("%s", hdev->name);
7163 if (!lmp_le_capable(hdev))
7164 return mgmt_cmd_status(sk, hdev->id,
7165 MGMT_OP_SET_ADVERTISING_PARAMS,
7166 MGMT_STATUS_NOT_SUPPORTED);
7168 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7169 return mgmt_cmd_status(sk, hdev->id,
7170 MGMT_OP_SET_ADVERTISING_PARAMS,
7173 min_interval = __le16_to_cpu(cp->interval_min);
7174 max_interval = __le16_to_cpu(cp->interval_max);
7176 if (min_interval > max_interval ||
7177 min_interval < 0x0020 || max_interval > 0x4000)
7178 return mgmt_cmd_status(sk, hdev->id,
7179 MGMT_OP_SET_ADVERTISING_PARAMS,
7180 MGMT_STATUS_INVALID_PARAMS);
7184 hdev->le_adv_min_interval = min_interval;
7185 hdev->le_adv_max_interval = max_interval;
7186 hdev->adv_filter_policy = cp->filter_policy;
7187 hdev->adv_type = cp->type;
7189 err = mgmt_cmd_complete(sk, hdev->id,
7190 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
7192 hci_dev_unlock(hdev);
7197 static void set_advertising_data_complete(struct hci_dev *hdev,
7198 u8 status, u16 opcode)
7200 struct mgmt_cp_set_advertising_data *cp;
7201 struct mgmt_pending_cmd *cmd;
7203 BT_DBG("status 0x%02x", status);
7207 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
7214 mgmt_cmd_status(cmd->sk, hdev->id,
7215 MGMT_OP_SET_ADVERTISING_DATA,
7216 mgmt_status(status));
7218 mgmt_cmd_complete(cmd->sk, hdev->id,
7219 MGMT_OP_SET_ADVERTISING_DATA, 0,
7222 mgmt_pending_remove(cmd);
7225 hci_dev_unlock(hdev);
7228 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
7229 void *data, u16 len)
7231 struct mgmt_pending_cmd *cmd;
7232 struct hci_request req;
7233 struct mgmt_cp_set_advertising_data *cp = data;
7234 struct hci_cp_le_set_adv_data adv;
7237 BT_DBG("%s", hdev->name);
7239 if (!lmp_le_capable(hdev)) {
7240 return mgmt_cmd_status(sk, hdev->id,
7241 MGMT_OP_SET_ADVERTISING_DATA,
7242 MGMT_STATUS_NOT_SUPPORTED);
7247 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
7248 err = mgmt_cmd_status(sk, hdev->id,
7249 MGMT_OP_SET_ADVERTISING_DATA,
7254 if (len > HCI_MAX_AD_LENGTH) {
7255 err = mgmt_cmd_status(sk, hdev->id,
7256 MGMT_OP_SET_ADVERTISING_DATA,
7257 MGMT_STATUS_INVALID_PARAMS);
7261 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
7268 hci_req_init(&req, hdev);
7270 memset(&adv, 0, sizeof(adv));
7271 memcpy(adv.data, cp->data, len);
7274 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
7276 err = hci_req_run(&req, set_advertising_data_complete);
7278 mgmt_pending_remove(cmd);
7281 hci_dev_unlock(hdev);
7286 /* Adv White List feature */
7287 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7289 struct mgmt_cp_add_dev_white_list *cp;
7290 struct mgmt_pending_cmd *cmd;
7292 BT_DBG("status 0x%02x", status);
7296 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
7303 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7304 mgmt_status(status));
7306 mgmt_cmd_complete(cmd->sk, hdev->id,
7307 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
7309 mgmt_pending_remove(cmd);
7312 hci_dev_unlock(hdev);
7315 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
7316 void *data, u16 len)
7318 struct mgmt_pending_cmd *cmd;
7319 struct mgmt_cp_add_dev_white_list *cp = data;
7320 struct hci_request req;
7323 BT_DBG("%s", hdev->name);
7325 if (!lmp_le_capable(hdev))
7326 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7327 MGMT_STATUS_NOT_SUPPORTED);
7329 if (!hdev_is_powered(hdev))
7330 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7331 MGMT_STATUS_REJECTED);
7335 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
7336 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7341 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
7347 hci_req_init(&req, hdev);
7349 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
7351 err = hci_req_run(&req, add_white_list_complete);
7353 mgmt_pending_remove(cmd);
7358 hci_dev_unlock(hdev);
7363 static void remove_from_white_list_complete(struct hci_dev *hdev,
7364 u8 status, u16 opcode)
7366 struct mgmt_cp_remove_dev_from_white_list *cp;
7367 struct mgmt_pending_cmd *cmd;
7369 BT_DBG("status 0x%02x", status);
7373 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
7380 mgmt_cmd_status(cmd->sk, hdev->id,
7381 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7382 mgmt_status(status));
7384 mgmt_cmd_complete(cmd->sk, hdev->id,
7385 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
7388 mgmt_pending_remove(cmd);
7391 hci_dev_unlock(hdev);
7394 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
7395 void *data, u16 len)
7397 struct mgmt_pending_cmd *cmd;
7398 struct mgmt_cp_remove_dev_from_white_list *cp = data;
7399 struct hci_request req;
7402 BT_DBG("%s", hdev->name);
7404 if (!lmp_le_capable(hdev))
7405 return mgmt_cmd_status(sk, hdev->id,
7406 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7407 MGMT_STATUS_NOT_SUPPORTED);
7409 if (!hdev_is_powered(hdev))
7410 return mgmt_cmd_status(sk, hdev->id,
7411 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7412 MGMT_STATUS_REJECTED);
7416 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
7417 err = mgmt_cmd_status(sk, hdev->id,
7418 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7423 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7430 hci_req_init(&req, hdev);
7432 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
7434 err = hci_req_run(&req, remove_from_white_list_complete);
7436 mgmt_pending_remove(cmd);
7441 hci_dev_unlock(hdev);
7446 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
7449 struct mgmt_pending_cmd *cmd;
7451 BT_DBG("status 0x%02x", status);
7455 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
7460 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7461 mgmt_status(status));
7463 mgmt_cmd_complete(cmd->sk, hdev->id,
7464 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7467 mgmt_pending_remove(cmd);
7470 hci_dev_unlock(hdev);
7473 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
7474 void *data, u16 len)
7476 struct mgmt_pending_cmd *cmd;
7477 struct hci_request req;
7480 BT_DBG("%s", hdev->name);
7482 if (!lmp_le_capable(hdev))
7483 return mgmt_cmd_status(sk, hdev->id,
7484 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7485 MGMT_STATUS_NOT_SUPPORTED);
7487 if (!hdev_is_powered(hdev))
7488 return mgmt_cmd_status(sk, hdev->id,
7489 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7490 MGMT_STATUS_REJECTED);
7494 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
7495 err = mgmt_cmd_status(sk, hdev->id,
7496 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7501 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7508 hci_req_init(&req, hdev);
7510 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
7512 err = hci_req_run(&req, clear_white_list_complete);
7514 mgmt_pending_remove(cmd);
7519 hci_dev_unlock(hdev);
7524 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
7527 struct mgmt_cp_set_scan_rsp_data *cp;
7528 struct mgmt_pending_cmd *cmd;
7530 BT_DBG("status 0x%02x", status);
7534 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
7541 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7542 mgmt_status(status));
7544 mgmt_cmd_complete(cmd->sk, hdev->id,
7545 MGMT_OP_SET_SCAN_RSP_DATA, 0,
7548 mgmt_pending_remove(cmd);
7551 hci_dev_unlock(hdev);
7554 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
7557 struct mgmt_pending_cmd *cmd;
7558 struct hci_request req;
7559 struct mgmt_cp_set_scan_rsp_data *cp = data;
7560 struct hci_cp_le_set_scan_rsp_data rsp;
7563 BT_DBG("%s", hdev->name);
7565 if (!lmp_le_capable(hdev))
7566 return mgmt_cmd_status(sk, hdev->id,
7567 MGMT_OP_SET_SCAN_RSP_DATA,
7568 MGMT_STATUS_NOT_SUPPORTED);
7572 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
7573 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7578 if (len > HCI_MAX_AD_LENGTH) {
7579 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7580 MGMT_STATUS_INVALID_PARAMS);
7584 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
7590 hci_req_init(&req, hdev);
7592 memset(&rsp, 0, sizeof(rsp));
7593 memcpy(rsp.data, cp->data, len);
7596 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
7598 err = hci_req_run(&req, set_scan_rsp_data_complete);
7600 mgmt_pending_remove(cmd);
7603 hci_dev_unlock(hdev);
7608 static void set_rssi_threshold_complete(struct hci_dev *hdev,
7609 u8 status, u16 opcode)
7611 struct mgmt_pending_cmd *cmd;
7613 BT_DBG("status 0x%02x", status);
7617 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7622 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7623 mgmt_status(status));
7625 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7628 mgmt_pending_remove(cmd);
7631 hci_dev_unlock(hdev);
7634 static void set_rssi_disable_complete(struct hci_dev *hdev,
7635 u8 status, u16 opcode)
7637 struct mgmt_pending_cmd *cmd;
7639 BT_DBG("status 0x%02x", status);
7643 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7648 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7649 mgmt_status(status));
7651 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7654 mgmt_pending_remove(cmd);
7657 hci_dev_unlock(hdev);
7660 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
7661 void *data, u16 len)
7664 struct hci_cp_set_rssi_threshold th = { 0, };
7665 struct mgmt_cp_set_enable_rssi *cp = data;
7666 struct hci_conn *conn;
7667 struct mgmt_pending_cmd *cmd;
7668 struct hci_request req;
7673 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7676 MGMT_STATUS_FAILED);
7680 if (!lmp_le_capable(hdev)) {
7681 mgmt_pending_remove(cmd);
7682 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7683 MGMT_STATUS_NOT_SUPPORTED);
7687 if (!hdev_is_powered(hdev)) {
7688 BT_DBG("%s", hdev->name);
7689 mgmt_pending_remove(cmd);
7690 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7691 MGMT_STATUS_NOT_POWERED);
7695 if (cp->link_type == 0x01)
7696 dest_type = LE_LINK;
7698 dest_type = ACL_LINK;
7700 /* Get LE/ACL link handle info */
7701 conn = hci_conn_hash_lookup_ba(hdev,
7702 dest_type, &cp->bdaddr);
7705 err = mgmt_cmd_complete(sk, hdev->id,
7706 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
7707 mgmt_pending_remove(cmd);
7711 hci_req_init(&req, hdev);
7713 th.hci_le_ext_opcode = 0x0B;
7715 th.conn_handle = conn->handle;
7716 th.alert_mask = 0x07;
7717 th.low_th = cp->low_th;
7718 th.in_range_th = cp->in_range_th;
7719 th.high_th = cp->high_th;
7721 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7722 err = hci_req_run(&req, set_rssi_threshold_complete);
7725 mgmt_pending_remove(cmd);
7726 BT_ERR("Error in requesting hci_req_run");
7731 hci_dev_unlock(hdev);
7735 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
7736 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7738 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
7739 struct mgmt_cp_set_enable_rssi *cp = data;
7740 struct mgmt_pending_cmd *cmd;
7745 mgmt_rp.status = rp->status;
7746 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7747 mgmt_rp.bt_address = cp->bdaddr;
7748 mgmt_rp.link_type = cp->link_type;
7750 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7751 MGMT_STATUS_SUCCESS, &mgmt_rp,
7752 sizeof(struct mgmt_cc_rsp_enable_rssi));
7754 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
7755 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7757 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
7758 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7759 &mgmt_rp.bt_address, true);
7763 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7765 mgmt_pending_remove(cmd);
7767 hci_dev_unlock(hdev);
7770 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
7771 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7773 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
7774 struct mgmt_cp_disable_rssi *cp = data;
7775 struct mgmt_pending_cmd *cmd;
7780 mgmt_rp.status = rp->status;
7781 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7782 mgmt_rp.bt_address = cp->bdaddr;
7783 mgmt_rp.link_type = cp->link_type;
7785 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7786 MGMT_STATUS_SUCCESS, &mgmt_rp,
7787 sizeof(struct mgmt_cc_rsp_enable_rssi));
7789 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
7790 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7792 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7793 &mgmt_rp.bt_address, false);
7797 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7799 mgmt_pending_remove(cmd);
7801 hci_dev_unlock(hdev);
7804 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
7805 void *data, u16 len)
7807 struct mgmt_pending_cmd *cmd;
7808 struct hci_request req;
7809 struct hci_cp_set_enable_rssi cp_en = { 0, };
7812 BT_DBG("Set Disable RSSI.");
7814 cp_en.hci_le_ext_opcode = 0x01;
7815 cp_en.le_enable_cs_Features = 0x00;
7816 cp_en.data[0] = 0x00;
7817 cp_en.data[1] = 0x00;
7818 cp_en.data[2] = 0x00;
7822 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7824 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7825 MGMT_STATUS_FAILED);
7829 if (!lmp_le_capable(hdev)) {
7830 mgmt_pending_remove(cmd);
7831 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7832 MGMT_STATUS_NOT_SUPPORTED);
7836 if (!hdev_is_powered(hdev)) {
7837 BT_DBG("%s", hdev->name);
7838 mgmt_pending_remove(cmd);
7839 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7840 MGMT_STATUS_NOT_POWERED);
7844 hci_req_init(&req, hdev);
7846 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7847 sizeof(struct hci_cp_set_enable_rssi),
7848 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7849 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7851 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7852 err = hci_req_run(&req, set_rssi_disable_complete);
7855 mgmt_pending_remove(cmd);
7856 BT_ERR("Error in requesting hci_req_run");
7861 hci_dev_unlock(hdev);
7865 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
7867 struct hci_cc_rsp_enable_rssi *rp = response;
7868 struct mgmt_pending_cmd *cmd_enable = NULL;
7869 struct mgmt_pending_cmd *cmd_disable = NULL;
7870 struct mgmt_cp_set_enable_rssi *cp_en;
7871 struct mgmt_cp_disable_rssi *cp_dis;
7874 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7875 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7876 hci_dev_unlock(hdev);
7879 BT_DBG("Enable Request");
7882 BT_DBG("Disable Request");
7885 cp_en = cmd_enable->param;
7890 switch (rp->le_ext_opcode) {
7892 BT_DBG("RSSI enabled.. Setting Threshold...");
7893 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
7894 cp_en, sizeof(*cp_en));
7898 BT_DBG("Sending RSSI enable success");
7899 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
7900 cp_en, rp, rp->status);
7904 } else if (cmd_disable) {
7905 cp_dis = cmd_disable->param;
7910 switch (rp->le_ext_opcode) {
7912 BT_DBG("Sending RSSI disable success");
7913 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7914 cp_dis, rp, rp->status);
7919 * Only unset RSSI Threshold values for the Link if
7920 * RSSI is monitored for other BREDR or LE Links
7922 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
7923 BT_DBG("Unset Threshold. Other links being monitored");
7924 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7925 cp_dis, rp, rp->status);
7927 BT_DBG("Unset Threshold. Disabling...");
7928 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
7929 cp_dis, sizeof(*cp_dis));
7936 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7939 struct mgmt_pending_cmd *cmd;
7941 BT_DBG("status 0x%02x", status);
7945 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7950 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7951 mgmt_status(status));
7953 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7956 mgmt_pending_remove(cmd);
7959 hci_dev_unlock(hdev);
7962 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7963 void *data, u16 len)
7965 struct mgmt_pending_cmd *cmd;
7966 struct hci_request req;
7967 struct mgmt_cp_set_enable_rssi *cp = data;
7968 struct hci_cp_set_enable_rssi cp_en = { 0, };
7971 BT_DBG("Set Enable RSSI.");
7973 cp_en.hci_le_ext_opcode = 0x01;
7974 cp_en.le_enable_cs_Features = 0x04;
7975 cp_en.data[0] = 0x00;
7976 cp_en.data[1] = 0x00;
7977 cp_en.data[2] = 0x00;
7981 if (!lmp_le_capable(hdev)) {
7982 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7983 MGMT_STATUS_NOT_SUPPORTED);
7987 if (!hdev_is_powered(hdev)) {
7988 BT_DBG("%s", hdev->name);
7989 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7990 MGMT_STATUS_NOT_POWERED);
7994 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
7995 BT_DBG("%s", hdev->name);
7996 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8001 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
8004 BT_DBG("%s", hdev->name);
8009 /* If RSSI is already enabled directly set Threshold values */
8010 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
8011 hci_dev_unlock(hdev);
8012 BT_DBG("RSSI Enabled. Directly set Threshold");
8013 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
8017 hci_req_init(&req, hdev);
8019 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
8020 sizeof(struct hci_cp_set_enable_rssi),
8021 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
8022 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
8024 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
8025 err = hci_req_run(&req, set_rssi_enable_complete);
8028 mgmt_pending_remove(cmd);
8029 BT_ERR("Error in requesting hci_req_run");
8034 hci_dev_unlock(hdev);
8039 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8041 struct mgmt_pending_cmd *cmd;
8043 BT_DBG("status 0x%02x", status);
8047 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
8051 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8052 MGMT_STATUS_SUCCESS, &status, 1);
8054 mgmt_pending_remove(cmd);
8057 hci_dev_unlock(hdev);
8060 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
8063 struct mgmt_pending_cmd *cmd;
8064 struct hci_request req;
8065 struct mgmt_cp_get_raw_rssi *cp = data;
8066 struct hci_cp_get_raw_rssi hci_cp;
8068 struct hci_conn *conn;
8072 BT_DBG("Get Raw RSSI.");
8076 if (!lmp_le_capable(hdev)) {
8077 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8078 MGMT_STATUS_NOT_SUPPORTED);
8082 if (cp->link_type == 0x01)
8083 dest_type = LE_LINK;
8085 dest_type = ACL_LINK;
8087 /* Get LE/BREDR link handle info */
8088 conn = hci_conn_hash_lookup_ba(hdev,
8089 dest_type, &cp->bt_address);
8091 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8092 MGMT_STATUS_NOT_CONNECTED);
8095 hci_cp.conn_handle = conn->handle;
8097 if (!hdev_is_powered(hdev)) {
8098 BT_DBG("%s", hdev->name);
8099 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8100 MGMT_STATUS_NOT_POWERED);
8104 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
8105 BT_DBG("%s", hdev->name);
8106 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8111 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
8113 BT_DBG("%s", hdev->name);
8118 hci_req_init(&req, hdev);
8120 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
8121 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
8122 err = hci_req_run(&req, get_raw_rssi_complete);
8125 mgmt_pending_remove(cmd);
8126 BT_ERR("Error in requesting hci_req_run");
8130 hci_dev_unlock(hdev);
8135 void mgmt_raw_rssi_response(struct hci_dev *hdev,
8136 struct hci_cc_rp_get_raw_rssi *rp, int success)
8138 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
8139 struct hci_conn *conn;
8141 mgmt_rp.status = rp->status;
8142 mgmt_rp.rssi_dbm = rp->rssi_dbm;
8144 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
8148 bacpy(&mgmt_rp.bt_address, &conn->dst);
8149 if (conn->type == LE_LINK)
8150 mgmt_rp.link_type = 0x01;
8152 mgmt_rp.link_type = 0x00;
8154 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
8155 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
8158 static void set_disable_threshold_complete(struct hci_dev *hdev,
8159 u8 status, u16 opcode)
8161 struct mgmt_pending_cmd *cmd;
8163 BT_DBG("status 0x%02x", status);
8167 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
8171 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8172 MGMT_STATUS_SUCCESS, &status, 1);
8174 mgmt_pending_remove(cmd);
8177 hci_dev_unlock(hdev);
8180 /** Removes monitoring for a link*/
8181 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
8182 void *data, u16 len)
8185 struct hci_cp_set_rssi_threshold th = { 0, };
8186 struct mgmt_cp_disable_rssi *cp = data;
8187 struct hci_conn *conn;
8188 struct mgmt_pending_cmd *cmd;
8189 struct hci_request req;
8192 BT_DBG("Set Disable RSSI.");
8196 if (!lmp_le_capable(hdev)) {
8197 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8198 MGMT_STATUS_NOT_SUPPORTED);
8202 /* Get LE/ACL link handle info*/
8203 if (cp->link_type == 0x01)
8204 dest_type = LE_LINK;
8206 dest_type = ACL_LINK;
8208 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
8210 err = mgmt_cmd_complete(sk, hdev->id,
8211 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
8215 th.hci_le_ext_opcode = 0x0B;
8217 th.conn_handle = conn->handle;
8218 th.alert_mask = 0x00;
8220 th.in_range_th = 0x00;
8223 if (!hdev_is_powered(hdev)) {
8224 BT_DBG("%s", hdev->name);
8225 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8230 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
8231 BT_DBG("%s", hdev->name);
8232 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8237 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
8240 BT_DBG("%s", hdev->name);
8245 hci_req_init(&req, hdev);
8247 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
8248 err = hci_req_run(&req, set_disable_threshold_complete);
8250 mgmt_pending_remove(cmd);
8251 BT_ERR("Error in requesting hci_req_run");
8256 hci_dev_unlock(hdev);
8261 void mgmt_rssi_alert_evt(struct hci_dev *hdev, struct sk_buff *skb)
8263 struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
8264 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
8265 struct hci_conn *conn;
8267 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
8268 ev->conn_handle, ev->alert_type, ev->rssi_dbm);
8270 conn = hci_conn_hash_lookup_handle(hdev, ev->conn_handle);
8273 BT_ERR("RSSI alert Error: Device not found for handle");
8276 bacpy(&mgmt_ev.bdaddr, &conn->dst);
8278 if (conn->type == LE_LINK)
8279 mgmt_ev.link_type = 0x01;
8281 mgmt_ev.link_type = 0x00;
8283 mgmt_ev.alert_type = ev->alert_type;
8284 mgmt_ev.rssi_dbm = ev->rssi_dbm;
8286 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
8287 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
8290 #endif /* TIZEN_BT */
8292 static bool ltk_is_valid(struct mgmt_ltk_info *key)
8294 if (key->initiator != 0x00 && key->initiator != 0x01)
8297 switch (key->addr.type) {
8298 case BDADDR_LE_PUBLIC:
8301 case BDADDR_LE_RANDOM:
8302 /* Two most significant bits shall be set */
8303 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
8311 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
8312 void *cp_data, u16 len)
8314 struct mgmt_cp_load_long_term_keys *cp = cp_data;
8315 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
8316 sizeof(struct mgmt_ltk_info));
8317 u16 key_count, expected_len;
8320 bt_dev_dbg(hdev, "sock %p", sk);
8322 if (!lmp_le_capable(hdev))
8323 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8324 MGMT_STATUS_NOT_SUPPORTED);
8326 key_count = __le16_to_cpu(cp->key_count);
8327 if (key_count > max_key_count) {
8328 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
8330 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8331 MGMT_STATUS_INVALID_PARAMS);
8334 expected_len = struct_size(cp, keys, key_count);
8335 if (expected_len != len) {
8336 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
8338 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8339 MGMT_STATUS_INVALID_PARAMS);
8342 bt_dev_dbg(hdev, "key_count %u", key_count);
8344 for (i = 0; i < key_count; i++) {
8345 struct mgmt_ltk_info *key = &cp->keys[i];
8347 if (!ltk_is_valid(key))
8348 return mgmt_cmd_status(sk, hdev->id,
8349 MGMT_OP_LOAD_LONG_TERM_KEYS,
8350 MGMT_STATUS_INVALID_PARAMS);
8355 hci_smp_ltks_clear(hdev);
8357 for (i = 0; i < key_count; i++) {
8358 struct mgmt_ltk_info *key = &cp->keys[i];
8359 u8 type, authenticated;
8361 if (hci_is_blocked_key(hdev,
8362 HCI_BLOCKED_KEY_TYPE_LTK,
8364 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
8369 switch (key->type) {
8370 case MGMT_LTK_UNAUTHENTICATED:
8371 authenticated = 0x00;
8372 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8374 case MGMT_LTK_AUTHENTICATED:
8375 authenticated = 0x01;
8376 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8378 case MGMT_LTK_P256_UNAUTH:
8379 authenticated = 0x00;
8380 type = SMP_LTK_P256;
8382 case MGMT_LTK_P256_AUTH:
8383 authenticated = 0x01;
8384 type = SMP_LTK_P256;
8386 case MGMT_LTK_P256_DEBUG:
8387 authenticated = 0x00;
8388 type = SMP_LTK_P256_DEBUG;
8394 hci_add_ltk(hdev, &key->addr.bdaddr,
8395 le_addr_type(key->addr.type), type, authenticated,
8396 key->val, key->enc_size, key->ediv, key->rand);
8399 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
8402 hci_dev_unlock(hdev);
8407 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
8409 struct mgmt_pending_cmd *cmd = data;
8410 struct hci_conn *conn = cmd->user_data;
8411 struct mgmt_cp_get_conn_info *cp = cmd->param;
8412 struct mgmt_rp_get_conn_info rp;
8415 bt_dev_dbg(hdev, "err %d", err);
8417 memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
8419 status = mgmt_status(err);
8420 if (status == MGMT_STATUS_SUCCESS) {
8421 rp.rssi = conn->rssi;
8422 rp.tx_power = conn->tx_power;
8423 rp.max_tx_power = conn->max_tx_power;
8425 rp.rssi = HCI_RSSI_INVALID;
8426 rp.tx_power = HCI_TX_POWER_INVALID;
8427 rp.max_tx_power = HCI_TX_POWER_INVALID;
8430 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
8433 mgmt_pending_free(cmd);
8436 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
8438 struct mgmt_pending_cmd *cmd = data;
8439 struct mgmt_cp_get_conn_info *cp = cmd->param;
8440 struct hci_conn *conn;
8444 /* Make sure we are still connected */
8445 if (cp->addr.type == BDADDR_BREDR)
8446 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8449 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8451 if (!conn || conn->state != BT_CONNECTED)
8452 return MGMT_STATUS_NOT_CONNECTED;
8454 cmd->user_data = conn;
8455 handle = cpu_to_le16(conn->handle);
8457 /* Refresh RSSI each time */
8458 err = hci_read_rssi_sync(hdev, handle);
8460 /* For LE links TX power does not change thus we don't need to
8461 * query for it once value is known.
8463 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
8464 conn->tx_power == HCI_TX_POWER_INVALID))
8465 err = hci_read_tx_power_sync(hdev, handle, 0x00);
8467 /* Max TX power needs to be read only once per connection */
8468 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
8469 err = hci_read_tx_power_sync(hdev, handle, 0x01);
8474 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
8477 struct mgmt_cp_get_conn_info *cp = data;
8478 struct mgmt_rp_get_conn_info rp;
8479 struct hci_conn *conn;
8480 unsigned long conn_info_age;
8483 bt_dev_dbg(hdev, "sock %p", sk);
8485 memset(&rp, 0, sizeof(rp));
8486 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8487 rp.addr.type = cp->addr.type;
8489 if (!bdaddr_type_is_valid(cp->addr.type))
8490 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8491 MGMT_STATUS_INVALID_PARAMS,
8496 if (!hdev_is_powered(hdev)) {
8497 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8498 MGMT_STATUS_NOT_POWERED, &rp,
8503 if (cp->addr.type == BDADDR_BREDR)
8504 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8507 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8509 if (!conn || conn->state != BT_CONNECTED) {
8510 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8511 MGMT_STATUS_NOT_CONNECTED, &rp,
8516 /* To avoid client trying to guess when to poll again for information we
8517 * calculate conn info age as random value between min/max set in hdev.
8519 conn_info_age = hdev->conn_info_min_age +
8520 prandom_u32_max(hdev->conn_info_max_age -
8521 hdev->conn_info_min_age);
8523 /* Query controller to refresh cached values if they are too old or were
8526 if (time_after(jiffies, conn->conn_info_timestamp +
8527 msecs_to_jiffies(conn_info_age)) ||
8528 !conn->conn_info_timestamp) {
8529 struct mgmt_pending_cmd *cmd;
8531 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
8536 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
8537 cmd, get_conn_info_complete);
8541 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8542 MGMT_STATUS_FAILED, &rp, sizeof(rp));
8545 mgmt_pending_free(cmd);
8550 conn->conn_info_timestamp = jiffies;
8552 /* Cache is valid, just reply with values cached in hci_conn */
8553 rp.rssi = conn->rssi;
8554 rp.tx_power = conn->tx_power;
8555 rp.max_tx_power = conn->max_tx_power;
8557 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
8558 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8562 hci_dev_unlock(hdev);
8566 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
8568 struct mgmt_pending_cmd *cmd = data;
8569 struct mgmt_cp_get_clock_info *cp = cmd->param;
8570 struct mgmt_rp_get_clock_info rp;
8571 struct hci_conn *conn = cmd->user_data;
8572 u8 status = mgmt_status(err);
8574 bt_dev_dbg(hdev, "err %d", err);
8576 memset(&rp, 0, sizeof(rp));
8577 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8578 rp.addr.type = cp->addr.type;
8583 rp.local_clock = cpu_to_le32(hdev->clock);
8586 rp.piconet_clock = cpu_to_le32(conn->clock);
8587 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
8591 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
8594 mgmt_pending_free(cmd);
8597 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
8599 struct mgmt_pending_cmd *cmd = data;
8600 struct mgmt_cp_get_clock_info *cp = cmd->param;
8601 struct hci_cp_read_clock hci_cp;
8602 struct hci_conn *conn;
8604 memset(&hci_cp, 0, sizeof(hci_cp));
8605 hci_read_clock_sync(hdev, &hci_cp);
8607 /* Make sure connection still exists */
8608 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
8609 if (!conn || conn->state != BT_CONNECTED)
8610 return MGMT_STATUS_NOT_CONNECTED;
8612 cmd->user_data = conn;
8613 hci_cp.handle = cpu_to_le16(conn->handle);
8614 hci_cp.which = 0x01; /* Piconet clock */
8616 return hci_read_clock_sync(hdev, &hci_cp);
8619 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
8622 struct mgmt_cp_get_clock_info *cp = data;
8623 struct mgmt_rp_get_clock_info rp;
8624 struct mgmt_pending_cmd *cmd;
8625 struct hci_conn *conn;
8628 bt_dev_dbg(hdev, "sock %p", sk);
8630 memset(&rp, 0, sizeof(rp));
8631 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8632 rp.addr.type = cp->addr.type;
8634 if (cp->addr.type != BDADDR_BREDR)
8635 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8636 MGMT_STATUS_INVALID_PARAMS,
8641 if (!hdev_is_powered(hdev)) {
8642 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8643 MGMT_STATUS_NOT_POWERED, &rp,
8648 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8649 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8651 if (!conn || conn->state != BT_CONNECTED) {
8652 err = mgmt_cmd_complete(sk, hdev->id,
8653 MGMT_OP_GET_CLOCK_INFO,
8654 MGMT_STATUS_NOT_CONNECTED,
8662 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
8666 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
8667 get_clock_info_complete);
8670 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8671 MGMT_STATUS_FAILED, &rp, sizeof(rp));
8674 mgmt_pending_free(cmd);
8679 hci_dev_unlock(hdev);
8683 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8685 struct hci_conn *conn;
8687 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8691 if (conn->dst_type != type)
8694 if (conn->state != BT_CONNECTED)
8700 /* This function requires the caller holds hdev->lock */
8701 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8702 u8 addr_type, u8 auto_connect)
8704 struct hci_conn_params *params;
8706 params = hci_conn_params_add(hdev, addr, addr_type);
8710 if (params->auto_connect == auto_connect)
8713 list_del_init(¶ms->action);
8715 switch (auto_connect) {
8716 case HCI_AUTO_CONN_DISABLED:
8717 case HCI_AUTO_CONN_LINK_LOSS:
8718 /* If auto connect is being disabled when we're trying to
8719 * connect to device, keep connecting.
8721 if (params->explicit_connect)
8722 list_add(¶ms->action, &hdev->pend_le_conns);
8724 case HCI_AUTO_CONN_REPORT:
8725 if (params->explicit_connect)
8726 list_add(¶ms->action, &hdev->pend_le_conns);
8728 list_add(¶ms->action, &hdev->pend_le_reports);
8730 case HCI_AUTO_CONN_DIRECT:
8731 case HCI_AUTO_CONN_ALWAYS:
8732 if (!is_connected(hdev, addr, addr_type))
8733 list_add(¶ms->action, &hdev->pend_le_conns);
8737 params->auto_connect = auto_connect;
8739 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
8740 addr, addr_type, auto_connect);
8745 static void device_added(struct sock *sk, struct hci_dev *hdev,
8746 bdaddr_t *bdaddr, u8 type, u8 action)
8748 struct mgmt_ev_device_added ev;
8750 bacpy(&ev.addr.bdaddr, bdaddr);
8751 ev.addr.type = type;
8754 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
8757 static int add_device_sync(struct hci_dev *hdev, void *data)
8759 return hci_update_passive_scan_sync(hdev);
8762 static int add_device(struct sock *sk, struct hci_dev *hdev,
8763 void *data, u16 len)
8765 struct mgmt_cp_add_device *cp = data;
8766 u8 auto_conn, addr_type;
8767 struct hci_conn_params *params;
8769 u32 current_flags = 0;
8770 u32 supported_flags;
8772 bt_dev_dbg(hdev, "sock %p", sk);
8774 if (!bdaddr_type_is_valid(cp->addr.type) ||
8775 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
8776 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8777 MGMT_STATUS_INVALID_PARAMS,
8778 &cp->addr, sizeof(cp->addr));
8780 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
8781 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8782 MGMT_STATUS_INVALID_PARAMS,
8783 &cp->addr, sizeof(cp->addr));
8787 if (cp->addr.type == BDADDR_BREDR) {
8788 /* Only incoming connections action is supported for now */
8789 if (cp->action != 0x01) {
8790 err = mgmt_cmd_complete(sk, hdev->id,
8792 MGMT_STATUS_INVALID_PARAMS,
8793 &cp->addr, sizeof(cp->addr));
8797 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
8803 hci_update_scan(hdev);
8808 addr_type = le_addr_type(cp->addr.type);
8810 if (cp->action == 0x02)
8811 auto_conn = HCI_AUTO_CONN_ALWAYS;
8812 else if (cp->action == 0x01)
8813 auto_conn = HCI_AUTO_CONN_DIRECT;
8815 auto_conn = HCI_AUTO_CONN_REPORT;
8817 /* Kernel internally uses conn_params with resolvable private
8818 * address, but Add Device allows only identity addresses.
8819 * Make sure it is enforced before calling
8820 * hci_conn_params_lookup.
8822 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8823 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8824 MGMT_STATUS_INVALID_PARAMS,
8825 &cp->addr, sizeof(cp->addr));
8829 /* If the connection parameters don't exist for this device,
8830 * they will be created and configured with defaults.
8832 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
8834 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8835 MGMT_STATUS_FAILED, &cp->addr,
8839 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8842 current_flags = params->flags;
8845 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
8850 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
8851 supported_flags = hdev->conn_flags;
8852 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
8853 supported_flags, current_flags);
8855 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8856 MGMT_STATUS_SUCCESS, &cp->addr,
8860 hci_dev_unlock(hdev);
8864 static void device_removed(struct sock *sk, struct hci_dev *hdev,
8865 bdaddr_t *bdaddr, u8 type)
8867 struct mgmt_ev_device_removed ev;
8869 bacpy(&ev.addr.bdaddr, bdaddr);
8870 ev.addr.type = type;
8872 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
8875 static int remove_device_sync(struct hci_dev *hdev, void *data)
8877 return hci_update_passive_scan_sync(hdev);
8880 static int remove_device(struct sock *sk, struct hci_dev *hdev,
8881 void *data, u16 len)
8883 struct mgmt_cp_remove_device *cp = data;
8886 bt_dev_dbg(hdev, "sock %p", sk);
8890 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8891 struct hci_conn_params *params;
8894 if (!bdaddr_type_is_valid(cp->addr.type)) {
8895 err = mgmt_cmd_complete(sk, hdev->id,
8896 MGMT_OP_REMOVE_DEVICE,
8897 MGMT_STATUS_INVALID_PARAMS,
8898 &cp->addr, sizeof(cp->addr));
8902 if (cp->addr.type == BDADDR_BREDR) {
8903 err = hci_bdaddr_list_del(&hdev->accept_list,
8907 err = mgmt_cmd_complete(sk, hdev->id,
8908 MGMT_OP_REMOVE_DEVICE,
8909 MGMT_STATUS_INVALID_PARAMS,
8915 hci_update_scan(hdev);
8917 device_removed(sk, hdev, &cp->addr.bdaddr,
8922 addr_type = le_addr_type(cp->addr.type);
8924 /* Kernel internally uses conn_params with resolvable private
8925 * address, but Remove Device allows only identity addresses.
8926 * Make sure it is enforced before calling
8927 * hci_conn_params_lookup.
8929 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8930 err = mgmt_cmd_complete(sk, hdev->id,
8931 MGMT_OP_REMOVE_DEVICE,
8932 MGMT_STATUS_INVALID_PARAMS,
8933 &cp->addr, sizeof(cp->addr));
8937 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8940 err = mgmt_cmd_complete(sk, hdev->id,
8941 MGMT_OP_REMOVE_DEVICE,
8942 MGMT_STATUS_INVALID_PARAMS,
8943 &cp->addr, sizeof(cp->addr));
8947 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
8948 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
8949 err = mgmt_cmd_complete(sk, hdev->id,
8950 MGMT_OP_REMOVE_DEVICE,
8951 MGMT_STATUS_INVALID_PARAMS,
8952 &cp->addr, sizeof(cp->addr));
8956 list_del(¶ms->action);
8957 list_del(¶ms->list);
8960 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
8962 struct hci_conn_params *p, *tmp;
8963 struct bdaddr_list *b, *btmp;
8965 if (cp->addr.type) {
8966 err = mgmt_cmd_complete(sk, hdev->id,
8967 MGMT_OP_REMOVE_DEVICE,
8968 MGMT_STATUS_INVALID_PARAMS,
8969 &cp->addr, sizeof(cp->addr));
8973 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
8974 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
8979 hci_update_scan(hdev);
8981 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
8982 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
8984 device_removed(sk, hdev, &p->addr, p->addr_type);
8985 if (p->explicit_connect) {
8986 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
8989 list_del(&p->action);
8994 bt_dev_dbg(hdev, "All LE connection parameters were removed");
8997 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
9000 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
9001 MGMT_STATUS_SUCCESS, &cp->addr,
9004 hci_dev_unlock(hdev);
9008 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
9011 struct mgmt_cp_load_conn_param *cp = data;
9012 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
9013 sizeof(struct mgmt_conn_param));
9014 u16 param_count, expected_len;
9017 if (!lmp_le_capable(hdev))
9018 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9019 MGMT_STATUS_NOT_SUPPORTED);
9021 param_count = __le16_to_cpu(cp->param_count);
9022 if (param_count > max_param_count) {
9023 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
9025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9026 MGMT_STATUS_INVALID_PARAMS);
9029 expected_len = struct_size(cp, params, param_count);
9030 if (expected_len != len) {
9031 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
9033 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9034 MGMT_STATUS_INVALID_PARAMS);
9037 bt_dev_dbg(hdev, "param_count %u", param_count);
9041 hci_conn_params_clear_disabled(hdev);
9043 for (i = 0; i < param_count; i++) {
9044 struct mgmt_conn_param *param = &cp->params[i];
9045 struct hci_conn_params *hci_param;
9046 u16 min, max, latency, timeout;
9049 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
9052 if (param->addr.type == BDADDR_LE_PUBLIC) {
9053 addr_type = ADDR_LE_DEV_PUBLIC;
9054 } else if (param->addr.type == BDADDR_LE_RANDOM) {
9055 addr_type = ADDR_LE_DEV_RANDOM;
9057 bt_dev_err(hdev, "ignoring invalid connection parameters");
9061 min = le16_to_cpu(param->min_interval);
9062 max = le16_to_cpu(param->max_interval);
9063 latency = le16_to_cpu(param->latency);
9064 timeout = le16_to_cpu(param->timeout);
9066 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
9067 min, max, latency, timeout);
9069 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
9070 bt_dev_err(hdev, "ignoring invalid connection parameters");
9074 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
9077 bt_dev_err(hdev, "failed to add connection parameters");
9081 hci_param->conn_min_interval = min;
9082 hci_param->conn_max_interval = max;
9083 hci_param->conn_latency = latency;
9084 hci_param->supervision_timeout = timeout;
9087 hci_dev_unlock(hdev);
9089 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
9093 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
9094 void *data, u16 len)
9096 struct mgmt_cp_set_external_config *cp = data;
9100 bt_dev_dbg(hdev, "sock %p", sk);
9102 if (hdev_is_powered(hdev))
9103 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9104 MGMT_STATUS_REJECTED);
9106 if (cp->config != 0x00 && cp->config != 0x01)
9107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9108 MGMT_STATUS_INVALID_PARAMS);
9110 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
9111 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9112 MGMT_STATUS_NOT_SUPPORTED);
9117 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
9119 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
9121 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
9128 err = new_options(hdev, sk);
9130 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
9131 mgmt_index_removed(hdev);
9133 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
9134 hci_dev_set_flag(hdev, HCI_CONFIG);
9135 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9137 queue_work(hdev->req_workqueue, &hdev->power_on);
9139 set_bit(HCI_RAW, &hdev->flags);
9140 mgmt_index_added(hdev);
9145 hci_dev_unlock(hdev);
9149 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
9150 void *data, u16 len)
9152 struct mgmt_cp_set_public_address *cp = data;
9156 bt_dev_dbg(hdev, "sock %p", sk);
9158 if (hdev_is_powered(hdev))
9159 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9160 MGMT_STATUS_REJECTED);
9162 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
9163 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9164 MGMT_STATUS_INVALID_PARAMS);
9166 if (!hdev->set_bdaddr)
9167 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9168 MGMT_STATUS_NOT_SUPPORTED);
9172 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
9173 bacpy(&hdev->public_addr, &cp->bdaddr);
9175 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
9182 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
9183 err = new_options(hdev, sk);
9185 if (is_configured(hdev)) {
9186 mgmt_index_removed(hdev);
9188 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
9190 hci_dev_set_flag(hdev, HCI_CONFIG);
9191 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9193 queue_work(hdev->req_workqueue, &hdev->power_on);
9197 hci_dev_unlock(hdev);
9201 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
9204 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
9205 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
9206 u8 *h192, *r192, *h256, *r256;
9207 struct mgmt_pending_cmd *cmd = data;
9208 struct sk_buff *skb = cmd->skb;
9209 u8 status = mgmt_status(err);
9212 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
9217 status = MGMT_STATUS_FAILED;
9218 else if (IS_ERR(skb))
9219 status = mgmt_status(PTR_ERR(skb));
9221 status = mgmt_status(skb->data[0]);
9224 bt_dev_dbg(hdev, "status %u", status);
9226 mgmt_cp = cmd->param;
9229 status = mgmt_status(status);
9236 } else if (!bredr_sc_enabled(hdev)) {
9237 struct hci_rp_read_local_oob_data *rp;
9239 if (skb->len != sizeof(*rp)) {
9240 status = MGMT_STATUS_FAILED;
9243 status = MGMT_STATUS_SUCCESS;
9244 rp = (void *)skb->data;
9246 eir_len = 5 + 18 + 18;
9253 struct hci_rp_read_local_oob_ext_data *rp;
9255 if (skb->len != sizeof(*rp)) {
9256 status = MGMT_STATUS_FAILED;
9259 status = MGMT_STATUS_SUCCESS;
9260 rp = (void *)skb->data;
9262 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
9263 eir_len = 5 + 18 + 18;
9267 eir_len = 5 + 18 + 18 + 18 + 18;
9277 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
9284 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
9285 hdev->dev_class, 3);
9288 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9289 EIR_SSP_HASH_C192, h192, 16);
9290 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9291 EIR_SSP_RAND_R192, r192, 16);
9295 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9296 EIR_SSP_HASH_C256, h256, 16);
9297 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9298 EIR_SSP_RAND_R256, r256, 16);
9302 mgmt_rp->type = mgmt_cp->type;
9303 mgmt_rp->eir_len = cpu_to_le16(eir_len);
9305 err = mgmt_cmd_complete(cmd->sk, hdev->id,
9306 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
9307 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
9308 if (err < 0 || status)
9311 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
9313 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9314 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
9315 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
9317 if (skb && !IS_ERR(skb))
9321 mgmt_pending_remove(cmd);
9324 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
9325 struct mgmt_cp_read_local_oob_ext_data *cp)
9327 struct mgmt_pending_cmd *cmd;
9330 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
9335 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
9336 read_local_oob_ext_data_complete);
9339 mgmt_pending_remove(cmd);
9346 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
9347 void *data, u16 data_len)
9349 struct mgmt_cp_read_local_oob_ext_data *cp = data;
9350 struct mgmt_rp_read_local_oob_ext_data *rp;
9353 u8 status, flags, role, addr[7], hash[16], rand[16];
9356 bt_dev_dbg(hdev, "sock %p", sk);
9358 if (hdev_is_powered(hdev)) {
9360 case BIT(BDADDR_BREDR):
9361 status = mgmt_bredr_support(hdev);
9367 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9368 status = mgmt_le_support(hdev);
9372 eir_len = 9 + 3 + 18 + 18 + 3;
9375 status = MGMT_STATUS_INVALID_PARAMS;
9380 status = MGMT_STATUS_NOT_POWERED;
9384 rp_len = sizeof(*rp) + eir_len;
9385 rp = kmalloc(rp_len, GFP_ATOMIC);
9389 if (!status && !lmp_ssp_capable(hdev)) {
9390 status = MGMT_STATUS_NOT_SUPPORTED;
9401 case BIT(BDADDR_BREDR):
9402 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9403 err = read_local_ssp_oob_req(hdev, sk, cp);
9404 hci_dev_unlock(hdev);
9408 status = MGMT_STATUS_FAILED;
9411 eir_len = eir_append_data(rp->eir, eir_len,
9413 hdev->dev_class, 3);
9416 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9417 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
9418 smp_generate_oob(hdev, hash, rand) < 0) {
9419 hci_dev_unlock(hdev);
9420 status = MGMT_STATUS_FAILED;
9424 /* This should return the active RPA, but since the RPA
9425 * is only programmed on demand, it is really hard to fill
9426 * this in at the moment. For now disallow retrieving
9427 * local out-of-band data when privacy is in use.
9429 * Returning the identity address will not help here since
9430 * pairing happens before the identity resolving key is
9431 * known and thus the connection establishment happens
9432 * based on the RPA and not the identity address.
9434 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
9435 hci_dev_unlock(hdev);
9436 status = MGMT_STATUS_REJECTED;
9440 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
9441 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
9442 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
9443 bacmp(&hdev->static_addr, BDADDR_ANY))) {
9444 memcpy(addr, &hdev->static_addr, 6);
9447 memcpy(addr, &hdev->bdaddr, 6);
9451 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
9452 addr, sizeof(addr));
9454 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
9459 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
9460 &role, sizeof(role));
9462 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
9463 eir_len = eir_append_data(rp->eir, eir_len,
9465 hash, sizeof(hash));
9467 eir_len = eir_append_data(rp->eir, eir_len,
9469 rand, sizeof(rand));
9472 flags = mgmt_get_adv_discov_flags(hdev);
9474 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
9475 flags |= LE_AD_NO_BREDR;
9477 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
9478 &flags, sizeof(flags));
9482 hci_dev_unlock(hdev);
9484 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
9486 status = MGMT_STATUS_SUCCESS;
9489 rp->type = cp->type;
9490 rp->eir_len = cpu_to_le16(eir_len);
9492 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
9493 status, rp, sizeof(*rp) + eir_len);
9494 if (err < 0 || status)
9497 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9498 rp, sizeof(*rp) + eir_len,
9499 HCI_MGMT_OOB_DATA_EVENTS, sk);
9507 static u32 get_supported_adv_flags(struct hci_dev *hdev)
9511 flags |= MGMT_ADV_FLAG_CONNECTABLE;
9512 flags |= MGMT_ADV_FLAG_DISCOV;
9513 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
9514 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
9515 flags |= MGMT_ADV_FLAG_APPEARANCE;
9516 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
9517 flags |= MGMT_ADV_PARAM_DURATION;
9518 flags |= MGMT_ADV_PARAM_TIMEOUT;
9519 flags |= MGMT_ADV_PARAM_INTERVALS;
9520 flags |= MGMT_ADV_PARAM_TX_POWER;
9521 flags |= MGMT_ADV_PARAM_SCAN_RSP;
9523 /* In extended adv TX_POWER returned from Set Adv Param
9524 * will be always valid.
9526 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
9527 flags |= MGMT_ADV_FLAG_TX_POWER;
9529 if (ext_adv_capable(hdev)) {
9530 flags |= MGMT_ADV_FLAG_SEC_1M;
9531 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
9532 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
9534 if (hdev->le_features[1] & HCI_LE_PHY_2M)
9535 flags |= MGMT_ADV_FLAG_SEC_2M;
9537 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
9538 flags |= MGMT_ADV_FLAG_SEC_CODED;
9544 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
9545 void *data, u16 data_len)
9547 struct mgmt_rp_read_adv_features *rp;
9550 struct adv_info *adv_instance;
9551 u32 supported_flags;
9554 bt_dev_dbg(hdev, "sock %p", sk);
9556 if (!lmp_le_capable(hdev))
9557 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9558 MGMT_STATUS_REJECTED);
9562 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
9563 rp = kmalloc(rp_len, GFP_ATOMIC);
9565 hci_dev_unlock(hdev);
9569 supported_flags = get_supported_adv_flags(hdev);
9571 rp->supported_flags = cpu_to_le32(supported_flags);
9572 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
9573 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
9574 rp->max_instances = hdev->le_num_of_adv_sets;
9575 rp->num_instances = hdev->adv_instance_cnt;
9577 instance = rp->instance;
9578 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
9579 /* Only instances 1-le_num_of_adv_sets are externally visible */
9580 if (adv_instance->instance <= hdev->adv_instance_cnt) {
9581 *instance = adv_instance->instance;
9584 rp->num_instances--;
9589 hci_dev_unlock(hdev);
9591 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9592 MGMT_STATUS_SUCCESS, rp, rp_len);
9599 static u8 calculate_name_len(struct hci_dev *hdev)
9601 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
9603 return eir_append_local_name(hdev, buf, 0);
9606 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
9609 u8 max_len = HCI_MAX_AD_LENGTH;
9612 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
9613 MGMT_ADV_FLAG_LIMITED_DISCOV |
9614 MGMT_ADV_FLAG_MANAGED_FLAGS))
9617 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
9620 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
9621 max_len -= calculate_name_len(hdev);
9623 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
9630 static bool flags_managed(u32 adv_flags)
9632 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
9633 MGMT_ADV_FLAG_LIMITED_DISCOV |
9634 MGMT_ADV_FLAG_MANAGED_FLAGS);
9637 static bool tx_power_managed(u32 adv_flags)
9639 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
9642 static bool name_managed(u32 adv_flags)
9644 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
9647 static bool appearance_managed(u32 adv_flags)
9649 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
9652 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
9653 u8 len, bool is_adv_data)
9658 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
9663 /* Make sure that the data is correctly formatted. */
9664 for (i = 0; i < len; i += (cur_len + 1)) {
9670 if (data[i + 1] == EIR_FLAGS &&
9671 (!is_adv_data || flags_managed(adv_flags)))
9674 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
9677 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
9680 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
9683 if (data[i + 1] == EIR_APPEARANCE &&
9684 appearance_managed(adv_flags))
9687 /* If the current field length would exceed the total data
9688 * length, then it's invalid.
9690 if (i + cur_len >= len)
9697 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
9699 u32 supported_flags, phy_flags;
9701 /* The current implementation only supports a subset of the specified
9702 * flags. Also need to check mutual exclusiveness of sec flags.
9704 supported_flags = get_supported_adv_flags(hdev);
9705 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
9706 if (adv_flags & ~supported_flags ||
9707 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
9713 static bool adv_busy(struct hci_dev *hdev)
9715 return pending_find(MGMT_OP_SET_LE, hdev);
9718 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
9721 struct adv_info *adv, *n;
9723 bt_dev_dbg(hdev, "err %d", err);
9727 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
9734 adv->pending = false;
9738 instance = adv->instance;
9740 if (hdev->cur_adv_instance == instance)
9741 cancel_adv_timeout(hdev);
9743 hci_remove_adv_instance(hdev, instance);
9744 mgmt_advertising_removed(sk, hdev, instance);
9747 hci_dev_unlock(hdev);
9750 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
9752 struct mgmt_pending_cmd *cmd = data;
9753 struct mgmt_cp_add_advertising *cp = cmd->param;
9754 struct mgmt_rp_add_advertising rp;
9756 memset(&rp, 0, sizeof(rp));
9758 rp.instance = cp->instance;
9761 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9764 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9765 mgmt_status(err), &rp, sizeof(rp));
9767 add_adv_complete(hdev, cmd->sk, cp->instance, err);
9769 mgmt_pending_free(cmd);
9772 static int add_advertising_sync(struct hci_dev *hdev, void *data)
9774 struct mgmt_pending_cmd *cmd = data;
9775 struct mgmt_cp_add_advertising *cp = cmd->param;
9777 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9780 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
9781 void *data, u16 data_len)
9783 struct mgmt_cp_add_advertising *cp = data;
9784 struct mgmt_rp_add_advertising rp;
9787 u16 timeout, duration;
9788 unsigned int prev_instance_cnt;
9789 u8 schedule_instance = 0;
9790 struct adv_info *adv, *next_instance;
9792 struct mgmt_pending_cmd *cmd;
9794 bt_dev_dbg(hdev, "sock %p", sk);
9796 status = mgmt_le_support(hdev);
9798 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9801 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9803 MGMT_STATUS_INVALID_PARAMS);
9805 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
9806 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9807 MGMT_STATUS_INVALID_PARAMS);
9809 flags = __le32_to_cpu(cp->flags);
9810 timeout = __le16_to_cpu(cp->timeout);
9811 duration = __le16_to_cpu(cp->duration);
9813 if (!requested_adv_flags_are_valid(hdev, flags))
9814 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9815 MGMT_STATUS_INVALID_PARAMS);
9819 if (timeout && !hdev_is_powered(hdev)) {
9820 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9821 MGMT_STATUS_REJECTED);
9825 if (adv_busy(hdev)) {
9826 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9831 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
9832 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
9833 cp->scan_rsp_len, false)) {
9834 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9835 MGMT_STATUS_INVALID_PARAMS);
9839 prev_instance_cnt = hdev->adv_instance_cnt;
9841 adv = hci_add_adv_instance(hdev, cp->instance, flags,
9842 cp->adv_data_len, cp->data,
9844 cp->data + cp->adv_data_len,
9846 HCI_ADV_TX_POWER_NO_PREFERENCE,
9847 hdev->le_adv_min_interval,
9848 hdev->le_adv_max_interval, 0);
9850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9851 MGMT_STATUS_FAILED);
9855 /* Only trigger an advertising added event if a new instance was
9858 if (hdev->adv_instance_cnt > prev_instance_cnt)
9859 mgmt_advertising_added(sk, hdev, cp->instance);
9861 if (hdev->cur_adv_instance == cp->instance) {
9862 /* If the currently advertised instance is being changed then
9863 * cancel the current advertising and schedule the next
9864 * instance. If there is only one instance then the overridden
9865 * advertising data will be visible right away.
9867 cancel_adv_timeout(hdev);
9869 next_instance = hci_get_next_instance(hdev, cp->instance);
9871 schedule_instance = next_instance->instance;
9872 } else if (!hdev->adv_instance_timeout) {
9873 /* Immediately advertise the new instance if no other
9874 * instance is currently being advertised.
9876 schedule_instance = cp->instance;
9879 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
9880 * there is no instance to be advertised then we have no HCI
9881 * communication to make. Simply return.
9883 if (!hdev_is_powered(hdev) ||
9884 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9885 !schedule_instance) {
9886 rp.instance = cp->instance;
9887 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9888 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9892 /* We're good to go, update advertising data, parameters, and start
9895 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
9902 cp->instance = schedule_instance;
9904 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
9905 add_advertising_complete);
9907 mgmt_pending_free(cmd);
9910 hci_dev_unlock(hdev);
9915 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
9918 struct mgmt_pending_cmd *cmd = data;
9919 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
9920 struct mgmt_rp_add_ext_adv_params rp;
9921 struct adv_info *adv;
9924 BT_DBG("%s", hdev->name);
9928 adv = hci_find_adv_instance(hdev, cp->instance);
9932 rp.instance = cp->instance;
9933 rp.tx_power = adv->tx_power;
9935 /* While we're at it, inform userspace of the available space for this
9936 * advertisement, given the flags that will be used.
9938 flags = __le32_to_cpu(cp->flags);
9939 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9940 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9943 /* If this advertisement was previously advertising and we
9944 * failed to update it, we signal that it has been removed and
9945 * delete its structure
9948 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
9950 hci_remove_adv_instance(hdev, cp->instance);
9952 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9955 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9956 mgmt_status(err), &rp, sizeof(rp));
9961 mgmt_pending_free(cmd);
9963 hci_dev_unlock(hdev);
9966 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
9968 struct mgmt_pending_cmd *cmd = data;
9969 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
9971 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
9974 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
9975 void *data, u16 data_len)
9977 struct mgmt_cp_add_ext_adv_params *cp = data;
9978 struct mgmt_rp_add_ext_adv_params rp;
9979 struct mgmt_pending_cmd *cmd = NULL;
9980 struct adv_info *adv;
9981 u32 flags, min_interval, max_interval;
9982 u16 timeout, duration;
9987 BT_DBG("%s", hdev->name);
9989 status = mgmt_le_support(hdev);
9991 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9994 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9995 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
9996 MGMT_STATUS_INVALID_PARAMS);
9998 /* The purpose of breaking add_advertising into two separate MGMT calls
9999 * for params and data is to allow more parameters to be added to this
10000 * structure in the future. For this reason, we verify that we have the
10001 * bare minimum structure we know of when the interface was defined. Any
10002 * extra parameters we don't know about will be ignored in this request.
10004 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
10005 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10006 MGMT_STATUS_INVALID_PARAMS);
10008 flags = __le32_to_cpu(cp->flags);
10010 if (!requested_adv_flags_are_valid(hdev, flags))
10011 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10012 MGMT_STATUS_INVALID_PARAMS);
10014 hci_dev_lock(hdev);
10016 /* In new interface, we require that we are powered to register */
10017 if (!hdev_is_powered(hdev)) {
10018 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10019 MGMT_STATUS_REJECTED);
10023 if (adv_busy(hdev)) {
10024 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10029 /* Parse defined parameters from request, use defaults otherwise */
10030 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
10031 __le16_to_cpu(cp->timeout) : 0;
10033 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
10034 __le16_to_cpu(cp->duration) :
10035 hdev->def_multi_adv_rotation_duration;
10037 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10038 __le32_to_cpu(cp->min_interval) :
10039 hdev->le_adv_min_interval;
10041 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10042 __le32_to_cpu(cp->max_interval) :
10043 hdev->le_adv_max_interval;
10045 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
10047 HCI_ADV_TX_POWER_NO_PREFERENCE;
10049 /* Create advertising instance with no advertising or response data */
10050 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
10051 timeout, duration, tx_power, min_interval,
10055 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10056 MGMT_STATUS_FAILED);
10060 /* Submit request for advertising params if ext adv available */
10061 if (ext_adv_capable(hdev)) {
10062 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
10066 hci_remove_adv_instance(hdev, cp->instance);
10070 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
10071 add_ext_adv_params_complete);
10073 mgmt_pending_free(cmd);
10075 rp.instance = cp->instance;
10076 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
10077 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10078 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10079 err = mgmt_cmd_complete(sk, hdev->id,
10080 MGMT_OP_ADD_EXT_ADV_PARAMS,
10081 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10085 hci_dev_unlock(hdev);
10090 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
10092 struct mgmt_pending_cmd *cmd = data;
10093 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
10094 struct mgmt_rp_add_advertising rp;
10096 add_adv_complete(hdev, cmd->sk, cp->instance, err);
10098 memset(&rp, 0, sizeof(rp));
10100 rp.instance = cp->instance;
10103 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10106 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10107 mgmt_status(err), &rp, sizeof(rp));
10109 mgmt_pending_free(cmd);
10112 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
10114 struct mgmt_pending_cmd *cmd = data;
10115 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
10118 if (ext_adv_capable(hdev)) {
10119 err = hci_update_adv_data_sync(hdev, cp->instance);
10123 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
10127 return hci_enable_ext_advertising_sync(hdev, cp->instance);
10130 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
10133 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
10136 struct mgmt_cp_add_ext_adv_data *cp = data;
10137 struct mgmt_rp_add_ext_adv_data rp;
10138 u8 schedule_instance = 0;
10139 struct adv_info *next_instance;
10140 struct adv_info *adv_instance;
10142 struct mgmt_pending_cmd *cmd;
10144 BT_DBG("%s", hdev->name);
10146 hci_dev_lock(hdev);
10148 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10150 if (!adv_instance) {
10151 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10152 MGMT_STATUS_INVALID_PARAMS);
10156 /* In new interface, we require that we are powered to register */
10157 if (!hdev_is_powered(hdev)) {
10158 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10159 MGMT_STATUS_REJECTED);
10160 goto clear_new_instance;
10163 if (adv_busy(hdev)) {
10164 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10166 goto clear_new_instance;
10169 /* Validate new data */
10170 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
10171 cp->adv_data_len, true) ||
10172 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
10173 cp->adv_data_len, cp->scan_rsp_len, false)) {
10174 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10175 MGMT_STATUS_INVALID_PARAMS);
10176 goto clear_new_instance;
10179 /* Set the data in the advertising instance */
10180 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
10181 cp->data, cp->scan_rsp_len,
10182 cp->data + cp->adv_data_len);
10184 /* If using software rotation, determine next instance to use */
10185 if (hdev->cur_adv_instance == cp->instance) {
10186 /* If the currently advertised instance is being changed
10187 * then cancel the current advertising and schedule the
10188 * next instance. If there is only one instance then the
10189 * overridden advertising data will be visible right
10192 cancel_adv_timeout(hdev);
10194 next_instance = hci_get_next_instance(hdev, cp->instance);
10196 schedule_instance = next_instance->instance;
10197 } else if (!hdev->adv_instance_timeout) {
10198 /* Immediately advertise the new instance if no other
10199 * instance is currently being advertised.
10201 schedule_instance = cp->instance;
10204 /* If the HCI_ADVERTISING flag is set or there is no instance to
10205 * be advertised then we have no HCI communication to make.
10208 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
10209 if (adv_instance->pending) {
10210 mgmt_advertising_added(sk, hdev, cp->instance);
10211 adv_instance->pending = false;
10213 rp.instance = cp->instance;
10214 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10215 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10219 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
10223 goto clear_new_instance;
10226 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
10227 add_ext_adv_data_complete);
10229 mgmt_pending_free(cmd);
10230 goto clear_new_instance;
10233 /* We were successful in updating data, so trigger advertising_added
10234 * event if this is an instance that wasn't previously advertising. If
10235 * a failure occurs in the requests we initiated, we will remove the
10236 * instance again in add_advertising_complete
10238 if (adv_instance->pending)
10239 mgmt_advertising_added(sk, hdev, cp->instance);
10243 clear_new_instance:
10244 hci_remove_adv_instance(hdev, cp->instance);
10247 hci_dev_unlock(hdev);
10252 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
10255 struct mgmt_pending_cmd *cmd = data;
10256 struct mgmt_cp_remove_advertising *cp = cmd->param;
10257 struct mgmt_rp_remove_advertising rp;
10259 bt_dev_dbg(hdev, "err %d", err);
10261 memset(&rp, 0, sizeof(rp));
10262 rp.instance = cp->instance;
10265 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10268 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10269 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10271 mgmt_pending_free(cmd);
10274 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
10276 struct mgmt_pending_cmd *cmd = data;
10277 struct mgmt_cp_remove_advertising *cp = cmd->param;
10280 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
10284 if (list_empty(&hdev->adv_instances))
10285 err = hci_disable_advertising_sync(hdev);
10290 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
10291 void *data, u16 data_len)
10293 struct mgmt_cp_remove_advertising *cp = data;
10294 struct mgmt_pending_cmd *cmd;
10297 bt_dev_dbg(hdev, "sock %p", sk);
10299 hci_dev_lock(hdev);
10301 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
10302 err = mgmt_cmd_status(sk, hdev->id,
10303 MGMT_OP_REMOVE_ADVERTISING,
10304 MGMT_STATUS_INVALID_PARAMS);
10308 if (pending_find(MGMT_OP_SET_LE, hdev)) {
10309 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10314 if (list_empty(&hdev->adv_instances)) {
10315 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10316 MGMT_STATUS_INVALID_PARAMS);
10320 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
10327 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
10328 remove_advertising_complete);
10330 mgmt_pending_free(cmd);
10333 hci_dev_unlock(hdev);
10338 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
10339 void *data, u16 data_len)
10341 struct mgmt_cp_get_adv_size_info *cp = data;
10342 struct mgmt_rp_get_adv_size_info rp;
10343 u32 flags, supported_flags;
10345 bt_dev_dbg(hdev, "sock %p", sk);
10347 if (!lmp_le_capable(hdev))
10348 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10349 MGMT_STATUS_REJECTED);
10351 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10352 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10353 MGMT_STATUS_INVALID_PARAMS);
10355 flags = __le32_to_cpu(cp->flags);
10357 /* The current implementation only supports a subset of the specified
10360 supported_flags = get_supported_adv_flags(hdev);
10361 if (flags & ~supported_flags)
10362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10363 MGMT_STATUS_INVALID_PARAMS);
10365 rp.instance = cp->instance;
10366 rp.flags = cp->flags;
10367 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10368 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10370 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10371 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10374 static const struct hci_mgmt_handler mgmt_handlers[] = {
10375 { NULL }, /* 0x0000 (no command) */
10376 { read_version, MGMT_READ_VERSION_SIZE,
10378 HCI_MGMT_UNTRUSTED },
10379 { read_commands, MGMT_READ_COMMANDS_SIZE,
10381 HCI_MGMT_UNTRUSTED },
10382 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
10384 HCI_MGMT_UNTRUSTED },
10385 { read_controller_info, MGMT_READ_INFO_SIZE,
10386 HCI_MGMT_UNTRUSTED },
10387 { set_powered, MGMT_SETTING_SIZE },
10388 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
10389 { set_connectable, MGMT_SETTING_SIZE },
10390 { set_fast_connectable, MGMT_SETTING_SIZE },
10391 { set_bondable, MGMT_SETTING_SIZE },
10392 { set_link_security, MGMT_SETTING_SIZE },
10393 { set_ssp, MGMT_SETTING_SIZE },
10394 { set_hs, MGMT_SETTING_SIZE },
10395 { set_le, MGMT_SETTING_SIZE },
10396 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
10397 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
10398 { add_uuid, MGMT_ADD_UUID_SIZE },
10399 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
10400 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
10401 HCI_MGMT_VAR_LEN },
10402 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
10403 HCI_MGMT_VAR_LEN },
10404 { disconnect, MGMT_DISCONNECT_SIZE },
10405 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
10406 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
10407 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
10408 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
10409 { pair_device, MGMT_PAIR_DEVICE_SIZE },
10410 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
10411 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
10412 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
10413 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
10414 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
10415 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
10416 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
10417 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
10418 HCI_MGMT_VAR_LEN },
10419 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
10420 { start_discovery, MGMT_START_DISCOVERY_SIZE },
10421 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
10422 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
10423 { block_device, MGMT_BLOCK_DEVICE_SIZE },
10424 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
10425 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
10426 { set_advertising, MGMT_SETTING_SIZE },
10427 { set_bredr, MGMT_SETTING_SIZE },
10428 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
10429 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
10430 { set_secure_conn, MGMT_SETTING_SIZE },
10431 { set_debug_keys, MGMT_SETTING_SIZE },
10432 { set_privacy, MGMT_SET_PRIVACY_SIZE },
10433 { load_irks, MGMT_LOAD_IRKS_SIZE,
10434 HCI_MGMT_VAR_LEN },
10435 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
10436 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
10437 { add_device, MGMT_ADD_DEVICE_SIZE },
10438 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
10439 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
10440 HCI_MGMT_VAR_LEN },
10441 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
10443 HCI_MGMT_UNTRUSTED },
10444 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
10445 HCI_MGMT_UNCONFIGURED |
10446 HCI_MGMT_UNTRUSTED },
10447 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
10448 HCI_MGMT_UNCONFIGURED },
10449 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
10450 HCI_MGMT_UNCONFIGURED },
10451 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
10452 HCI_MGMT_VAR_LEN },
10453 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
10454 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
10456 HCI_MGMT_UNTRUSTED },
10457 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
10458 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
10459 HCI_MGMT_VAR_LEN },
10460 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
10461 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
10462 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
10463 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
10464 HCI_MGMT_UNTRUSTED },
10465 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
10466 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
10467 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
10468 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
10469 HCI_MGMT_VAR_LEN },
10470 { set_wideband_speech, MGMT_SETTING_SIZE },
10471 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
10472 HCI_MGMT_UNTRUSTED },
10473 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
10474 HCI_MGMT_UNTRUSTED |
10475 HCI_MGMT_HDEV_OPTIONAL },
10476 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
10478 HCI_MGMT_HDEV_OPTIONAL },
10479 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
10480 HCI_MGMT_UNTRUSTED },
10481 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
10482 HCI_MGMT_VAR_LEN },
10483 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
10484 HCI_MGMT_UNTRUSTED },
10485 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
10486 HCI_MGMT_VAR_LEN },
10487 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
10488 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
10489 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
10490 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
10491 HCI_MGMT_VAR_LEN },
10492 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
10493 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
10494 HCI_MGMT_VAR_LEN },
10495 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
10496 HCI_MGMT_VAR_LEN },
10497 { add_adv_patterns_monitor_rssi,
10498 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE },
10499 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
10500 HCI_MGMT_VAR_LEN },
10501 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
10502 { mesh_send, MGMT_MESH_SEND_SIZE,
10503 HCI_MGMT_VAR_LEN },
10504 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
10508 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
10509 { NULL }, /* 0x0000 (no command) */
10510 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
10511 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
10512 HCI_MGMT_VAR_LEN },
10513 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
10514 HCI_MGMT_VAR_LEN },
10515 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
10516 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
10517 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
10518 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
10519 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
10520 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
10524 void mgmt_index_added(struct hci_dev *hdev)
10526 struct mgmt_ev_ext_index ev;
10528 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10531 switch (hdev->dev_type) {
10533 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10534 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
10535 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10538 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
10539 HCI_MGMT_INDEX_EVENTS);
10550 ev.bus = hdev->bus;
10552 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
10553 HCI_MGMT_EXT_INDEX_EVENTS);
10556 void mgmt_index_removed(struct hci_dev *hdev)
10558 struct mgmt_ev_ext_index ev;
10559 u8 status = MGMT_STATUS_INVALID_INDEX;
10561 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
10564 switch (hdev->dev_type) {
10566 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10568 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
10569 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
10570 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
10573 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
10574 HCI_MGMT_INDEX_EVENTS);
10585 ev.bus = hdev->bus;
10587 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
10588 HCI_MGMT_EXT_INDEX_EVENTS);
10590 /* Cancel any remaining timed work */
10591 if (!hci_dev_test_flag(hdev, HCI_MGMT))
10593 cancel_delayed_work_sync(&hdev->discov_off);
10594 cancel_delayed_work_sync(&hdev->service_cache);
10595 cancel_delayed_work_sync(&hdev->rpa_expired);
10598 void mgmt_power_on(struct hci_dev *hdev, int err)
10600 struct cmd_lookup match = { NULL, hdev };
10602 bt_dev_dbg(hdev, "err %d", err);
10604 hci_dev_lock(hdev);
10607 restart_le_actions(hdev);
10608 hci_update_passive_scan(hdev);
10611 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10613 new_settings(hdev, match.sk);
10616 sock_put(match.sk);
10618 hci_dev_unlock(hdev);
10621 void __mgmt_power_off(struct hci_dev *hdev)
10623 struct cmd_lookup match = { NULL, hdev };
10624 u8 status, zero_cod[] = { 0, 0, 0 };
10626 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
10628 /* If the power off is because of hdev unregistration let
10629 * use the appropriate INVALID_INDEX status. Otherwise use
10630 * NOT_POWERED. We cover both scenarios here since later in
10631 * mgmt_index_removed() any hci_conn callbacks will have already
10632 * been triggered, potentially causing misleading DISCONNECTED
10633 * status responses.
10635 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
10636 status = MGMT_STATUS_INVALID_INDEX;
10638 status = MGMT_STATUS_NOT_POWERED;
10640 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
10642 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
10643 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
10644 zero_cod, sizeof(zero_cod),
10645 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10646 ext_info_changed(hdev, NULL);
10649 new_settings(hdev, match.sk);
10652 sock_put(match.sk);
10655 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
10657 struct mgmt_pending_cmd *cmd;
10660 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10664 if (err == -ERFKILL)
10665 status = MGMT_STATUS_RFKILLED;
10667 status = MGMT_STATUS_FAILED;
10669 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
10671 mgmt_pending_remove(cmd);
10674 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
10677 struct mgmt_ev_new_link_key ev;
10679 memset(&ev, 0, sizeof(ev));
10681 ev.store_hint = persistent;
10682 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10683 ev.key.addr.type = BDADDR_BREDR;
10684 ev.key.type = key->type;
10685 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
10686 ev.key.pin_len = key->pin_len;
10688 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
10691 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
10693 switch (ltk->type) {
10695 case SMP_LTK_RESPONDER:
10696 if (ltk->authenticated)
10697 return MGMT_LTK_AUTHENTICATED;
10698 return MGMT_LTK_UNAUTHENTICATED;
10700 if (ltk->authenticated)
10701 return MGMT_LTK_P256_AUTH;
10702 return MGMT_LTK_P256_UNAUTH;
10703 case SMP_LTK_P256_DEBUG:
10704 return MGMT_LTK_P256_DEBUG;
10707 return MGMT_LTK_UNAUTHENTICATED;
10710 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
10712 struct mgmt_ev_new_long_term_key ev;
10714 memset(&ev, 0, sizeof(ev));
10716 /* Devices using resolvable or non-resolvable random addresses
10717 * without providing an identity resolving key don't require
10718 * to store long term keys. Their addresses will change the
10719 * next time around.
10721 * Only when a remote device provides an identity address
10722 * make sure the long term key is stored. If the remote
10723 * identity is known, the long term keys are internally
10724 * mapped to the identity address. So allow static random
10725 * and public addresses here.
10727 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10728 (key->bdaddr.b[5] & 0xc0) != 0xc0)
10729 ev.store_hint = 0x00;
10731 ev.store_hint = persistent;
10733 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
10734 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
10735 ev.key.type = mgmt_ltk_type(key);
10736 ev.key.enc_size = key->enc_size;
10737 ev.key.ediv = key->ediv;
10738 ev.key.rand = key->rand;
10740 if (key->type == SMP_LTK)
10741 ev.key.initiator = 1;
10743 /* Make sure we copy only the significant bytes based on the
10744 * encryption key size, and set the rest of the value to zeroes.
10746 memcpy(ev.key.val, key->val, key->enc_size);
10747 memset(ev.key.val + key->enc_size, 0,
10748 sizeof(ev.key.val) - key->enc_size);
10750 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
10753 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
10755 struct mgmt_ev_new_irk ev;
10757 memset(&ev, 0, sizeof(ev));
10759 ev.store_hint = persistent;
10761 bacpy(&ev.rpa, &irk->rpa);
10762 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
10763 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
10764 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
10766 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
10769 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
10772 struct mgmt_ev_new_csrk ev;
10774 memset(&ev, 0, sizeof(ev));
10776 /* Devices using resolvable or non-resolvable random addresses
10777 * without providing an identity resolving key don't require
10778 * to store signature resolving keys. Their addresses will change
10779 * the next time around.
10781 * Only when a remote device provides an identity address
10782 * make sure the signature resolving key is stored. So allow
10783 * static random and public addresses here.
10785 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
10786 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
10787 ev.store_hint = 0x00;
10789 ev.store_hint = persistent;
10791 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
10792 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
10793 ev.key.type = csrk->type;
10794 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
10796 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
10799 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
10800 u8 bdaddr_type, u8 store_hint, u16 min_interval,
10801 u16 max_interval, u16 latency, u16 timeout)
10803 struct mgmt_ev_new_conn_param ev;
10805 if (!hci_is_identity_address(bdaddr, bdaddr_type))
10808 memset(&ev, 0, sizeof(ev));
10809 bacpy(&ev.addr.bdaddr, bdaddr);
10810 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
10811 ev.store_hint = store_hint;
10812 ev.min_interval = cpu_to_le16(min_interval);
10813 ev.max_interval = cpu_to_le16(max_interval);
10814 ev.latency = cpu_to_le16(latency);
10815 ev.timeout = cpu_to_le16(timeout);
10817 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
10820 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
10821 u8 *name, u8 name_len)
10823 struct sk_buff *skb;
10824 struct mgmt_ev_device_connected *ev;
10828 /* allocate buff for LE or BR/EDR adv */
10829 if (conn->le_adv_data_len > 0)
10830 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
10831 sizeof(*ev) + conn->le_adv_data_len);
10833 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
10834 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
10835 eir_precalc_len(sizeof(conn->dev_class)));
10837 ev = skb_put(skb, sizeof(*ev));
10838 bacpy(&ev->addr.bdaddr, &conn->dst);
10839 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10842 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
10844 ev->flags = __cpu_to_le32(flags);
10846 /* We must ensure that the EIR Data fields are ordered and
10847 * unique. Keep it simple for now and avoid the problem by not
10848 * adding any BR/EDR data to the LE adv.
10850 if (conn->le_adv_data_len > 0) {
10851 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
10852 eir_len = conn->le_adv_data_len;
10855 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10857 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
10858 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
10859 conn->dev_class, sizeof(conn->dev_class));
10862 ev->eir_len = cpu_to_le16(eir_len);
10864 mgmt_event_skb(skb, NULL);
10867 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
10869 struct sock **sk = data;
10871 cmd->cmd_complete(cmd, 0);
10876 mgmt_pending_remove(cmd);
10879 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
10881 struct hci_dev *hdev = data;
10882 struct mgmt_cp_unpair_device *cp = cmd->param;
10884 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
10886 cmd->cmd_complete(cmd, 0);
10887 mgmt_pending_remove(cmd);
10890 bool mgmt_powering_down(struct hci_dev *hdev)
10892 struct mgmt_pending_cmd *cmd;
10893 struct mgmt_mode *cp;
10895 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10906 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
10907 u8 link_type, u8 addr_type, u8 reason,
10908 bool mgmt_connected)
10910 struct mgmt_ev_device_disconnected ev;
10911 struct sock *sk = NULL;
10913 /* The connection is still in hci_conn_hash so test for 1
10914 * instead of 0 to know if this is the last one.
10916 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10917 cancel_delayed_work(&hdev->power_off);
10918 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10921 if (!mgmt_connected)
10924 if (link_type != ACL_LINK && link_type != LE_LINK)
10927 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
10929 bacpy(&ev.addr.bdaddr, bdaddr);
10930 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10931 ev.reason = reason;
10933 /* Report disconnects due to suspend */
10934 if (hdev->suspended)
10935 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
10937 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
10942 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10946 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10947 u8 link_type, u8 addr_type, u8 status)
10949 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
10950 struct mgmt_cp_disconnect *cp;
10951 struct mgmt_pending_cmd *cmd;
10953 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10956 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
10962 if (bacmp(bdaddr, &cp->addr.bdaddr))
10965 if (cp->addr.type != bdaddr_type)
10968 cmd->cmd_complete(cmd, mgmt_status(status));
10969 mgmt_pending_remove(cmd);
10972 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10973 u8 addr_type, u8 status)
10975 struct mgmt_ev_connect_failed ev;
10977 /* The connection is still in hci_conn_hash so test for 1
10978 * instead of 0 to know if this is the last one.
10980 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10981 cancel_delayed_work(&hdev->power_off);
10982 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10985 bacpy(&ev.addr.bdaddr, bdaddr);
10986 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10987 ev.status = mgmt_status(status);
10989 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
10992 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
10994 struct mgmt_ev_pin_code_request ev;
10996 bacpy(&ev.addr.bdaddr, bdaddr);
10997 ev.addr.type = BDADDR_BREDR;
10998 ev.secure = secure;
11000 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
11003 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11006 struct mgmt_pending_cmd *cmd;
11008 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
11012 cmd->cmd_complete(cmd, mgmt_status(status));
11013 mgmt_pending_remove(cmd);
11016 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11019 struct mgmt_pending_cmd *cmd;
11021 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
11025 cmd->cmd_complete(cmd, mgmt_status(status));
11026 mgmt_pending_remove(cmd);
11029 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11030 u8 link_type, u8 addr_type, u32 value,
11033 struct mgmt_ev_user_confirm_request ev;
11035 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11037 bacpy(&ev.addr.bdaddr, bdaddr);
11038 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11039 ev.confirm_hint = confirm_hint;
11040 ev.value = cpu_to_le32(value);
11042 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
11046 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11047 u8 link_type, u8 addr_type)
11049 struct mgmt_ev_user_passkey_request ev;
11051 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11053 bacpy(&ev.addr.bdaddr, bdaddr);
11054 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11056 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
11060 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11061 u8 link_type, u8 addr_type, u8 status,
11064 struct mgmt_pending_cmd *cmd;
11066 cmd = pending_find(opcode, hdev);
11070 cmd->cmd_complete(cmd, mgmt_status(status));
11071 mgmt_pending_remove(cmd);
11076 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11077 u8 link_type, u8 addr_type, u8 status)
11079 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11080 status, MGMT_OP_USER_CONFIRM_REPLY);
11083 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11084 u8 link_type, u8 addr_type, u8 status)
11086 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11088 MGMT_OP_USER_CONFIRM_NEG_REPLY);
11091 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11092 u8 link_type, u8 addr_type, u8 status)
11094 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11095 status, MGMT_OP_USER_PASSKEY_REPLY);
11098 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11099 u8 link_type, u8 addr_type, u8 status)
11101 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11103 MGMT_OP_USER_PASSKEY_NEG_REPLY);
11106 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
11107 u8 link_type, u8 addr_type, u32 passkey,
11110 struct mgmt_ev_passkey_notify ev;
11112 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11114 bacpy(&ev.addr.bdaddr, bdaddr);
11115 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11116 ev.passkey = __cpu_to_le32(passkey);
11117 ev.entered = entered;
11119 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
11122 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
11124 struct mgmt_ev_auth_failed ev;
11125 struct mgmt_pending_cmd *cmd;
11126 u8 status = mgmt_status(hci_status);
11128 bacpy(&ev.addr.bdaddr, &conn->dst);
11129 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11130 ev.status = status;
11132 cmd = find_pairing(conn);
11134 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
11135 cmd ? cmd->sk : NULL);
11138 cmd->cmd_complete(cmd, status);
11139 mgmt_pending_remove(cmd);
11143 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
11145 struct cmd_lookup match = { NULL, hdev };
11149 u8 mgmt_err = mgmt_status(status);
11150 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
11151 cmd_status_rsp, &mgmt_err);
11155 if (test_bit(HCI_AUTH, &hdev->flags))
11156 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
11158 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
11160 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
11164 new_settings(hdev, match.sk);
11167 sock_put(match.sk);
11170 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11172 struct cmd_lookup *match = data;
11174 if (match->sk == NULL) {
11175 match->sk = cmd->sk;
11176 sock_hold(match->sk);
11180 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11183 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11185 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11186 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11187 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11190 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11191 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11192 ext_info_changed(hdev, NULL);
11196 sock_put(match.sk);
11199 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11201 struct mgmt_cp_set_local_name ev;
11202 struct mgmt_pending_cmd *cmd;
11207 memset(&ev, 0, sizeof(ev));
11208 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11209 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11211 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11213 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11215 /* If this is a HCI command related to powering on the
11216 * HCI dev don't send any mgmt signals.
11218 if (pending_find(MGMT_OP_SET_POWERED, hdev))
11222 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11223 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11224 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
11227 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11231 for (i = 0; i < uuid_count; i++) {
11232 if (!memcmp(uuid, uuids[i], 16))
11239 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
11243 while (parsed < eir_len) {
11244 u8 field_len = eir[0];
11248 if (field_len == 0)
11251 if (eir_len - parsed < field_len + 1)
11255 case EIR_UUID16_ALL:
11256 case EIR_UUID16_SOME:
11257 for (i = 0; i + 3 <= field_len; i += 2) {
11258 memcpy(uuid, bluetooth_base_uuid, 16);
11259 uuid[13] = eir[i + 3];
11260 uuid[12] = eir[i + 2];
11261 if (has_uuid(uuid, uuid_count, uuids))
11265 case EIR_UUID32_ALL:
11266 case EIR_UUID32_SOME:
11267 for (i = 0; i + 5 <= field_len; i += 4) {
11268 memcpy(uuid, bluetooth_base_uuid, 16);
11269 uuid[15] = eir[i + 5];
11270 uuid[14] = eir[i + 4];
11271 uuid[13] = eir[i + 3];
11272 uuid[12] = eir[i + 2];
11273 if (has_uuid(uuid, uuid_count, uuids))
11277 case EIR_UUID128_ALL:
11278 case EIR_UUID128_SOME:
11279 for (i = 0; i + 17 <= field_len; i += 16) {
11280 memcpy(uuid, eir + i + 2, 16);
11281 if (has_uuid(uuid, uuid_count, uuids))
11287 parsed += field_len + 1;
11288 eir += field_len + 1;
11294 static void restart_le_scan(struct hci_dev *hdev)
11296 /* If controller is not scanning we are done. */
11297 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
11300 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
11301 hdev->discovery.scan_start +
11302 hdev->discovery.scan_duration))
11305 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
11306 DISCOV_LE_RESTART_DELAY);
11309 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
11310 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11312 /* If a RSSI threshold has been specified, and
11313 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
11314 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
11315 * is set, let it through for further processing, as we might need to
11316 * restart the scan.
11318 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
11319 * the results are also dropped.
11321 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11322 (rssi == HCI_RSSI_INVALID ||
11323 (rssi < hdev->discovery.rssi &&
11324 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
11327 if (hdev->discovery.uuid_count != 0) {
11328 /* If a list of UUIDs is provided in filter, results with no
11329 * matching UUID should be dropped.
11331 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
11332 hdev->discovery.uuids) &&
11333 !eir_has_uuids(scan_rsp, scan_rsp_len,
11334 hdev->discovery.uuid_count,
11335 hdev->discovery.uuids))
11339 /* If duplicate filtering does not report RSSI changes, then restart
11340 * scanning to ensure updated result with updated RSSI values.
11342 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
11343 restart_le_scan(hdev);
11345 /* Validate RSSI value against the RSSI threshold once more. */
11346 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11347 rssi < hdev->discovery.rssi)
11354 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
11355 bdaddr_t *bdaddr, u8 addr_type)
11357 struct mgmt_ev_adv_monitor_device_lost ev;
11359 ev.monitor_handle = cpu_to_le16(handle);
11360 bacpy(&ev.addr.bdaddr, bdaddr);
11361 ev.addr.type = addr_type;
11363 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
11367 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
11368 struct sk_buff *skb,
11369 struct sock *skip_sk,
11372 struct sk_buff *advmon_skb;
11373 size_t advmon_skb_len;
11374 __le16 *monitor_handle;
11379 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
11380 sizeof(struct mgmt_ev_device_found)) + skb->len;
11381 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
11386 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
11387 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
11388 * store monitor_handle of the matched monitor.
11390 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
11391 *monitor_handle = cpu_to_le16(handle);
11392 skb_put_data(advmon_skb, skb->data, skb->len);
11394 mgmt_event_skb(advmon_skb, skip_sk);
11397 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
11398 bdaddr_t *bdaddr, bool report_device,
11399 struct sk_buff *skb,
11400 struct sock *skip_sk)
11402 struct monitored_device *dev, *tmp;
11403 bool matched = false;
11404 bool notified = false;
11406 /* We have received the Advertisement Report because:
11407 * 1. the kernel has initiated active discovery
11408 * 2. if not, we have pend_le_reports > 0 in which case we are doing
11410 * 3. if none of the above is true, we have one or more active
11411 * Advertisement Monitor
11413 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
11414 * and report ONLY one advertisement per device for the matched Monitor
11415 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
11417 * For case 3, since we are not active scanning and all advertisements
11418 * received are due to a matched Advertisement Monitor, report all
11419 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
11421 if (report_device && !hdev->advmon_pend_notify) {
11422 mgmt_event_skb(skb, skip_sk);
11426 hdev->advmon_pend_notify = false;
11428 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
11429 if (!bacmp(&dev->bdaddr, bdaddr)) {
11432 if (!dev->notified) {
11433 mgmt_send_adv_monitor_device_found(hdev, skb,
11437 dev->notified = true;
11441 if (!dev->notified)
11442 hdev->advmon_pend_notify = true;
11445 if (!report_device &&
11446 ((matched && !notified) || !msft_monitor_supported(hdev))) {
11447 /* Handle 0 indicates that we are not active scanning and this
11448 * is a subsequent advertisement report for an already matched
11449 * Advertisement Monitor or the controller offloading support
11450 * is not available.
11452 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
11456 mgmt_event_skb(skb, skip_sk);
11461 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
11462 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
11463 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
11466 struct sk_buff *skb;
11467 struct mgmt_ev_mesh_device_found *ev;
11470 if (!hdev->mesh_ad_types[0])
11473 /* Scan for requested AD types */
11475 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
11476 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
11477 if (!hdev->mesh_ad_types[j])
11480 if (hdev->mesh_ad_types[j] == eir[i + 1])
11486 if (scan_rsp_len > 0) {
11487 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
11488 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
11489 if (!hdev->mesh_ad_types[j])
11492 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
11501 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
11502 sizeof(*ev) + eir_len + scan_rsp_len);
11506 ev = skb_put(skb, sizeof(*ev));
11508 bacpy(&ev->addr.bdaddr, bdaddr);
11509 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
11511 ev->flags = cpu_to_le32(flags);
11512 ev->instant = cpu_to_le64(instant);
11515 /* Copy EIR or advertising data into event */
11516 skb_put_data(skb, eir, eir_len);
11518 if (scan_rsp_len > 0)
11519 /* Append scan response data to event */
11520 skb_put_data(skb, scan_rsp, scan_rsp_len);
11522 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
11524 mgmt_event_skb(skb, NULL);
11527 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11528 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
11529 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
11532 struct sk_buff *skb;
11533 struct mgmt_ev_device_found *ev;
11534 bool report_device = hci_discovery_active(hdev);
11536 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
11537 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
11538 eir, eir_len, scan_rsp, scan_rsp_len,
11541 /* Don't send events for a non-kernel initiated discovery. With
11542 * LE one exception is if we have pend_le_reports > 0 in which
11543 * case we're doing passive scanning and want these events.
11545 if (!hci_discovery_active(hdev)) {
11546 if (link_type == ACL_LINK)
11548 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
11549 report_device = true;
11550 else if (!hci_is_adv_monitoring(hdev))
11554 if (hdev->discovery.result_filtering) {
11555 /* We are using service discovery */
11556 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
11561 if (hdev->discovery.limited) {
11562 /* Check for limited discoverable bit */
11564 if (!(dev_class[1] & 0x20))
11567 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
11568 if (!flags || !(flags[0] & LE_AD_LIMITED))
11573 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
11574 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
11575 sizeof(*ev) + eir_len + scan_rsp_len + 5);
11579 ev = skb_put(skb, sizeof(*ev));
11581 /* In case of device discovery with BR/EDR devices (pre 1.2), the
11582 * RSSI value was reported as 0 when not available. This behavior
11583 * is kept when using device discovery. This is required for full
11584 * backwards compatibility with the API.
11586 * However when using service discovery, the value 127 will be
11587 * returned when the RSSI is not available.
11589 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
11590 link_type == ACL_LINK)
11593 bacpy(&ev->addr.bdaddr, bdaddr);
11594 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11596 ev->flags = cpu_to_le32(flags);
11599 /* Copy EIR or advertising data into event */
11600 skb_put_data(skb, eir, eir_len);
11602 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
11605 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
11607 skb_put_data(skb, eir_cod, sizeof(eir_cod));
11610 if (scan_rsp_len > 0)
11611 /* Append scan response data to event */
11612 skb_put_data(skb, scan_rsp, scan_rsp_len);
11614 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
11616 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
11619 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11620 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
11622 struct sk_buff *skb;
11623 struct mgmt_ev_device_found *ev;
11627 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
11628 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
11630 ev = skb_put(skb, sizeof(*ev));
11631 bacpy(&ev->addr.bdaddr, bdaddr);
11632 ev->addr.type = link_to_bdaddr(link_type, addr_type);
11636 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
11638 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
11640 ev->eir_len = cpu_to_le16(eir_len);
11641 ev->flags = cpu_to_le32(flags);
11643 mgmt_event_skb(skb, NULL);
11646 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
11648 struct mgmt_ev_discovering ev;
11650 bt_dev_dbg(hdev, "discovering %u", discovering);
11652 memset(&ev, 0, sizeof(ev));
11653 ev.type = hdev->discovery.type;
11654 ev.discovering = discovering;
11656 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
11659 void mgmt_suspending(struct hci_dev *hdev, u8 state)
11661 struct mgmt_ev_controller_suspend ev;
11663 ev.suspend_state = state;
11664 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
11667 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
11670 struct mgmt_ev_controller_resume ev;
11672 ev.wake_reason = reason;
11674 bacpy(&ev.addr.bdaddr, bdaddr);
11675 ev.addr.type = addr_type;
11677 memset(&ev.addr, 0, sizeof(ev.addr));
11680 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
11683 static struct hci_mgmt_chan chan = {
11684 .channel = HCI_CHANNEL_CONTROL,
11685 .handler_count = ARRAY_SIZE(mgmt_handlers),
11686 .handlers = mgmt_handlers,
11688 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
11689 .tizen_handlers = tizen_mgmt_handlers,
11691 .hdev_init = mgmt_init_hdev,
11694 int mgmt_init(void)
11696 return hci_mgmt_chan_register(&chan);
11699 void mgmt_exit(void)
11701 hci_mgmt_chan_unregister(&chan);
11704 void mgmt_cleanup(struct sock *sk)
11706 struct mgmt_mesh_tx *mesh_tx;
11707 struct hci_dev *hdev;
11709 read_lock(&hci_dev_list_lock);
11711 list_for_each_entry(hdev, &hci_dev_list, list) {
11713 mesh_tx = mgmt_mesh_next(hdev, sk);
11716 mesh_send_complete(hdev, mesh_tx, true);
11720 read_unlock(&hci_dev_list_lock);