2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
39 #include "hci_request.h"
41 #include "mgmt_util.h"
42 #include "mgmt_config.h"
47 #define MGMT_VERSION 1
48 #define MGMT_REVISION 22
50 static const u16 mgmt_commands[] = {
51 MGMT_OP_READ_INDEX_LIST,
54 MGMT_OP_SET_DISCOVERABLE,
55 MGMT_OP_SET_CONNECTABLE,
56 MGMT_OP_SET_FAST_CONNECTABLE,
58 MGMT_OP_SET_LINK_SECURITY,
62 MGMT_OP_SET_DEV_CLASS,
63 MGMT_OP_SET_LOCAL_NAME,
66 MGMT_OP_LOAD_LINK_KEYS,
67 MGMT_OP_LOAD_LONG_TERM_KEYS,
69 MGMT_OP_GET_CONNECTIONS,
70 MGMT_OP_PIN_CODE_REPLY,
71 MGMT_OP_PIN_CODE_NEG_REPLY,
72 MGMT_OP_SET_IO_CAPABILITY,
74 MGMT_OP_CANCEL_PAIR_DEVICE,
75 MGMT_OP_UNPAIR_DEVICE,
76 MGMT_OP_USER_CONFIRM_REPLY,
77 MGMT_OP_USER_CONFIRM_NEG_REPLY,
78 MGMT_OP_USER_PASSKEY_REPLY,
79 MGMT_OP_USER_PASSKEY_NEG_REPLY,
80 MGMT_OP_READ_LOCAL_OOB_DATA,
81 MGMT_OP_ADD_REMOTE_OOB_DATA,
82 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
83 MGMT_OP_START_DISCOVERY,
84 MGMT_OP_STOP_DISCOVERY,
87 MGMT_OP_UNBLOCK_DEVICE,
88 MGMT_OP_SET_DEVICE_ID,
89 MGMT_OP_SET_ADVERTISING,
91 MGMT_OP_SET_STATIC_ADDRESS,
92 MGMT_OP_SET_SCAN_PARAMS,
93 MGMT_OP_SET_SECURE_CONN,
94 MGMT_OP_SET_DEBUG_KEYS,
97 MGMT_OP_GET_CONN_INFO,
98 MGMT_OP_GET_CLOCK_INFO,
100 MGMT_OP_REMOVE_DEVICE,
101 MGMT_OP_LOAD_CONN_PARAM,
102 MGMT_OP_READ_UNCONF_INDEX_LIST,
103 MGMT_OP_READ_CONFIG_INFO,
104 MGMT_OP_SET_EXTERNAL_CONFIG,
105 MGMT_OP_SET_PUBLIC_ADDRESS,
106 MGMT_OP_START_SERVICE_DISCOVERY,
107 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
108 MGMT_OP_READ_EXT_INDEX_LIST,
109 MGMT_OP_READ_ADV_FEATURES,
110 MGMT_OP_ADD_ADVERTISING,
111 MGMT_OP_REMOVE_ADVERTISING,
112 MGMT_OP_GET_ADV_SIZE_INFO,
113 MGMT_OP_START_LIMITED_DISCOVERY,
114 MGMT_OP_READ_EXT_INFO,
115 MGMT_OP_SET_APPEARANCE,
116 MGMT_OP_GET_PHY_CONFIGURATION,
117 MGMT_OP_SET_PHY_CONFIGURATION,
118 MGMT_OP_SET_BLOCKED_KEYS,
119 MGMT_OP_SET_WIDEBAND_SPEECH,
120 MGMT_OP_READ_CONTROLLER_CAP,
121 MGMT_OP_READ_EXP_FEATURES_INFO,
122 MGMT_OP_SET_EXP_FEATURE,
123 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
124 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
125 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
126 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
127 MGMT_OP_GET_DEVICE_FLAGS,
128 MGMT_OP_SET_DEVICE_FLAGS,
129 MGMT_OP_READ_ADV_MONITOR_FEATURES,
130 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
131 MGMT_OP_REMOVE_ADV_MONITOR,
132 MGMT_OP_ADD_EXT_ADV_PARAMS,
133 MGMT_OP_ADD_EXT_ADV_DATA,
134 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
135 MGMT_OP_SET_MESH_RECEIVER,
136 MGMT_OP_MESH_READ_FEATURES,
138 MGMT_OP_MESH_SEND_CANCEL,
141 static const u16 mgmt_events[] = {
142 MGMT_EV_CONTROLLER_ERROR,
144 MGMT_EV_INDEX_REMOVED,
145 MGMT_EV_NEW_SETTINGS,
146 MGMT_EV_CLASS_OF_DEV_CHANGED,
147 MGMT_EV_LOCAL_NAME_CHANGED,
148 MGMT_EV_NEW_LINK_KEY,
149 MGMT_EV_NEW_LONG_TERM_KEY,
150 MGMT_EV_DEVICE_CONNECTED,
151 MGMT_EV_DEVICE_DISCONNECTED,
152 MGMT_EV_CONNECT_FAILED,
153 MGMT_EV_PIN_CODE_REQUEST,
154 MGMT_EV_USER_CONFIRM_REQUEST,
155 MGMT_EV_USER_PASSKEY_REQUEST,
157 MGMT_EV_DEVICE_FOUND,
159 MGMT_EV_DEVICE_BLOCKED,
160 MGMT_EV_DEVICE_UNBLOCKED,
161 MGMT_EV_DEVICE_UNPAIRED,
162 MGMT_EV_PASSKEY_NOTIFY,
165 MGMT_EV_DEVICE_ADDED,
166 MGMT_EV_DEVICE_REMOVED,
167 MGMT_EV_NEW_CONN_PARAM,
168 MGMT_EV_UNCONF_INDEX_ADDED,
169 MGMT_EV_UNCONF_INDEX_REMOVED,
170 MGMT_EV_NEW_CONFIG_OPTIONS,
171 MGMT_EV_EXT_INDEX_ADDED,
172 MGMT_EV_EXT_INDEX_REMOVED,
173 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
174 MGMT_EV_ADVERTISING_ADDED,
175 MGMT_EV_ADVERTISING_REMOVED,
176 MGMT_EV_EXT_INFO_CHANGED,
177 MGMT_EV_PHY_CONFIGURATION_CHANGED,
178 MGMT_EV_EXP_FEATURE_CHANGED,
179 MGMT_EV_DEVICE_FLAGS_CHANGED,
180 MGMT_EV_ADV_MONITOR_ADDED,
181 MGMT_EV_ADV_MONITOR_REMOVED,
182 MGMT_EV_CONTROLLER_SUSPEND,
183 MGMT_EV_CONTROLLER_RESUME,
184 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
185 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
188 static const u16 mgmt_untrusted_commands[] = {
189 MGMT_OP_READ_INDEX_LIST,
191 MGMT_OP_READ_UNCONF_INDEX_LIST,
192 MGMT_OP_READ_CONFIG_INFO,
193 MGMT_OP_READ_EXT_INDEX_LIST,
194 MGMT_OP_READ_EXT_INFO,
195 MGMT_OP_READ_CONTROLLER_CAP,
196 MGMT_OP_READ_EXP_FEATURES_INFO,
197 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
198 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
201 static const u16 mgmt_untrusted_events[] = {
203 MGMT_EV_INDEX_REMOVED,
204 MGMT_EV_NEW_SETTINGS,
205 MGMT_EV_CLASS_OF_DEV_CHANGED,
206 MGMT_EV_LOCAL_NAME_CHANGED,
207 MGMT_EV_UNCONF_INDEX_ADDED,
208 MGMT_EV_UNCONF_INDEX_REMOVED,
209 MGMT_EV_NEW_CONFIG_OPTIONS,
210 MGMT_EV_EXT_INDEX_ADDED,
211 MGMT_EV_EXT_INDEX_REMOVED,
212 MGMT_EV_EXT_INFO_CHANGED,
213 MGMT_EV_EXP_FEATURE_CHANGED,
216 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
218 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
219 "\x00\x00\x00\x00\x00\x00\x00\x00"
221 /* HCI to MGMT error code conversion table */
222 static const u8 mgmt_status_table[] = {
224 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
225 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
226 MGMT_STATUS_FAILED, /* Hardware Failure */
227 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
228 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
229 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
230 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
231 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
232 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
233 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
234 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
235 MGMT_STATUS_BUSY, /* Command Disallowed */
236 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
237 MGMT_STATUS_REJECTED, /* Rejected Security */
238 MGMT_STATUS_REJECTED, /* Rejected Personal */
239 MGMT_STATUS_TIMEOUT, /* Host Timeout */
240 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
241 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
242 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
243 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
244 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
245 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
246 MGMT_STATUS_BUSY, /* Repeated Attempts */
247 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
248 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
249 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
250 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
251 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
252 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
253 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
254 MGMT_STATUS_FAILED, /* Unspecified Error */
255 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
256 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
257 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
258 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
259 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
260 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
261 MGMT_STATUS_FAILED, /* Unit Link Key Used */
262 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
263 MGMT_STATUS_TIMEOUT, /* Instant Passed */
264 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
265 MGMT_STATUS_FAILED, /* Transaction Collision */
266 MGMT_STATUS_FAILED, /* Reserved for future use */
267 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
268 MGMT_STATUS_REJECTED, /* QoS Rejected */
269 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
270 MGMT_STATUS_REJECTED, /* Insufficient Security */
271 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
272 MGMT_STATUS_FAILED, /* Reserved for future use */
273 MGMT_STATUS_BUSY, /* Role Switch Pending */
274 MGMT_STATUS_FAILED, /* Reserved for future use */
275 MGMT_STATUS_FAILED, /* Slot Violation */
276 MGMT_STATUS_FAILED, /* Role Switch Failed */
277 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
278 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
279 MGMT_STATUS_BUSY, /* Host Busy Pairing */
280 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
281 MGMT_STATUS_BUSY, /* Controller Busy */
282 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
283 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
284 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
285 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
286 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
289 static u8 mgmt_errno_status(int err)
293 return MGMT_STATUS_SUCCESS;
295 return MGMT_STATUS_REJECTED;
297 return MGMT_STATUS_INVALID_PARAMS;
299 return MGMT_STATUS_NOT_SUPPORTED;
301 return MGMT_STATUS_BUSY;
303 return MGMT_STATUS_AUTH_FAILED;
305 return MGMT_STATUS_NO_RESOURCES;
307 return MGMT_STATUS_ALREADY_CONNECTED;
309 return MGMT_STATUS_DISCONNECTED;
312 return MGMT_STATUS_FAILED;
315 static u8 mgmt_status(int err)
318 return mgmt_errno_status(err);
320 if (err < ARRAY_SIZE(mgmt_status_table))
321 return mgmt_status_table[err];
323 return MGMT_STATUS_FAILED;
326 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
329 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
333 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
334 u16 len, int flag, struct sock *skip_sk)
336 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
340 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
341 struct sock *skip_sk)
343 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
344 HCI_SOCK_TRUSTED, skip_sk);
347 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
349 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
353 static u8 le_addr_type(u8 mgmt_addr_type)
355 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
356 return ADDR_LE_DEV_PUBLIC;
358 return ADDR_LE_DEV_RANDOM;
361 void mgmt_fill_version_info(void *ver)
363 struct mgmt_rp_read_version *rp = ver;
365 rp->version = MGMT_VERSION;
366 rp->revision = cpu_to_le16(MGMT_REVISION);
369 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
372 struct mgmt_rp_read_version rp;
374 bt_dev_dbg(hdev, "sock %p", sk);
376 mgmt_fill_version_info(&rp);
378 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
382 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
385 struct mgmt_rp_read_commands *rp;
386 u16 num_commands, num_events;
390 bt_dev_dbg(hdev, "sock %p", sk);
392 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
393 num_commands = ARRAY_SIZE(mgmt_commands);
394 num_events = ARRAY_SIZE(mgmt_events);
396 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
397 num_events = ARRAY_SIZE(mgmt_untrusted_events);
400 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
402 rp = kmalloc(rp_size, GFP_KERNEL);
406 rp->num_commands = cpu_to_le16(num_commands);
407 rp->num_events = cpu_to_le16(num_events);
409 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
410 __le16 *opcode = rp->opcodes;
412 for (i = 0; i < num_commands; i++, opcode++)
413 put_unaligned_le16(mgmt_commands[i], opcode);
415 for (i = 0; i < num_events; i++, opcode++)
416 put_unaligned_le16(mgmt_events[i], opcode);
418 __le16 *opcode = rp->opcodes;
420 for (i = 0; i < num_commands; i++, opcode++)
421 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
423 for (i = 0; i < num_events; i++, opcode++)
424 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
427 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
434 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
437 struct mgmt_rp_read_index_list *rp;
443 bt_dev_dbg(hdev, "sock %p", sk);
445 read_lock(&hci_dev_list_lock);
448 list_for_each_entry(d, &hci_dev_list, list) {
449 if (d->dev_type == HCI_PRIMARY &&
450 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
454 rp_len = sizeof(*rp) + (2 * count);
455 rp = kmalloc(rp_len, GFP_ATOMIC);
457 read_unlock(&hci_dev_list_lock);
462 list_for_each_entry(d, &hci_dev_list, list) {
463 if (hci_dev_test_flag(d, HCI_SETUP) ||
464 hci_dev_test_flag(d, HCI_CONFIG) ||
465 hci_dev_test_flag(d, HCI_USER_CHANNEL))
468 /* Devices marked as raw-only are neither configured
469 * nor unconfigured controllers.
471 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
474 if (d->dev_type == HCI_PRIMARY &&
475 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
476 rp->index[count++] = cpu_to_le16(d->id);
477 bt_dev_dbg(hdev, "Added hci%u", d->id);
481 rp->num_controllers = cpu_to_le16(count);
482 rp_len = sizeof(*rp) + (2 * count);
484 read_unlock(&hci_dev_list_lock);
486 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
494 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
495 void *data, u16 data_len)
497 struct mgmt_rp_read_unconf_index_list *rp;
503 bt_dev_dbg(hdev, "sock %p", sk);
505 read_lock(&hci_dev_list_lock);
508 list_for_each_entry(d, &hci_dev_list, list) {
509 if (d->dev_type == HCI_PRIMARY &&
510 hci_dev_test_flag(d, HCI_UNCONFIGURED))
514 rp_len = sizeof(*rp) + (2 * count);
515 rp = kmalloc(rp_len, GFP_ATOMIC);
517 read_unlock(&hci_dev_list_lock);
522 list_for_each_entry(d, &hci_dev_list, list) {
523 if (hci_dev_test_flag(d, HCI_SETUP) ||
524 hci_dev_test_flag(d, HCI_CONFIG) ||
525 hci_dev_test_flag(d, HCI_USER_CHANNEL))
528 /* Devices marked as raw-only are neither configured
529 * nor unconfigured controllers.
531 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
534 if (d->dev_type == HCI_PRIMARY &&
535 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
536 rp->index[count++] = cpu_to_le16(d->id);
537 bt_dev_dbg(hdev, "Added hci%u", d->id);
541 rp->num_controllers = cpu_to_le16(count);
542 rp_len = sizeof(*rp) + (2 * count);
544 read_unlock(&hci_dev_list_lock);
546 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
547 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
554 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
555 void *data, u16 data_len)
557 struct mgmt_rp_read_ext_index_list *rp;
562 bt_dev_dbg(hdev, "sock %p", sk);
564 read_lock(&hci_dev_list_lock);
567 list_for_each_entry(d, &hci_dev_list, list) {
568 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
572 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
574 read_unlock(&hci_dev_list_lock);
579 list_for_each_entry(d, &hci_dev_list, list) {
580 if (hci_dev_test_flag(d, HCI_SETUP) ||
581 hci_dev_test_flag(d, HCI_CONFIG) ||
582 hci_dev_test_flag(d, HCI_USER_CHANNEL))
585 /* Devices marked as raw-only are neither configured
586 * nor unconfigured controllers.
588 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
591 if (d->dev_type == HCI_PRIMARY) {
592 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
593 rp->entry[count].type = 0x01;
595 rp->entry[count].type = 0x00;
596 } else if (d->dev_type == HCI_AMP) {
597 rp->entry[count].type = 0x02;
602 rp->entry[count].bus = d->bus;
603 rp->entry[count++].index = cpu_to_le16(d->id);
604 bt_dev_dbg(hdev, "Added hci%u", d->id);
607 rp->num_controllers = cpu_to_le16(count);
609 read_unlock(&hci_dev_list_lock);
611 /* If this command is called at least once, then all the
612 * default index and unconfigured index events are disabled
613 * and from now on only extended index events are used.
615 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
616 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
617 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
619 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
620 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
621 struct_size(rp, entry, count));
628 static bool is_configured(struct hci_dev *hdev)
630 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
631 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
634 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
635 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
636 !bacmp(&hdev->public_addr, BDADDR_ANY))
642 static __le32 get_missing_options(struct hci_dev *hdev)
646 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
647 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
648 options |= MGMT_OPTION_EXTERNAL_CONFIG;
650 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
651 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
652 !bacmp(&hdev->public_addr, BDADDR_ANY))
653 options |= MGMT_OPTION_PUBLIC_ADDRESS;
655 return cpu_to_le32(options);
658 static int new_options(struct hci_dev *hdev, struct sock *skip)
660 __le32 options = get_missing_options(hdev);
662 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
663 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
666 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
668 __le32 options = get_missing_options(hdev);
670 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
674 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
675 void *data, u16 data_len)
677 struct mgmt_rp_read_config_info rp;
680 bt_dev_dbg(hdev, "sock %p", sk);
684 memset(&rp, 0, sizeof(rp));
685 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
687 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
688 options |= MGMT_OPTION_EXTERNAL_CONFIG;
690 if (hdev->set_bdaddr)
691 options |= MGMT_OPTION_PUBLIC_ADDRESS;
693 rp.supported_options = cpu_to_le32(options);
694 rp.missing_options = get_missing_options(hdev);
696 hci_dev_unlock(hdev);
698 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
702 static u32 get_supported_phys(struct hci_dev *hdev)
704 u32 supported_phys = 0;
706 if (lmp_bredr_capable(hdev)) {
707 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
709 if (hdev->features[0][0] & LMP_3SLOT)
710 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
712 if (hdev->features[0][0] & LMP_5SLOT)
713 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
715 if (lmp_edr_2m_capable(hdev)) {
716 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
718 if (lmp_edr_3slot_capable(hdev))
719 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
721 if (lmp_edr_5slot_capable(hdev))
722 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
724 if (lmp_edr_3m_capable(hdev)) {
725 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
727 if (lmp_edr_3slot_capable(hdev))
728 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
730 if (lmp_edr_5slot_capable(hdev))
731 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
736 if (lmp_le_capable(hdev)) {
737 supported_phys |= MGMT_PHY_LE_1M_TX;
738 supported_phys |= MGMT_PHY_LE_1M_RX;
740 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
741 supported_phys |= MGMT_PHY_LE_2M_TX;
742 supported_phys |= MGMT_PHY_LE_2M_RX;
745 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
746 supported_phys |= MGMT_PHY_LE_CODED_TX;
747 supported_phys |= MGMT_PHY_LE_CODED_RX;
751 return supported_phys;
754 static u32 get_selected_phys(struct hci_dev *hdev)
756 u32 selected_phys = 0;
758 if (lmp_bredr_capable(hdev)) {
759 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
761 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
762 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
764 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
765 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
767 if (lmp_edr_2m_capable(hdev)) {
768 if (!(hdev->pkt_type & HCI_2DH1))
769 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
771 if (lmp_edr_3slot_capable(hdev) &&
772 !(hdev->pkt_type & HCI_2DH3))
773 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
775 if (lmp_edr_5slot_capable(hdev) &&
776 !(hdev->pkt_type & HCI_2DH5))
777 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
779 if (lmp_edr_3m_capable(hdev)) {
780 if (!(hdev->pkt_type & HCI_3DH1))
781 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
783 if (lmp_edr_3slot_capable(hdev) &&
784 !(hdev->pkt_type & HCI_3DH3))
785 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
787 if (lmp_edr_5slot_capable(hdev) &&
788 !(hdev->pkt_type & HCI_3DH5))
789 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
794 if (lmp_le_capable(hdev)) {
795 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
796 selected_phys |= MGMT_PHY_LE_1M_TX;
798 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
799 selected_phys |= MGMT_PHY_LE_1M_RX;
801 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
802 selected_phys |= MGMT_PHY_LE_2M_TX;
804 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
805 selected_phys |= MGMT_PHY_LE_2M_RX;
807 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
808 selected_phys |= MGMT_PHY_LE_CODED_TX;
810 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
811 selected_phys |= MGMT_PHY_LE_CODED_RX;
814 return selected_phys;
817 static u32 get_configurable_phys(struct hci_dev *hdev)
819 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
820 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
823 static u32 get_supported_settings(struct hci_dev *hdev)
827 settings |= MGMT_SETTING_POWERED;
828 settings |= MGMT_SETTING_BONDABLE;
829 settings |= MGMT_SETTING_DEBUG_KEYS;
830 settings |= MGMT_SETTING_CONNECTABLE;
831 settings |= MGMT_SETTING_DISCOVERABLE;
833 if (lmp_bredr_capable(hdev)) {
834 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
835 settings |= MGMT_SETTING_FAST_CONNECTABLE;
836 settings |= MGMT_SETTING_BREDR;
837 settings |= MGMT_SETTING_LINK_SECURITY;
839 if (lmp_ssp_capable(hdev)) {
840 settings |= MGMT_SETTING_SSP;
841 if (IS_ENABLED(CONFIG_BT_HS))
842 settings |= MGMT_SETTING_HS;
845 if (lmp_sc_capable(hdev))
846 settings |= MGMT_SETTING_SECURE_CONN;
848 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
850 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
853 if (lmp_le_capable(hdev)) {
854 settings |= MGMT_SETTING_LE;
855 settings |= MGMT_SETTING_SECURE_CONN;
856 settings |= MGMT_SETTING_PRIVACY;
857 settings |= MGMT_SETTING_STATIC_ADDRESS;
858 settings |= MGMT_SETTING_ADVERTISING;
861 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
863 settings |= MGMT_SETTING_CONFIGURATION;
865 if (cis_central_capable(hdev))
866 settings |= MGMT_SETTING_CIS_CENTRAL;
868 if (cis_peripheral_capable(hdev))
869 settings |= MGMT_SETTING_CIS_PERIPHERAL;
871 settings |= MGMT_SETTING_PHY_CONFIGURATION;
876 static u32 get_current_settings(struct hci_dev *hdev)
880 if (hdev_is_powered(hdev))
881 settings |= MGMT_SETTING_POWERED;
883 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
884 settings |= MGMT_SETTING_CONNECTABLE;
886 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
887 settings |= MGMT_SETTING_FAST_CONNECTABLE;
889 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
890 settings |= MGMT_SETTING_DISCOVERABLE;
892 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
893 settings |= MGMT_SETTING_BONDABLE;
895 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
896 settings |= MGMT_SETTING_BREDR;
898 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
899 settings |= MGMT_SETTING_LE;
901 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
902 settings |= MGMT_SETTING_LINK_SECURITY;
904 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
905 settings |= MGMT_SETTING_SSP;
907 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
908 settings |= MGMT_SETTING_HS;
910 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
911 settings |= MGMT_SETTING_ADVERTISING;
913 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
914 settings |= MGMT_SETTING_SECURE_CONN;
916 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
917 settings |= MGMT_SETTING_DEBUG_KEYS;
919 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
920 settings |= MGMT_SETTING_PRIVACY;
922 /* The current setting for static address has two purposes. The
923 * first is to indicate if the static address will be used and
924 * the second is to indicate if it is actually set.
926 * This means if the static address is not configured, this flag
927 * will never be set. If the address is configured, then if the
928 * address is actually used decides if the flag is set or not.
930 * For single mode LE only controllers and dual-mode controllers
931 * with BR/EDR disabled, the existence of the static address will
934 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
935 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
936 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
937 if (bacmp(&hdev->static_addr, BDADDR_ANY))
938 settings |= MGMT_SETTING_STATIC_ADDRESS;
941 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
942 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
944 if (cis_central_capable(hdev))
945 settings |= MGMT_SETTING_CIS_CENTRAL;
947 if (cis_peripheral_capable(hdev))
948 settings |= MGMT_SETTING_CIS_PERIPHERAL;
953 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
955 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
958 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
960 struct mgmt_pending_cmd *cmd;
962 /* If there's a pending mgmt command the flags will not yet have
963 * their final values, so check for this first.
965 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
967 struct mgmt_mode *cp = cmd->param;
969 return LE_AD_GENERAL;
970 else if (cp->val == 0x02)
971 return LE_AD_LIMITED;
973 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
974 return LE_AD_LIMITED;
975 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
976 return LE_AD_GENERAL;
982 bool mgmt_get_connectable(struct hci_dev *hdev)
984 struct mgmt_pending_cmd *cmd;
986 /* If there's a pending mgmt command the flag will not yet have
987 * it's final value, so check for this first.
989 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
991 struct mgmt_mode *cp = cmd->param;
996 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
999 static int service_cache_sync(struct hci_dev *hdev, void *data)
1001 hci_update_eir_sync(hdev);
1002 hci_update_class_sync(hdev);
1007 static void service_cache_off(struct work_struct *work)
1009 struct hci_dev *hdev = container_of(work, struct hci_dev,
1010 service_cache.work);
1012 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1015 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1018 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1020 /* The generation of a new RPA and programming it into the
1021 * controller happens in the hci_req_enable_advertising()
1024 if (ext_adv_capable(hdev))
1025 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1027 return hci_enable_advertising_sync(hdev);
1030 static void rpa_expired(struct work_struct *work)
1032 struct hci_dev *hdev = container_of(work, struct hci_dev,
1035 bt_dev_dbg(hdev, "");
1037 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1039 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1042 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1045 static void discov_off(struct work_struct *work)
1047 struct hci_dev *hdev = container_of(work, struct hci_dev,
1050 bt_dev_dbg(hdev, "");
1054 /* When discoverable timeout triggers, then just make sure
1055 * the limited discoverable flag is cleared. Even in the case
1056 * of a timeout triggered from general discoverable, it is
1057 * safe to unconditionally clear the flag.
1059 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1060 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1061 hdev->discov_timeout = 0;
1063 hci_update_discoverable(hdev);
1065 mgmt_new_settings(hdev);
1067 hci_dev_unlock(hdev);
1070 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1072 static void mesh_send_complete(struct hci_dev *hdev,
1073 struct mgmt_mesh_tx *mesh_tx, bool silent)
1075 u8 handle = mesh_tx->handle;
1078 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1079 sizeof(handle), NULL);
1081 mgmt_mesh_remove(mesh_tx);
1084 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1086 struct mgmt_mesh_tx *mesh_tx;
1088 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1089 hci_disable_advertising_sync(hdev);
1090 mesh_tx = mgmt_mesh_next(hdev, NULL);
1093 mesh_send_complete(hdev, mesh_tx, false);
1098 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1099 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1100 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1102 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1107 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1108 mesh_send_start_complete);
1111 mesh_send_complete(hdev, mesh_tx, false);
1113 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1116 static void mesh_send_done(struct work_struct *work)
1118 struct hci_dev *hdev = container_of(work, struct hci_dev,
1119 mesh_send_done.work);
1121 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1124 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1127 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1129 if (hci_dev_test_flag(hdev, HCI_MGMT))
1132 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1134 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1135 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1136 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1137 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1139 /* Non-mgmt controlled devices get this bit set
1140 * implicitly so that pairing works for them, however
1141 * for mgmt we require user-space to explicitly enable
1144 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1146 hci_dev_set_flag(hdev, HCI_MGMT);
1149 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1150 void *data, u16 data_len)
1152 struct mgmt_rp_read_info rp;
1154 bt_dev_dbg(hdev, "sock %p", sk);
1158 memset(&rp, 0, sizeof(rp));
1160 bacpy(&rp.bdaddr, &hdev->bdaddr);
1162 rp.version = hdev->hci_ver;
1163 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1165 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1166 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1168 memcpy(rp.dev_class, hdev->dev_class, 3);
1170 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1171 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1173 hci_dev_unlock(hdev);
1175 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1179 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1184 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1185 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1186 hdev->dev_class, 3);
1188 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1189 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1192 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1193 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1194 hdev->dev_name, name_len);
1196 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1197 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1198 hdev->short_name, name_len);
1203 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1204 void *data, u16 data_len)
1207 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1210 bt_dev_dbg(hdev, "sock %p", sk);
1212 memset(&buf, 0, sizeof(buf));
1216 bacpy(&rp->bdaddr, &hdev->bdaddr);
1218 rp->version = hdev->hci_ver;
1219 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1221 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1222 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1225 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1226 rp->eir_len = cpu_to_le16(eir_len);
1228 hci_dev_unlock(hdev);
1230 /* If this command is called at least once, then the events
1231 * for class of device and local name changes are disabled
1232 * and only the new extended controller information event
1235 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1236 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1237 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1239 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1240 sizeof(*rp) + eir_len);
1243 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1246 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1249 memset(buf, 0, sizeof(buf));
1251 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1252 ev->eir_len = cpu_to_le16(eir_len);
1254 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1255 sizeof(*ev) + eir_len,
1256 HCI_MGMT_EXT_INFO_EVENTS, skip);
1259 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1261 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1263 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1267 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1269 struct mgmt_ev_advertising_added ev;
1271 ev.instance = instance;
1273 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1276 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1279 struct mgmt_ev_advertising_removed ev;
1281 ev.instance = instance;
1283 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1286 static void cancel_adv_timeout(struct hci_dev *hdev)
1288 if (hdev->adv_instance_timeout) {
1289 hdev->adv_instance_timeout = 0;
1290 cancel_delayed_work(&hdev->adv_instance_expire);
1294 /* This function requires the caller holds hdev->lock */
1295 static void restart_le_actions(struct hci_dev *hdev)
1297 struct hci_conn_params *p;
1299 list_for_each_entry(p, &hdev->le_conn_params, list) {
1300 /* Needed for AUTO_OFF case where might not "really"
1301 * have been powered off.
1303 list_del_init(&p->action);
1305 switch (p->auto_connect) {
1306 case HCI_AUTO_CONN_DIRECT:
1307 case HCI_AUTO_CONN_ALWAYS:
1308 list_add(&p->action, &hdev->pend_le_conns);
1310 case HCI_AUTO_CONN_REPORT:
1311 list_add(&p->action, &hdev->pend_le_reports);
1319 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1321 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1323 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1324 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1327 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1329 struct mgmt_pending_cmd *cmd = data;
1330 struct mgmt_mode *cp;
1332 /* Make sure cmd still outstanding. */
1333 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1338 bt_dev_dbg(hdev, "err %d", err);
1343 restart_le_actions(hdev);
1344 hci_update_passive_scan(hdev);
1345 hci_dev_unlock(hdev);
1348 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1350 /* Only call new_setting for power on as power off is deferred
1351 * to hdev->power_off work which does call hci_dev_do_close.
1354 new_settings(hdev, cmd->sk);
1356 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1360 mgmt_pending_remove(cmd);
1363 static int set_powered_sync(struct hci_dev *hdev, void *data)
1365 struct mgmt_pending_cmd *cmd = data;
1366 struct mgmt_mode *cp = cmd->param;
1368 BT_DBG("%s", hdev->name);
1370 return hci_set_powered_sync(hdev, cp->val);
1373 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1376 struct mgmt_mode *cp = data;
1377 struct mgmt_pending_cmd *cmd;
1380 bt_dev_dbg(hdev, "sock %p", sk);
1382 if (cp->val != 0x00 && cp->val != 0x01)
1383 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1384 MGMT_STATUS_INVALID_PARAMS);
1388 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1389 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1394 if (!!cp->val == hdev_is_powered(hdev)) {
1395 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1399 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1405 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1406 mgmt_set_powered_complete);
1409 mgmt_pending_remove(cmd);
1412 hci_dev_unlock(hdev);
1416 int mgmt_new_settings(struct hci_dev *hdev)
1418 return new_settings(hdev, NULL);
1423 struct hci_dev *hdev;
1427 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1429 struct cmd_lookup *match = data;
1431 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1433 list_del(&cmd->list);
1435 if (match->sk == NULL) {
1436 match->sk = cmd->sk;
1437 sock_hold(match->sk);
1440 mgmt_pending_free(cmd);
1443 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1447 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1448 mgmt_pending_remove(cmd);
1451 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1453 if (cmd->cmd_complete) {
1456 cmd->cmd_complete(cmd, *status);
1457 mgmt_pending_remove(cmd);
1462 cmd_status_rsp(cmd, data);
1465 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1467 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1468 cmd->param, cmd->param_len);
1471 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1473 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1474 cmd->param, sizeof(struct mgmt_addr_info));
1477 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1479 if (!lmp_bredr_capable(hdev))
1480 return MGMT_STATUS_NOT_SUPPORTED;
1481 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1482 return MGMT_STATUS_REJECTED;
1484 return MGMT_STATUS_SUCCESS;
1487 static u8 mgmt_le_support(struct hci_dev *hdev)
1489 if (!lmp_le_capable(hdev))
1490 return MGMT_STATUS_NOT_SUPPORTED;
1491 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1492 return MGMT_STATUS_REJECTED;
1494 return MGMT_STATUS_SUCCESS;
1497 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1500 struct mgmt_pending_cmd *cmd = data;
1502 bt_dev_dbg(hdev, "err %d", err);
1504 /* Make sure cmd still outstanding. */
1505 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1511 u8 mgmt_err = mgmt_status(err);
1512 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1513 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1517 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1518 hdev->discov_timeout > 0) {
1519 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1520 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1523 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1524 new_settings(hdev, cmd->sk);
1527 mgmt_pending_remove(cmd);
1528 hci_dev_unlock(hdev);
1531 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1533 BT_DBG("%s", hdev->name);
1535 return hci_update_discoverable_sync(hdev);
1538 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1541 struct mgmt_cp_set_discoverable *cp = data;
1542 struct mgmt_pending_cmd *cmd;
1546 bt_dev_dbg(hdev, "sock %p", sk);
1548 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1549 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1550 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1551 MGMT_STATUS_REJECTED);
1553 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1554 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1555 MGMT_STATUS_INVALID_PARAMS);
1557 timeout = __le16_to_cpu(cp->timeout);
1559 /* Disabling discoverable requires that no timeout is set,
1560 * and enabling limited discoverable requires a timeout.
1562 if ((cp->val == 0x00 && timeout > 0) ||
1563 (cp->val == 0x02 && timeout == 0))
1564 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1565 MGMT_STATUS_INVALID_PARAMS);
1569 if (!hdev_is_powered(hdev) && timeout > 0) {
1570 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1571 MGMT_STATUS_NOT_POWERED);
1575 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1576 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1577 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1582 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1583 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1584 MGMT_STATUS_REJECTED);
1588 if (hdev->advertising_paused) {
1589 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1594 if (!hdev_is_powered(hdev)) {
1595 bool changed = false;
1597 /* Setting limited discoverable when powered off is
1598 * not a valid operation since it requires a timeout
1599 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1601 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1602 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1606 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1611 err = new_settings(hdev, sk);
1616 /* If the current mode is the same, then just update the timeout
1617 * value with the new value. And if only the timeout gets updated,
1618 * then no need for any HCI transactions.
1620 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1621 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1622 HCI_LIMITED_DISCOVERABLE)) {
1623 cancel_delayed_work(&hdev->discov_off);
1624 hdev->discov_timeout = timeout;
1626 if (cp->val && hdev->discov_timeout > 0) {
1627 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1628 queue_delayed_work(hdev->req_workqueue,
1629 &hdev->discov_off, to);
1632 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1636 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1642 /* Cancel any potential discoverable timeout that might be
1643 * still active and store new timeout value. The arming of
1644 * the timeout happens in the complete handler.
1646 cancel_delayed_work(&hdev->discov_off);
1647 hdev->discov_timeout = timeout;
1650 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1652 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1654 /* Limited discoverable mode */
1655 if (cp->val == 0x02)
1656 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1658 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1660 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1661 mgmt_set_discoverable_complete);
1664 mgmt_pending_remove(cmd);
1667 hci_dev_unlock(hdev);
1671 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1674 struct mgmt_pending_cmd *cmd = data;
1676 bt_dev_dbg(hdev, "err %d", err);
1678 /* Make sure cmd still outstanding. */
1679 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1685 u8 mgmt_err = mgmt_status(err);
1686 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1690 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1691 new_settings(hdev, cmd->sk);
1695 mgmt_pending_remove(cmd);
1697 hci_dev_unlock(hdev);
1700 static int set_connectable_update_settings(struct hci_dev *hdev,
1701 struct sock *sk, u8 val)
1703 bool changed = false;
1706 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1710 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1712 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1713 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1716 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1721 hci_update_scan(hdev);
1722 hci_update_passive_scan(hdev);
1723 return new_settings(hdev, sk);
1729 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1731 BT_DBG("%s", hdev->name);
1733 return hci_update_connectable_sync(hdev);
1736 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1739 struct mgmt_mode *cp = data;
1740 struct mgmt_pending_cmd *cmd;
1743 bt_dev_dbg(hdev, "sock %p", sk);
1745 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1746 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1747 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1748 MGMT_STATUS_REJECTED);
1750 if (cp->val != 0x00 && cp->val != 0x01)
1751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1752 MGMT_STATUS_INVALID_PARAMS);
1756 if (!hdev_is_powered(hdev)) {
1757 err = set_connectable_update_settings(hdev, sk, cp->val);
1761 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1762 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1763 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1768 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1775 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1777 if (hdev->discov_timeout > 0)
1778 cancel_delayed_work(&hdev->discov_off);
1780 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1781 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1782 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1785 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1786 mgmt_set_connectable_complete);
1789 mgmt_pending_remove(cmd);
1792 hci_dev_unlock(hdev);
1796 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1799 struct mgmt_mode *cp = data;
1803 bt_dev_dbg(hdev, "sock %p", sk);
1805 if (cp->val != 0x00 && cp->val != 0x01)
1806 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1807 MGMT_STATUS_INVALID_PARAMS);
1812 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1814 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1816 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1821 /* In limited privacy mode the change of bondable mode
1822 * may affect the local advertising address.
1824 hci_update_discoverable(hdev);
1826 err = new_settings(hdev, sk);
1830 hci_dev_unlock(hdev);
1834 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1837 struct mgmt_mode *cp = data;
1838 struct mgmt_pending_cmd *cmd;
1842 bt_dev_dbg(hdev, "sock %p", sk);
1844 status = mgmt_bredr_support(hdev);
1846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1849 if (cp->val != 0x00 && cp->val != 0x01)
1850 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1851 MGMT_STATUS_INVALID_PARAMS);
1855 if (!hdev_is_powered(hdev)) {
1856 bool changed = false;
1858 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1859 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1863 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1868 err = new_settings(hdev, sk);
1873 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1874 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1881 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1882 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1886 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1892 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1894 mgmt_pending_remove(cmd);
1899 hci_dev_unlock(hdev);
1903 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1905 struct cmd_lookup match = { NULL, hdev };
1906 struct mgmt_pending_cmd *cmd = data;
1907 struct mgmt_mode *cp = cmd->param;
1908 u8 enable = cp->val;
1911 /* Make sure cmd still outstanding. */
1912 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1916 u8 mgmt_err = mgmt_status(err);
1918 if (enable && hci_dev_test_and_clear_flag(hdev,
1920 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1921 new_settings(hdev, NULL);
1924 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1930 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1932 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1935 changed = hci_dev_test_and_clear_flag(hdev,
1938 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1941 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1944 new_settings(hdev, match.sk);
1949 hci_update_eir_sync(hdev);
1952 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1954 struct mgmt_pending_cmd *cmd = data;
1955 struct mgmt_mode *cp = cmd->param;
1956 bool changed = false;
1960 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1962 err = hci_write_ssp_mode_sync(hdev, cp->val);
1964 if (!err && changed)
1965 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1970 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1972 struct mgmt_mode *cp = data;
1973 struct mgmt_pending_cmd *cmd;
1977 bt_dev_dbg(hdev, "sock %p", sk);
1979 status = mgmt_bredr_support(hdev);
1981 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1983 if (!lmp_ssp_capable(hdev))
1984 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1985 MGMT_STATUS_NOT_SUPPORTED);
1987 if (cp->val != 0x00 && cp->val != 0x01)
1988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1989 MGMT_STATUS_INVALID_PARAMS);
1993 if (!hdev_is_powered(hdev)) {
1997 changed = !hci_dev_test_and_set_flag(hdev,
2000 changed = hci_dev_test_and_clear_flag(hdev,
2003 changed = hci_dev_test_and_clear_flag(hdev,
2006 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2009 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2014 err = new_settings(hdev, sk);
2019 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2020 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2025 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2026 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2030 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2034 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2038 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2039 MGMT_STATUS_FAILED);
2042 mgmt_pending_remove(cmd);
2046 hci_dev_unlock(hdev);
2050 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2052 struct mgmt_mode *cp = data;
2057 bt_dev_dbg(hdev, "sock %p", sk);
2059 if (!IS_ENABLED(CONFIG_BT_HS))
2060 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2061 MGMT_STATUS_NOT_SUPPORTED);
2063 status = mgmt_bredr_support(hdev);
2065 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2067 if (!lmp_ssp_capable(hdev))
2068 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2069 MGMT_STATUS_NOT_SUPPORTED);
2071 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2072 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2073 MGMT_STATUS_REJECTED);
2075 if (cp->val != 0x00 && cp->val != 0x01)
2076 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2077 MGMT_STATUS_INVALID_PARAMS);
2081 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2082 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2088 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2090 if (hdev_is_powered(hdev)) {
2091 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2092 MGMT_STATUS_REJECTED);
2096 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2099 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2104 err = new_settings(hdev, sk);
2107 hci_dev_unlock(hdev);
2111 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2113 struct cmd_lookup match = { NULL, hdev };
2114 u8 status = mgmt_status(err);
2116 bt_dev_dbg(hdev, "err %d", err);
2119 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2124 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2126 new_settings(hdev, match.sk);
2132 static int set_le_sync(struct hci_dev *hdev, void *data)
2134 struct mgmt_pending_cmd *cmd = data;
2135 struct mgmt_mode *cp = cmd->param;
2140 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2142 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2143 hci_disable_advertising_sync(hdev);
2145 if (ext_adv_capable(hdev))
2146 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2148 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2151 err = hci_write_le_host_supported_sync(hdev, val, 0);
2153 /* Make sure the controller has a good default for
2154 * advertising data. Restrict the update to when LE
2155 * has actually been enabled. During power on, the
2156 * update in powered_update_hci will take care of it.
2158 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2159 if (ext_adv_capable(hdev)) {
2162 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2164 hci_update_scan_rsp_data_sync(hdev, 0x00);
2166 hci_update_adv_data_sync(hdev, 0x00);
2167 hci_update_scan_rsp_data_sync(hdev, 0x00);
2170 hci_update_passive_scan(hdev);
2176 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2178 struct mgmt_pending_cmd *cmd = data;
2179 u8 status = mgmt_status(err);
2180 struct sock *sk = cmd->sk;
2183 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2184 cmd_status_rsp, &status);
2188 mgmt_pending_remove(cmd);
2189 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2192 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2194 struct mgmt_pending_cmd *cmd = data;
2195 struct mgmt_cp_set_mesh *cp = cmd->param;
2196 size_t len = cmd->param_len;
2198 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2201 hci_dev_set_flag(hdev, HCI_MESH);
2203 hci_dev_clear_flag(hdev, HCI_MESH);
2207 /* If filters don't fit, forward all adv pkts */
2208 if (len <= sizeof(hdev->mesh_ad_types))
2209 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2211 hci_update_passive_scan_sync(hdev);
2215 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2217 struct mgmt_cp_set_mesh *cp = data;
2218 struct mgmt_pending_cmd *cmd;
2221 bt_dev_dbg(hdev, "sock %p", sk);
2223 if (!lmp_le_capable(hdev) ||
2224 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2225 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2226 MGMT_STATUS_NOT_SUPPORTED);
2228 if (cp->enable != 0x00 && cp->enable != 0x01)
2229 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2230 MGMT_STATUS_INVALID_PARAMS);
2234 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2238 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2242 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2243 MGMT_STATUS_FAILED);
2246 mgmt_pending_remove(cmd);
2249 hci_dev_unlock(hdev);
2253 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2255 struct mgmt_mesh_tx *mesh_tx = data;
2256 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2257 unsigned long mesh_send_interval;
2258 u8 mgmt_err = mgmt_status(err);
2260 /* Report any errors here, but don't report completion */
2263 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2264 /* Send Complete Error Code for handle */
2265 mesh_send_complete(hdev, mesh_tx, false);
2269 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2270 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2271 mesh_send_interval);
2274 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2276 struct mgmt_mesh_tx *mesh_tx = data;
2277 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2278 struct adv_info *adv, *next_instance;
2279 u8 instance = hdev->le_num_of_adv_sets + 1;
2280 u16 timeout, duration;
2283 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2284 return MGMT_STATUS_BUSY;
2287 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2288 adv = hci_add_adv_instance(hdev, instance, 0,
2289 send->adv_data_len, send->adv_data,
2292 HCI_ADV_TX_POWER_NO_PREFERENCE,
2293 hdev->le_adv_min_interval,
2294 hdev->le_adv_max_interval,
2298 mesh_tx->instance = instance;
2302 if (hdev->cur_adv_instance == instance) {
2303 /* If the currently advertised instance is being changed then
2304 * cancel the current advertising and schedule the next
2305 * instance. If there is only one instance then the overridden
2306 * advertising data will be visible right away.
2308 cancel_adv_timeout(hdev);
2310 next_instance = hci_get_next_instance(hdev, instance);
2312 instance = next_instance->instance;
2315 } else if (hdev->adv_instance_timeout) {
2316 /* Immediately advertise the new instance if no other, or
2317 * let it go naturally from queue if ADV is already happening
2323 return hci_schedule_adv_instance_sync(hdev, instance, true);
2328 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2330 struct mgmt_rp_mesh_read_features *rp = data;
2332 if (rp->used_handles >= rp->max_handles)
2335 rp->handles[rp->used_handles++] = mesh_tx->handle;
2338 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2339 void *data, u16 len)
2341 struct mgmt_rp_mesh_read_features rp;
2343 if (!lmp_le_capable(hdev) ||
2344 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2345 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2346 MGMT_STATUS_NOT_SUPPORTED);
2348 memset(&rp, 0, sizeof(rp));
2349 rp.index = cpu_to_le16(hdev->id);
2350 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2351 rp.max_handles = MESH_HANDLES_MAX;
2356 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2358 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2359 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2361 hci_dev_unlock(hdev);
2365 static int send_cancel(struct hci_dev *hdev, void *data)
2367 struct mgmt_pending_cmd *cmd = data;
2368 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2369 struct mgmt_mesh_tx *mesh_tx;
2371 if (!cancel->handle) {
2373 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2376 mesh_send_complete(hdev, mesh_tx, false);
2379 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2381 if (mesh_tx && mesh_tx->sk == cmd->sk)
2382 mesh_send_complete(hdev, mesh_tx, false);
2385 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2387 mgmt_pending_free(cmd);
2392 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2393 void *data, u16 len)
2395 struct mgmt_pending_cmd *cmd;
2398 if (!lmp_le_capable(hdev) ||
2399 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2400 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2401 MGMT_STATUS_NOT_SUPPORTED);
2403 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2404 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2405 MGMT_STATUS_REJECTED);
2408 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2412 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2415 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2416 MGMT_STATUS_FAILED);
2419 mgmt_pending_free(cmd);
2422 hci_dev_unlock(hdev);
2426 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2428 struct mgmt_mesh_tx *mesh_tx;
2429 struct mgmt_cp_mesh_send *send = data;
2430 struct mgmt_rp_mesh_read_features rp;
2434 if (!lmp_le_capable(hdev) ||
2435 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2436 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2437 MGMT_STATUS_NOT_SUPPORTED);
2438 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2439 len <= MGMT_MESH_SEND_SIZE ||
2440 len > (MGMT_MESH_SEND_SIZE + 31))
2441 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2442 MGMT_STATUS_REJECTED);
2446 memset(&rp, 0, sizeof(rp));
2447 rp.max_handles = MESH_HANDLES_MAX;
2449 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2451 if (rp.max_handles <= rp.used_handles) {
2452 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2457 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2458 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2463 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2464 mesh_send_start_complete);
2467 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2468 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2469 MGMT_STATUS_FAILED);
2473 mgmt_mesh_remove(mesh_tx);
2476 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2478 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2479 &mesh_tx->handle, 1);
2483 hci_dev_unlock(hdev);
2487 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2489 struct mgmt_mode *cp = data;
2490 struct mgmt_pending_cmd *cmd;
2494 bt_dev_dbg(hdev, "sock %p", sk);
2496 if (!lmp_le_capable(hdev))
2497 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2498 MGMT_STATUS_NOT_SUPPORTED);
2500 if (cp->val != 0x00 && cp->val != 0x01)
2501 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2502 MGMT_STATUS_INVALID_PARAMS);
2504 /* Bluetooth single mode LE only controllers or dual-mode
2505 * controllers configured as LE only devices, do not allow
2506 * switching LE off. These have either LE enabled explicitly
2507 * or BR/EDR has been previously switched off.
2509 * When trying to enable an already enabled LE, then gracefully
2510 * send a positive response. Trying to disable it however will
2511 * result into rejection.
2513 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2514 if (cp->val == 0x01)
2515 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2517 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2518 MGMT_STATUS_REJECTED);
2524 enabled = lmp_host_le_capable(hdev);
2526 if (!hdev_is_powered(hdev) || val == enabled) {
2527 bool changed = false;
2529 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2530 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2534 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2535 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2539 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2544 err = new_settings(hdev, sk);
2549 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2550 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2551 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2556 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2560 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2564 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2565 MGMT_STATUS_FAILED);
2568 mgmt_pending_remove(cmd);
2572 hci_dev_unlock(hdev);
2576 /* This is a helper function to test for pending mgmt commands that can
2577 * cause CoD or EIR HCI commands. We can only allow one such pending
2578 * mgmt command at a time since otherwise we cannot easily track what
2579 * the current values are, will be, and based on that calculate if a new
2580 * HCI command needs to be sent and if yes with what value.
2582 static bool pending_eir_or_class(struct hci_dev *hdev)
2584 struct mgmt_pending_cmd *cmd;
2586 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2587 switch (cmd->opcode) {
2588 case MGMT_OP_ADD_UUID:
2589 case MGMT_OP_REMOVE_UUID:
2590 case MGMT_OP_SET_DEV_CLASS:
2591 case MGMT_OP_SET_POWERED:
2599 static const u8 bluetooth_base_uuid[] = {
2600 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2601 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2604 static u8 get_uuid_size(const u8 *uuid)
2608 if (memcmp(uuid, bluetooth_base_uuid, 12))
2611 val = get_unaligned_le32(&uuid[12]);
2618 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2620 struct mgmt_pending_cmd *cmd = data;
2622 bt_dev_dbg(hdev, "err %d", err);
2624 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2625 mgmt_status(err), hdev->dev_class, 3);
2627 mgmt_pending_free(cmd);
2630 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2634 err = hci_update_class_sync(hdev);
2638 return hci_update_eir_sync(hdev);
2641 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2643 struct mgmt_cp_add_uuid *cp = data;
2644 struct mgmt_pending_cmd *cmd;
2645 struct bt_uuid *uuid;
2648 bt_dev_dbg(hdev, "sock %p", sk);
2652 if (pending_eir_or_class(hdev)) {
2653 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2658 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2664 memcpy(uuid->uuid, cp->uuid, 16);
2665 uuid->svc_hint = cp->svc_hint;
2666 uuid->size = get_uuid_size(cp->uuid);
2668 list_add_tail(&uuid->list, &hdev->uuids);
2670 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2676 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2678 mgmt_pending_free(cmd);
2683 hci_dev_unlock(hdev);
2687 static bool enable_service_cache(struct hci_dev *hdev)
2689 if (!hdev_is_powered(hdev))
2692 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2693 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2701 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2705 err = hci_update_class_sync(hdev);
2709 return hci_update_eir_sync(hdev);
2712 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2715 struct mgmt_cp_remove_uuid *cp = data;
2716 struct mgmt_pending_cmd *cmd;
2717 struct bt_uuid *match, *tmp;
2718 static const u8 bt_uuid_any[] = {
2719 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2723 bt_dev_dbg(hdev, "sock %p", sk);
2727 if (pending_eir_or_class(hdev)) {
2728 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2733 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2734 hci_uuids_clear(hdev);
2736 if (enable_service_cache(hdev)) {
2737 err = mgmt_cmd_complete(sk, hdev->id,
2738 MGMT_OP_REMOVE_UUID,
2739 0, hdev->dev_class, 3);
2748 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2749 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2752 list_del(&match->list);
2758 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2759 MGMT_STATUS_INVALID_PARAMS);
2764 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2770 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2771 mgmt_class_complete);
2773 mgmt_pending_free(cmd);
2776 hci_dev_unlock(hdev);
2780 static int set_class_sync(struct hci_dev *hdev, void *data)
2784 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2785 cancel_delayed_work_sync(&hdev->service_cache);
2786 err = hci_update_eir_sync(hdev);
2792 return hci_update_class_sync(hdev);
2795 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2798 struct mgmt_cp_set_dev_class *cp = data;
2799 struct mgmt_pending_cmd *cmd;
2802 bt_dev_dbg(hdev, "sock %p", sk);
2804 if (!lmp_bredr_capable(hdev))
2805 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2806 MGMT_STATUS_NOT_SUPPORTED);
2810 if (pending_eir_or_class(hdev)) {
2811 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2816 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2817 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2818 MGMT_STATUS_INVALID_PARAMS);
2822 hdev->major_class = cp->major;
2823 hdev->minor_class = cp->minor;
2825 if (!hdev_is_powered(hdev)) {
2826 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2827 hdev->dev_class, 3);
2831 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2837 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2838 mgmt_class_complete);
2840 mgmt_pending_free(cmd);
2843 hci_dev_unlock(hdev);
2847 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2850 struct mgmt_cp_load_link_keys *cp = data;
2851 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2852 sizeof(struct mgmt_link_key_info));
2853 u16 key_count, expected_len;
2857 bt_dev_dbg(hdev, "sock %p", sk);
2859 if (!lmp_bredr_capable(hdev))
2860 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2861 MGMT_STATUS_NOT_SUPPORTED);
2863 key_count = __le16_to_cpu(cp->key_count);
2864 if (key_count > max_key_count) {
2865 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2868 MGMT_STATUS_INVALID_PARAMS);
2871 expected_len = struct_size(cp, keys, key_count);
2872 if (expected_len != len) {
2873 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2875 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2876 MGMT_STATUS_INVALID_PARAMS);
2879 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2880 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2881 MGMT_STATUS_INVALID_PARAMS);
2883 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2886 for (i = 0; i < key_count; i++) {
2887 struct mgmt_link_key_info *key = &cp->keys[i];
2889 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2890 return mgmt_cmd_status(sk, hdev->id,
2891 MGMT_OP_LOAD_LINK_KEYS,
2892 MGMT_STATUS_INVALID_PARAMS);
2897 hci_link_keys_clear(hdev);
2900 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2902 changed = hci_dev_test_and_clear_flag(hdev,
2903 HCI_KEEP_DEBUG_KEYS);
2906 new_settings(hdev, NULL);
2908 for (i = 0; i < key_count; i++) {
2909 struct mgmt_link_key_info *key = &cp->keys[i];
2911 if (hci_is_blocked_key(hdev,
2912 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2914 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2919 /* Always ignore debug keys and require a new pairing if
2920 * the user wants to use them.
2922 if (key->type == HCI_LK_DEBUG_COMBINATION)
2925 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2926 key->type, key->pin_len, NULL);
2929 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2931 hci_dev_unlock(hdev);
2936 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2937 u8 addr_type, struct sock *skip_sk)
2939 struct mgmt_ev_device_unpaired ev;
2941 bacpy(&ev.addr.bdaddr, bdaddr);
2942 ev.addr.type = addr_type;
2944 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2948 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2950 struct mgmt_pending_cmd *cmd = data;
2951 struct mgmt_cp_unpair_device *cp = cmd->param;
2954 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2956 cmd->cmd_complete(cmd, err);
2957 mgmt_pending_free(cmd);
2960 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2962 struct mgmt_pending_cmd *cmd = data;
2963 struct mgmt_cp_unpair_device *cp = cmd->param;
2964 struct hci_conn *conn;
2966 if (cp->addr.type == BDADDR_BREDR)
2967 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2970 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2971 le_addr_type(cp->addr.type));
2976 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2979 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2982 struct mgmt_cp_unpair_device *cp = data;
2983 struct mgmt_rp_unpair_device rp;
2984 struct hci_conn_params *params;
2985 struct mgmt_pending_cmd *cmd;
2986 struct hci_conn *conn;
2990 memset(&rp, 0, sizeof(rp));
2991 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2992 rp.addr.type = cp->addr.type;
2994 if (!bdaddr_type_is_valid(cp->addr.type))
2995 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2996 MGMT_STATUS_INVALID_PARAMS,
2999 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3000 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3001 MGMT_STATUS_INVALID_PARAMS,
3006 if (!hdev_is_powered(hdev)) {
3007 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3008 MGMT_STATUS_NOT_POWERED, &rp,
3013 if (cp->addr.type == BDADDR_BREDR) {
3014 /* If disconnection is requested, then look up the
3015 * connection. If the remote device is connected, it
3016 * will be later used to terminate the link.
3018 * Setting it to NULL explicitly will cause no
3019 * termination of the link.
3022 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3027 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3029 err = mgmt_cmd_complete(sk, hdev->id,
3030 MGMT_OP_UNPAIR_DEVICE,
3031 MGMT_STATUS_NOT_PAIRED, &rp,
3039 /* LE address type */
3040 addr_type = le_addr_type(cp->addr.type);
3042 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3043 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3045 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3046 MGMT_STATUS_NOT_PAIRED, &rp,
3051 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3053 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3058 /* Defer clearing up the connection parameters until closing to
3059 * give a chance of keeping them if a repairing happens.
3061 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3063 /* Disable auto-connection parameters if present */
3064 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3066 if (params->explicit_connect)
3067 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3069 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3072 /* If disconnection is not requested, then clear the connection
3073 * variable so that the link is not terminated.
3075 if (!cp->disconnect)
3079 /* If the connection variable is set, then termination of the
3080 * link is requested.
3083 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3085 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3089 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3096 cmd->cmd_complete = addr_cmd_complete;
3098 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3099 unpair_device_complete);
3101 mgmt_pending_free(cmd);
3104 hci_dev_unlock(hdev);
3108 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3111 struct mgmt_cp_disconnect *cp = data;
3112 struct mgmt_rp_disconnect rp;
3113 struct mgmt_pending_cmd *cmd;
3114 struct hci_conn *conn;
3117 bt_dev_dbg(hdev, "sock %p", sk);
3119 memset(&rp, 0, sizeof(rp));
3120 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3121 rp.addr.type = cp->addr.type;
3123 if (!bdaddr_type_is_valid(cp->addr.type))
3124 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3125 MGMT_STATUS_INVALID_PARAMS,
3130 if (!test_bit(HCI_UP, &hdev->flags)) {
3131 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3132 MGMT_STATUS_NOT_POWERED, &rp,
3137 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3138 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3139 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3143 if (cp->addr.type == BDADDR_BREDR)
3144 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3147 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3148 le_addr_type(cp->addr.type));
3150 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3151 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3152 MGMT_STATUS_NOT_CONNECTED, &rp,
3157 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3163 cmd->cmd_complete = generic_cmd_complete;
3165 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3167 mgmt_pending_remove(cmd);
3170 hci_dev_unlock(hdev);
3174 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3176 switch (link_type) {
3178 switch (addr_type) {
3179 case ADDR_LE_DEV_PUBLIC:
3180 return BDADDR_LE_PUBLIC;
3183 /* Fallback to LE Random address type */
3184 return BDADDR_LE_RANDOM;
3188 /* Fallback to BR/EDR type */
3189 return BDADDR_BREDR;
3193 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3196 struct mgmt_rp_get_connections *rp;
3201 bt_dev_dbg(hdev, "sock %p", sk);
3205 if (!hdev_is_powered(hdev)) {
3206 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3207 MGMT_STATUS_NOT_POWERED);
3212 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3213 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3217 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3224 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3225 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3227 bacpy(&rp->addr[i].bdaddr, &c->dst);
3228 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3229 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3234 rp->conn_count = cpu_to_le16(i);
3236 /* Recalculate length in case of filtered SCO connections, etc */
3237 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3238 struct_size(rp, addr, i));
3243 hci_dev_unlock(hdev);
3247 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3248 struct mgmt_cp_pin_code_neg_reply *cp)
3250 struct mgmt_pending_cmd *cmd;
3253 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3258 cmd->cmd_complete = addr_cmd_complete;
3260 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3261 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3263 mgmt_pending_remove(cmd);
3268 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3271 struct hci_conn *conn;
3272 struct mgmt_cp_pin_code_reply *cp = data;
3273 struct hci_cp_pin_code_reply reply;
3274 struct mgmt_pending_cmd *cmd;
3277 bt_dev_dbg(hdev, "sock %p", sk);
3281 if (!hdev_is_powered(hdev)) {
3282 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3283 MGMT_STATUS_NOT_POWERED);
3287 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3289 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3290 MGMT_STATUS_NOT_CONNECTED);
3294 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3295 struct mgmt_cp_pin_code_neg_reply ncp;
3297 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3299 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3301 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3303 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3304 MGMT_STATUS_INVALID_PARAMS);
3309 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3315 cmd->cmd_complete = addr_cmd_complete;
3317 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3318 reply.pin_len = cp->pin_len;
3319 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3321 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3323 mgmt_pending_remove(cmd);
3326 hci_dev_unlock(hdev);
3330 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3333 struct mgmt_cp_set_io_capability *cp = data;
3335 bt_dev_dbg(hdev, "sock %p", sk);
3337 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3338 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3339 MGMT_STATUS_INVALID_PARAMS);
3343 hdev->io_capability = cp->io_capability;
3345 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3347 hci_dev_unlock(hdev);
3349 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3353 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3355 struct hci_dev *hdev = conn->hdev;
3356 struct mgmt_pending_cmd *cmd;
3358 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3359 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3362 if (cmd->user_data != conn)
3371 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3373 struct mgmt_rp_pair_device rp;
3374 struct hci_conn *conn = cmd->user_data;
3377 bacpy(&rp.addr.bdaddr, &conn->dst);
3378 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3380 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3381 status, &rp, sizeof(rp));
3383 /* So we don't get further callbacks for this connection */
3384 conn->connect_cfm_cb = NULL;
3385 conn->security_cfm_cb = NULL;
3386 conn->disconn_cfm_cb = NULL;
3388 hci_conn_drop(conn);
3390 /* The device is paired so there is no need to remove
3391 * its connection parameters anymore.
3393 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3400 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3402 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3403 struct mgmt_pending_cmd *cmd;
3405 cmd = find_pairing(conn);
3407 cmd->cmd_complete(cmd, status);
3408 mgmt_pending_remove(cmd);
3412 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3414 struct mgmt_pending_cmd *cmd;
3416 BT_DBG("status %u", status);
3418 cmd = find_pairing(conn);
3420 BT_DBG("Unable to find a pending command");
3424 cmd->cmd_complete(cmd, mgmt_status(status));
3425 mgmt_pending_remove(cmd);
3428 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3430 struct mgmt_pending_cmd *cmd;
3432 BT_DBG("status %u", status);
3437 cmd = find_pairing(conn);
3439 BT_DBG("Unable to find a pending command");
3443 cmd->cmd_complete(cmd, mgmt_status(status));
3444 mgmt_pending_remove(cmd);
3447 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3450 struct mgmt_cp_pair_device *cp = data;
3451 struct mgmt_rp_pair_device rp;
3452 struct mgmt_pending_cmd *cmd;
3453 u8 sec_level, auth_type;
3454 struct hci_conn *conn;
3457 bt_dev_dbg(hdev, "sock %p", sk);
3459 memset(&rp, 0, sizeof(rp));
3460 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3461 rp.addr.type = cp->addr.type;
3463 if (!bdaddr_type_is_valid(cp->addr.type))
3464 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3465 MGMT_STATUS_INVALID_PARAMS,
3468 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3469 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3470 MGMT_STATUS_INVALID_PARAMS,
3475 if (!hdev_is_powered(hdev)) {
3476 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3477 MGMT_STATUS_NOT_POWERED, &rp,
3482 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3483 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3484 MGMT_STATUS_ALREADY_PAIRED, &rp,
3489 sec_level = BT_SECURITY_MEDIUM;
3490 auth_type = HCI_AT_DEDICATED_BONDING;
3492 if (cp->addr.type == BDADDR_BREDR) {
3493 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3494 auth_type, CONN_REASON_PAIR_DEVICE);
3496 u8 addr_type = le_addr_type(cp->addr.type);
3497 struct hci_conn_params *p;
3499 /* When pairing a new device, it is expected to remember
3500 * this device for future connections. Adding the connection
3501 * parameter information ahead of time allows tracking
3502 * of the peripheral preferred values and will speed up any
3503 * further connection establishment.
3505 * If connection parameters already exist, then they
3506 * will be kept and this function does nothing.
3508 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3510 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3511 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3513 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3514 sec_level, HCI_LE_CONN_TIMEOUT,
3515 CONN_REASON_PAIR_DEVICE);
3521 if (PTR_ERR(conn) == -EBUSY)
3522 status = MGMT_STATUS_BUSY;
3523 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3524 status = MGMT_STATUS_NOT_SUPPORTED;
3525 else if (PTR_ERR(conn) == -ECONNREFUSED)
3526 status = MGMT_STATUS_REJECTED;
3528 status = MGMT_STATUS_CONNECT_FAILED;
3530 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3531 status, &rp, sizeof(rp));
3535 if (conn->connect_cfm_cb) {
3536 hci_conn_drop(conn);
3537 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3538 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3542 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3545 hci_conn_drop(conn);
3549 cmd->cmd_complete = pairing_complete;
3551 /* For LE, just connecting isn't a proof that the pairing finished */
3552 if (cp->addr.type == BDADDR_BREDR) {
3553 conn->connect_cfm_cb = pairing_complete_cb;
3554 conn->security_cfm_cb = pairing_complete_cb;
3555 conn->disconn_cfm_cb = pairing_complete_cb;
3557 conn->connect_cfm_cb = le_pairing_complete_cb;
3558 conn->security_cfm_cb = le_pairing_complete_cb;
3559 conn->disconn_cfm_cb = le_pairing_complete_cb;
3562 conn->io_capability = cp->io_cap;
3563 cmd->user_data = hci_conn_get(conn);
3565 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3566 hci_conn_security(conn, sec_level, auth_type, true)) {
3567 cmd->cmd_complete(cmd, 0);
3568 mgmt_pending_remove(cmd);
3574 hci_dev_unlock(hdev);
3578 static int abort_conn_sync(struct hci_dev *hdev, void *data)
3580 struct hci_conn *conn;
3581 u16 handle = PTR_ERR(data);
3583 conn = hci_conn_hash_lookup_handle(hdev, handle);
3587 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
3590 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3593 struct mgmt_addr_info *addr = data;
3594 struct mgmt_pending_cmd *cmd;
3595 struct hci_conn *conn;
3598 bt_dev_dbg(hdev, "sock %p", sk);
3602 if (!hdev_is_powered(hdev)) {
3603 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3604 MGMT_STATUS_NOT_POWERED);
3608 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3610 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3611 MGMT_STATUS_INVALID_PARAMS);
3615 conn = cmd->user_data;
3617 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3618 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3619 MGMT_STATUS_INVALID_PARAMS);
3623 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3624 mgmt_pending_remove(cmd);
3626 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3627 addr, sizeof(*addr));
3629 /* Since user doesn't want to proceed with the connection, abort any
3630 * ongoing pairing and then terminate the link if it was created
3631 * because of the pair device action.
3633 if (addr->type == BDADDR_BREDR)
3634 hci_remove_link_key(hdev, &addr->bdaddr);
3636 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3637 le_addr_type(addr->type));
3639 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3640 hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
3644 hci_dev_unlock(hdev);
3648 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3649 struct mgmt_addr_info *addr, u16 mgmt_op,
3650 u16 hci_op, __le32 passkey)
3652 struct mgmt_pending_cmd *cmd;
3653 struct hci_conn *conn;
3658 if (!hdev_is_powered(hdev)) {
3659 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3660 MGMT_STATUS_NOT_POWERED, addr,
3665 if (addr->type == BDADDR_BREDR)
3666 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3668 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3669 le_addr_type(addr->type));
3672 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3673 MGMT_STATUS_NOT_CONNECTED, addr,
3678 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3679 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3681 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3682 MGMT_STATUS_SUCCESS, addr,
3685 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3686 MGMT_STATUS_FAILED, addr,
3692 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3698 cmd->cmd_complete = addr_cmd_complete;
3700 /* Continue with pairing via HCI */
3701 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3702 struct hci_cp_user_passkey_reply cp;
3704 bacpy(&cp.bdaddr, &addr->bdaddr);
3705 cp.passkey = passkey;
3706 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3708 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3712 mgmt_pending_remove(cmd);
3715 hci_dev_unlock(hdev);
3719 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3720 void *data, u16 len)
3722 struct mgmt_cp_pin_code_neg_reply *cp = data;
3724 bt_dev_dbg(hdev, "sock %p", sk);
3726 return user_pairing_resp(sk, hdev, &cp->addr,
3727 MGMT_OP_PIN_CODE_NEG_REPLY,
3728 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3731 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3734 struct mgmt_cp_user_confirm_reply *cp = data;
3736 bt_dev_dbg(hdev, "sock %p", sk);
3738 if (len != sizeof(*cp))
3739 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3740 MGMT_STATUS_INVALID_PARAMS);
3742 return user_pairing_resp(sk, hdev, &cp->addr,
3743 MGMT_OP_USER_CONFIRM_REPLY,
3744 HCI_OP_USER_CONFIRM_REPLY, 0);
3747 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3748 void *data, u16 len)
3750 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3752 bt_dev_dbg(hdev, "sock %p", sk);
3754 return user_pairing_resp(sk, hdev, &cp->addr,
3755 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3756 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3759 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3762 struct mgmt_cp_user_passkey_reply *cp = data;
3764 bt_dev_dbg(hdev, "sock %p", sk);
3766 return user_pairing_resp(sk, hdev, &cp->addr,
3767 MGMT_OP_USER_PASSKEY_REPLY,
3768 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3771 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3772 void *data, u16 len)
3774 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3776 bt_dev_dbg(hdev, "sock %p", sk);
3778 return user_pairing_resp(sk, hdev, &cp->addr,
3779 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3780 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3783 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3785 struct adv_info *adv_instance;
3787 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3791 /* stop if current instance doesn't need to be changed */
3792 if (!(adv_instance->flags & flags))
3795 cancel_adv_timeout(hdev);
3797 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3801 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3806 static int name_changed_sync(struct hci_dev *hdev, void *data)
3808 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3811 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3813 struct mgmt_pending_cmd *cmd = data;
3814 struct mgmt_cp_set_local_name *cp = cmd->param;
3815 u8 status = mgmt_status(err);
3817 bt_dev_dbg(hdev, "err %d", err);
3819 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3823 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3826 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3829 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3830 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3833 mgmt_pending_remove(cmd);
3836 static int set_name_sync(struct hci_dev *hdev, void *data)
3838 if (lmp_bredr_capable(hdev)) {
3839 hci_update_name_sync(hdev);
3840 hci_update_eir_sync(hdev);
3843 /* The name is stored in the scan response data and so
3844 * no need to update the advertising data here.
3846 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3847 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3852 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3855 struct mgmt_cp_set_local_name *cp = data;
3856 struct mgmt_pending_cmd *cmd;
3859 bt_dev_dbg(hdev, "sock %p", sk);
3863 /* If the old values are the same as the new ones just return a
3864 * direct command complete event.
3866 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3867 !memcmp(hdev->short_name, cp->short_name,
3868 sizeof(hdev->short_name))) {
3869 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3874 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3876 if (!hdev_is_powered(hdev)) {
3877 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3879 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3884 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3885 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3886 ext_info_changed(hdev, sk);
3891 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3895 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3899 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3900 MGMT_STATUS_FAILED);
3903 mgmt_pending_remove(cmd);
3908 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3911 hci_dev_unlock(hdev);
3915 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3917 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3920 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3923 struct mgmt_cp_set_appearance *cp = data;
3927 bt_dev_dbg(hdev, "sock %p", sk);
3929 if (!lmp_le_capable(hdev))
3930 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3931 MGMT_STATUS_NOT_SUPPORTED);
3933 appearance = le16_to_cpu(cp->appearance);
3937 if (hdev->appearance != appearance) {
3938 hdev->appearance = appearance;
3940 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3941 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3944 ext_info_changed(hdev, sk);
3947 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3950 hci_dev_unlock(hdev);
3955 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3956 void *data, u16 len)
3958 struct mgmt_rp_get_phy_configuration rp;
3960 bt_dev_dbg(hdev, "sock %p", sk);
3964 memset(&rp, 0, sizeof(rp));
3966 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3967 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3968 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3970 hci_dev_unlock(hdev);
3972 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3976 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3978 struct mgmt_ev_phy_configuration_changed ev;
3980 memset(&ev, 0, sizeof(ev));
3982 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3984 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3988 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3990 struct mgmt_pending_cmd *cmd = data;
3991 struct sk_buff *skb = cmd->skb;
3992 u8 status = mgmt_status(err);
3994 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3999 status = MGMT_STATUS_FAILED;
4000 else if (IS_ERR(skb))
4001 status = mgmt_status(PTR_ERR(skb));
4003 status = mgmt_status(skb->data[0]);
4006 bt_dev_dbg(hdev, "status %d", status);
4009 mgmt_cmd_status(cmd->sk, hdev->id,
4010 MGMT_OP_SET_PHY_CONFIGURATION, status);
4012 mgmt_cmd_complete(cmd->sk, hdev->id,
4013 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4016 mgmt_phy_configuration_changed(hdev, cmd->sk);
4019 if (skb && !IS_ERR(skb))
4022 mgmt_pending_remove(cmd);
4025 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4027 struct mgmt_pending_cmd *cmd = data;
4028 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4029 struct hci_cp_le_set_default_phy cp_phy;
4030 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4032 memset(&cp_phy, 0, sizeof(cp_phy));
4034 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4035 cp_phy.all_phys |= 0x01;
4037 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4038 cp_phy.all_phys |= 0x02;
4040 if (selected_phys & MGMT_PHY_LE_1M_TX)
4041 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4043 if (selected_phys & MGMT_PHY_LE_2M_TX)
4044 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4046 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4047 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4049 if (selected_phys & MGMT_PHY_LE_1M_RX)
4050 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4052 if (selected_phys & MGMT_PHY_LE_2M_RX)
4053 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4055 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4056 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4058 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4059 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4064 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4065 void *data, u16 len)
4067 struct mgmt_cp_set_phy_configuration *cp = data;
4068 struct mgmt_pending_cmd *cmd;
4069 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4070 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4071 bool changed = false;
4074 bt_dev_dbg(hdev, "sock %p", sk);
4076 configurable_phys = get_configurable_phys(hdev);
4077 supported_phys = get_supported_phys(hdev);
4078 selected_phys = __le32_to_cpu(cp->selected_phys);
4080 if (selected_phys & ~supported_phys)
4081 return mgmt_cmd_status(sk, hdev->id,
4082 MGMT_OP_SET_PHY_CONFIGURATION,
4083 MGMT_STATUS_INVALID_PARAMS);
4085 unconfigure_phys = supported_phys & ~configurable_phys;
4087 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4088 return mgmt_cmd_status(sk, hdev->id,
4089 MGMT_OP_SET_PHY_CONFIGURATION,
4090 MGMT_STATUS_INVALID_PARAMS);
4092 if (selected_phys == get_selected_phys(hdev))
4093 return mgmt_cmd_complete(sk, hdev->id,
4094 MGMT_OP_SET_PHY_CONFIGURATION,
4099 if (!hdev_is_powered(hdev)) {
4100 err = mgmt_cmd_status(sk, hdev->id,
4101 MGMT_OP_SET_PHY_CONFIGURATION,
4102 MGMT_STATUS_REJECTED);
4106 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4107 err = mgmt_cmd_status(sk, hdev->id,
4108 MGMT_OP_SET_PHY_CONFIGURATION,
4113 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4114 pkt_type |= (HCI_DH3 | HCI_DM3);
4116 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4118 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4119 pkt_type |= (HCI_DH5 | HCI_DM5);
4121 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4123 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4124 pkt_type &= ~HCI_2DH1;
4126 pkt_type |= HCI_2DH1;
4128 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4129 pkt_type &= ~HCI_2DH3;
4131 pkt_type |= HCI_2DH3;
4133 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4134 pkt_type &= ~HCI_2DH5;
4136 pkt_type |= HCI_2DH5;
4138 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4139 pkt_type &= ~HCI_3DH1;
4141 pkt_type |= HCI_3DH1;
4143 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4144 pkt_type &= ~HCI_3DH3;
4146 pkt_type |= HCI_3DH3;
4148 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4149 pkt_type &= ~HCI_3DH5;
4151 pkt_type |= HCI_3DH5;
4153 if (pkt_type != hdev->pkt_type) {
4154 hdev->pkt_type = pkt_type;
4158 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4159 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4161 mgmt_phy_configuration_changed(hdev, sk);
4163 err = mgmt_cmd_complete(sk, hdev->id,
4164 MGMT_OP_SET_PHY_CONFIGURATION,
4170 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4175 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4176 set_default_phy_complete);
4179 err = mgmt_cmd_status(sk, hdev->id,
4180 MGMT_OP_SET_PHY_CONFIGURATION,
4181 MGMT_STATUS_FAILED);
4184 mgmt_pending_remove(cmd);
4188 hci_dev_unlock(hdev);
4193 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4196 int err = MGMT_STATUS_SUCCESS;
4197 struct mgmt_cp_set_blocked_keys *keys = data;
4198 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4199 sizeof(struct mgmt_blocked_key_info));
4200 u16 key_count, expected_len;
4203 bt_dev_dbg(hdev, "sock %p", sk);
4205 key_count = __le16_to_cpu(keys->key_count);
4206 if (key_count > max_key_count) {
4207 bt_dev_err(hdev, "too big key_count value %u", key_count);
4208 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4209 MGMT_STATUS_INVALID_PARAMS);
4212 expected_len = struct_size(keys, keys, key_count);
4213 if (expected_len != len) {
4214 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4216 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4217 MGMT_STATUS_INVALID_PARAMS);
4222 hci_blocked_keys_clear(hdev);
4224 for (i = 0; i < key_count; ++i) {
4225 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4228 err = MGMT_STATUS_NO_RESOURCES;
4232 b->type = keys->keys[i].type;
4233 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4234 list_add_rcu(&b->list, &hdev->blocked_keys);
4236 hci_dev_unlock(hdev);
4238 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4242 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4243 void *data, u16 len)
4245 struct mgmt_mode *cp = data;
4247 bool changed = false;
4249 bt_dev_dbg(hdev, "sock %p", sk);
4251 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4252 return mgmt_cmd_status(sk, hdev->id,
4253 MGMT_OP_SET_WIDEBAND_SPEECH,
4254 MGMT_STATUS_NOT_SUPPORTED);
4256 if (cp->val != 0x00 && cp->val != 0x01)
4257 return mgmt_cmd_status(sk, hdev->id,
4258 MGMT_OP_SET_WIDEBAND_SPEECH,
4259 MGMT_STATUS_INVALID_PARAMS);
4263 if (hdev_is_powered(hdev) &&
4264 !!cp->val != hci_dev_test_flag(hdev,
4265 HCI_WIDEBAND_SPEECH_ENABLED)) {
4266 err = mgmt_cmd_status(sk, hdev->id,
4267 MGMT_OP_SET_WIDEBAND_SPEECH,
4268 MGMT_STATUS_REJECTED);
4273 changed = !hci_dev_test_and_set_flag(hdev,
4274 HCI_WIDEBAND_SPEECH_ENABLED);
4276 changed = hci_dev_test_and_clear_flag(hdev,
4277 HCI_WIDEBAND_SPEECH_ENABLED);
4279 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4284 err = new_settings(hdev, sk);
4287 hci_dev_unlock(hdev);
4291 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4292 void *data, u16 data_len)
4295 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4298 u8 tx_power_range[2];
4300 bt_dev_dbg(hdev, "sock %p", sk);
4302 memset(&buf, 0, sizeof(buf));
4306 /* When the Read Simple Pairing Options command is supported, then
4307 * the remote public key validation is supported.
4309 * Alternatively, when Microsoft extensions are available, they can
4310 * indicate support for public key validation as well.
4312 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4313 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4315 flags |= 0x02; /* Remote public key validation (LE) */
4317 /* When the Read Encryption Key Size command is supported, then the
4318 * encryption key size is enforced.
4320 if (hdev->commands[20] & 0x10)
4321 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4323 flags |= 0x08; /* Encryption key size enforcement (LE) */
4325 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4328 /* When the Read Simple Pairing Options command is supported, then
4329 * also max encryption key size information is provided.
4331 if (hdev->commands[41] & 0x08)
4332 cap_len = eir_append_le16(rp->cap, cap_len,
4333 MGMT_CAP_MAX_ENC_KEY_SIZE,
4334 hdev->max_enc_key_size);
4336 cap_len = eir_append_le16(rp->cap, cap_len,
4337 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4338 SMP_MAX_ENC_KEY_SIZE);
4340 /* Append the min/max LE tx power parameters if we were able to fetch
4341 * it from the controller
4343 if (hdev->commands[38] & 0x80) {
4344 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4345 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4346 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4350 rp->cap_len = cpu_to_le16(cap_len);
4352 hci_dev_unlock(hdev);
4354 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4355 rp, sizeof(*rp) + cap_len);
4358 #ifdef CONFIG_BT_FEATURE_DEBUG
4359 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4360 static const u8 debug_uuid[16] = {
4361 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4362 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4366 /* 330859bc-7506-492d-9370-9a6f0614037f */
4367 static const u8 quality_report_uuid[16] = {
4368 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4369 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4372 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4373 static const u8 offload_codecs_uuid[16] = {
4374 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4375 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4378 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4379 static const u8 le_simultaneous_roles_uuid[16] = {
4380 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4381 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4384 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4385 static const u8 rpa_resolution_uuid[16] = {
4386 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4387 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4390 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4391 static const u8 iso_socket_uuid[16] = {
4392 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4393 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4396 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4397 static const u8 mgmt_mesh_uuid[16] = {
4398 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4399 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4402 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4403 void *data, u16 data_len)
4405 struct mgmt_rp_read_exp_features_info *rp;
4411 bt_dev_dbg(hdev, "sock %p", sk);
4413 /* Enough space for 7 features */
4414 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4415 rp = kzalloc(len, GFP_KERNEL);
4419 #ifdef CONFIG_BT_FEATURE_DEBUG
4421 flags = bt_dbg_get() ? BIT(0) : 0;
4423 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4424 rp->features[idx].flags = cpu_to_le32(flags);
4429 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4430 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4435 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4436 rp->features[idx].flags = cpu_to_le32(flags);
4440 if (hdev && ll_privacy_capable(hdev)) {
4441 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4442 flags = BIT(0) | BIT(1);
4446 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4447 rp->features[idx].flags = cpu_to_le32(flags);
4451 if (hdev && (aosp_has_quality_report(hdev) ||
4452 hdev->set_quality_report)) {
4453 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4458 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4459 rp->features[idx].flags = cpu_to_le32(flags);
4463 if (hdev && hdev->get_data_path_id) {
4464 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4469 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4470 rp->features[idx].flags = cpu_to_le32(flags);
4474 if (IS_ENABLED(CONFIG_BT_LE)) {
4475 flags = iso_enabled() ? BIT(0) : 0;
4476 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4477 rp->features[idx].flags = cpu_to_le32(flags);
4481 if (hdev && lmp_le_capable(hdev)) {
4482 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4487 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4488 rp->features[idx].flags = cpu_to_le32(flags);
4492 rp->feature_count = cpu_to_le16(idx);
4494 /* After reading the experimental features information, enable
4495 * the events to update client on any future change.
4497 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4499 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4500 MGMT_OP_READ_EXP_FEATURES_INFO,
4501 0, rp, sizeof(*rp) + (20 * idx));
4507 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4510 struct mgmt_ev_exp_feature_changed ev;
4512 memset(&ev, 0, sizeof(ev));
4513 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4514 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4516 // Do we need to be atomic with the conn_flags?
4517 if (enabled && privacy_mode_capable(hdev))
4518 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4520 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4522 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4524 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4528 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4529 bool enabled, struct sock *skip)
4531 struct mgmt_ev_exp_feature_changed ev;
4533 memset(&ev, 0, sizeof(ev));
4534 memcpy(ev.uuid, uuid, 16);
4535 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4537 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4539 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4542 #define EXP_FEAT(_uuid, _set_func) \
4545 .set_func = _set_func, \
4548 /* The zero key uuid is special. Multiple exp features are set through it. */
4549 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4550 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4552 struct mgmt_rp_set_exp_feature rp;
4554 memset(rp.uuid, 0, 16);
4555 rp.flags = cpu_to_le32(0);
4557 #ifdef CONFIG_BT_FEATURE_DEBUG
4559 bool changed = bt_dbg_get();
4564 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4568 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4571 changed = hci_dev_test_and_clear_flag(hdev,
4572 HCI_ENABLE_LL_PRIVACY);
4574 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4578 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4580 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4581 MGMT_OP_SET_EXP_FEATURE, 0,
4585 #ifdef CONFIG_BT_FEATURE_DEBUG
4586 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4587 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4589 struct mgmt_rp_set_exp_feature rp;
4594 /* Command requires to use the non-controller index */
4596 return mgmt_cmd_status(sk, hdev->id,
4597 MGMT_OP_SET_EXP_FEATURE,
4598 MGMT_STATUS_INVALID_INDEX);
4600 /* Parameters are limited to a single octet */
4601 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4602 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4603 MGMT_OP_SET_EXP_FEATURE,
4604 MGMT_STATUS_INVALID_PARAMS);
4606 /* Only boolean on/off is supported */
4607 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4608 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4609 MGMT_OP_SET_EXP_FEATURE,
4610 MGMT_STATUS_INVALID_PARAMS);
4612 val = !!cp->param[0];
4613 changed = val ? !bt_dbg_get() : bt_dbg_get();
4616 memcpy(rp.uuid, debug_uuid, 16);
4617 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4619 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4621 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4622 MGMT_OP_SET_EXP_FEATURE, 0,
4626 exp_feature_changed(hdev, debug_uuid, val, sk);
4632 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4633 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4635 struct mgmt_rp_set_exp_feature rp;
4639 /* Command requires to use the controller index */
4641 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4642 MGMT_OP_SET_EXP_FEATURE,
4643 MGMT_STATUS_INVALID_INDEX);
4645 /* Parameters are limited to a single octet */
4646 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4647 return mgmt_cmd_status(sk, hdev->id,
4648 MGMT_OP_SET_EXP_FEATURE,
4649 MGMT_STATUS_INVALID_PARAMS);
4651 /* Only boolean on/off is supported */
4652 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4653 return mgmt_cmd_status(sk, hdev->id,
4654 MGMT_OP_SET_EXP_FEATURE,
4655 MGMT_STATUS_INVALID_PARAMS);
4657 val = !!cp->param[0];
4660 changed = !hci_dev_test_and_set_flag(hdev,
4661 HCI_MESH_EXPERIMENTAL);
4663 hci_dev_clear_flag(hdev, HCI_MESH);
4664 changed = hci_dev_test_and_clear_flag(hdev,
4665 HCI_MESH_EXPERIMENTAL);
4668 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4669 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4671 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4673 err = mgmt_cmd_complete(sk, hdev->id,
4674 MGMT_OP_SET_EXP_FEATURE, 0,
4678 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4683 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4684 struct mgmt_cp_set_exp_feature *cp,
4687 struct mgmt_rp_set_exp_feature rp;
4692 /* Command requires to use the controller index */
4694 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4695 MGMT_OP_SET_EXP_FEATURE,
4696 MGMT_STATUS_INVALID_INDEX);
4698 /* Changes can only be made when controller is powered down */
4699 if (hdev_is_powered(hdev))
4700 return mgmt_cmd_status(sk, hdev->id,
4701 MGMT_OP_SET_EXP_FEATURE,
4702 MGMT_STATUS_REJECTED);
4704 /* Parameters are limited to a single octet */
4705 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4706 return mgmt_cmd_status(sk, hdev->id,
4707 MGMT_OP_SET_EXP_FEATURE,
4708 MGMT_STATUS_INVALID_PARAMS);
4710 /* Only boolean on/off is supported */
4711 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4712 return mgmt_cmd_status(sk, hdev->id,
4713 MGMT_OP_SET_EXP_FEATURE,
4714 MGMT_STATUS_INVALID_PARAMS);
4716 val = !!cp->param[0];
4719 changed = !hci_dev_test_and_set_flag(hdev,
4720 HCI_ENABLE_LL_PRIVACY);
4721 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4723 /* Enable LL privacy + supported settings changed */
4724 flags = BIT(0) | BIT(1);
4726 changed = hci_dev_test_and_clear_flag(hdev,
4727 HCI_ENABLE_LL_PRIVACY);
4729 /* Disable LL privacy + supported settings changed */
4733 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4734 rp.flags = cpu_to_le32(flags);
4736 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4738 err = mgmt_cmd_complete(sk, hdev->id,
4739 MGMT_OP_SET_EXP_FEATURE, 0,
4743 exp_ll_privacy_feature_changed(val, hdev, sk);
4748 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4749 struct mgmt_cp_set_exp_feature *cp,
4752 struct mgmt_rp_set_exp_feature rp;
4756 /* Command requires to use a valid controller index */
4758 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4759 MGMT_OP_SET_EXP_FEATURE,
4760 MGMT_STATUS_INVALID_INDEX);
4762 /* Parameters are limited to a single octet */
4763 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4764 return mgmt_cmd_status(sk, hdev->id,
4765 MGMT_OP_SET_EXP_FEATURE,
4766 MGMT_STATUS_INVALID_PARAMS);
4768 /* Only boolean on/off is supported */
4769 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4770 return mgmt_cmd_status(sk, hdev->id,
4771 MGMT_OP_SET_EXP_FEATURE,
4772 MGMT_STATUS_INVALID_PARAMS);
4774 hci_req_sync_lock(hdev);
4776 val = !!cp->param[0];
4777 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4779 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4780 err = mgmt_cmd_status(sk, hdev->id,
4781 MGMT_OP_SET_EXP_FEATURE,
4782 MGMT_STATUS_NOT_SUPPORTED);
4783 goto unlock_quality_report;
4787 if (hdev->set_quality_report)
4788 err = hdev->set_quality_report(hdev, val);
4790 err = aosp_set_quality_report(hdev, val);
4793 err = mgmt_cmd_status(sk, hdev->id,
4794 MGMT_OP_SET_EXP_FEATURE,
4795 MGMT_STATUS_FAILED);
4796 goto unlock_quality_report;
4800 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4802 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4805 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4807 memcpy(rp.uuid, quality_report_uuid, 16);
4808 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4809 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4811 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4815 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4817 unlock_quality_report:
4818 hci_req_sync_unlock(hdev);
4822 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4823 struct mgmt_cp_set_exp_feature *cp,
4828 struct mgmt_rp_set_exp_feature rp;
4830 /* Command requires to use a valid controller index */
4832 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4833 MGMT_OP_SET_EXP_FEATURE,
4834 MGMT_STATUS_INVALID_INDEX);
4836 /* Parameters are limited to a single octet */
4837 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4838 return mgmt_cmd_status(sk, hdev->id,
4839 MGMT_OP_SET_EXP_FEATURE,
4840 MGMT_STATUS_INVALID_PARAMS);
4842 /* Only boolean on/off is supported */
4843 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4844 return mgmt_cmd_status(sk, hdev->id,
4845 MGMT_OP_SET_EXP_FEATURE,
4846 MGMT_STATUS_INVALID_PARAMS);
4848 val = !!cp->param[0];
4849 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4851 if (!hdev->get_data_path_id) {
4852 return mgmt_cmd_status(sk, hdev->id,
4853 MGMT_OP_SET_EXP_FEATURE,
4854 MGMT_STATUS_NOT_SUPPORTED);
4859 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4861 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4864 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4867 memcpy(rp.uuid, offload_codecs_uuid, 16);
4868 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4869 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4870 err = mgmt_cmd_complete(sk, hdev->id,
4871 MGMT_OP_SET_EXP_FEATURE, 0,
4875 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4880 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4881 struct mgmt_cp_set_exp_feature *cp,
4886 struct mgmt_rp_set_exp_feature rp;
4888 /* Command requires to use a valid controller index */
4890 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4891 MGMT_OP_SET_EXP_FEATURE,
4892 MGMT_STATUS_INVALID_INDEX);
4894 /* Parameters are limited to a single octet */
4895 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4896 return mgmt_cmd_status(sk, hdev->id,
4897 MGMT_OP_SET_EXP_FEATURE,
4898 MGMT_STATUS_INVALID_PARAMS);
4900 /* Only boolean on/off is supported */
4901 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4902 return mgmt_cmd_status(sk, hdev->id,
4903 MGMT_OP_SET_EXP_FEATURE,
4904 MGMT_STATUS_INVALID_PARAMS);
4906 val = !!cp->param[0];
4907 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4909 if (!hci_dev_le_state_simultaneous(hdev)) {
4910 return mgmt_cmd_status(sk, hdev->id,
4911 MGMT_OP_SET_EXP_FEATURE,
4912 MGMT_STATUS_NOT_SUPPORTED);
4917 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4919 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4922 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4925 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4926 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4927 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4928 err = mgmt_cmd_complete(sk, hdev->id,
4929 MGMT_OP_SET_EXP_FEATURE, 0,
4933 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4939 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4940 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4942 struct mgmt_rp_set_exp_feature rp;
4943 bool val, changed = false;
4946 /* Command requires to use the non-controller index */
4948 return mgmt_cmd_status(sk, hdev->id,
4949 MGMT_OP_SET_EXP_FEATURE,
4950 MGMT_STATUS_INVALID_INDEX);
4952 /* Parameters are limited to a single octet */
4953 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4954 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4955 MGMT_OP_SET_EXP_FEATURE,
4956 MGMT_STATUS_INVALID_PARAMS);
4958 /* Only boolean on/off is supported */
4959 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4960 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4961 MGMT_OP_SET_EXP_FEATURE,
4962 MGMT_STATUS_INVALID_PARAMS);
4964 val = cp->param[0] ? true : false;
4973 memcpy(rp.uuid, iso_socket_uuid, 16);
4974 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4976 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4978 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4979 MGMT_OP_SET_EXP_FEATURE, 0,
4983 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4989 static const struct mgmt_exp_feature {
4991 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4992 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4993 } exp_features[] = {
4994 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4995 #ifdef CONFIG_BT_FEATURE_DEBUG
4996 EXP_FEAT(debug_uuid, set_debug_func),
4998 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4999 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5000 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5001 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5002 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5004 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5007 /* end with a null feature */
5008 EXP_FEAT(NULL, NULL)
5011 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5012 void *data, u16 data_len)
5014 struct mgmt_cp_set_exp_feature *cp = data;
5017 bt_dev_dbg(hdev, "sock %p", sk);
5019 for (i = 0; exp_features[i].uuid; i++) {
5020 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5021 return exp_features[i].set_func(sk, hdev, cp, data_len);
5024 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5025 MGMT_OP_SET_EXP_FEATURE,
5026 MGMT_STATUS_NOT_SUPPORTED);
5029 static u32 get_params_flags(struct hci_dev *hdev,
5030 struct hci_conn_params *params)
5032 u32 flags = hdev->conn_flags;
5034 /* Devices using RPAs can only be programmed in the acceptlist if
5035 * LL Privacy has been enable otherwise they cannot mark
5036 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5038 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5039 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type))
5040 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5045 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5048 struct mgmt_cp_get_device_flags *cp = data;
5049 struct mgmt_rp_get_device_flags rp;
5050 struct bdaddr_list_with_flags *br_params;
5051 struct hci_conn_params *params;
5052 u32 supported_flags;
5053 u32 current_flags = 0;
5054 u8 status = MGMT_STATUS_INVALID_PARAMS;
5056 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5057 &cp->addr.bdaddr, cp->addr.type);
5061 supported_flags = hdev->conn_flags;
5063 memset(&rp, 0, sizeof(rp));
5065 if (cp->addr.type == BDADDR_BREDR) {
5066 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5072 current_flags = br_params->flags;
5074 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5075 le_addr_type(cp->addr.type));
5079 supported_flags = get_params_flags(hdev, params);
5080 current_flags = params->flags;
5083 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5084 rp.addr.type = cp->addr.type;
5085 rp.supported_flags = cpu_to_le32(supported_flags);
5086 rp.current_flags = cpu_to_le32(current_flags);
5088 status = MGMT_STATUS_SUCCESS;
5091 hci_dev_unlock(hdev);
5093 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5097 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5098 bdaddr_t *bdaddr, u8 bdaddr_type,
5099 u32 supported_flags, u32 current_flags)
5101 struct mgmt_ev_device_flags_changed ev;
5103 bacpy(&ev.addr.bdaddr, bdaddr);
5104 ev.addr.type = bdaddr_type;
5105 ev.supported_flags = cpu_to_le32(supported_flags);
5106 ev.current_flags = cpu_to_le32(current_flags);
5108 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5111 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5114 struct mgmt_cp_set_device_flags *cp = data;
5115 struct bdaddr_list_with_flags *br_params;
5116 struct hci_conn_params *params;
5117 u8 status = MGMT_STATUS_INVALID_PARAMS;
5118 u32 supported_flags;
5119 u32 current_flags = __le32_to_cpu(cp->current_flags);
5121 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5122 &cp->addr.bdaddr, cp->addr.type, current_flags);
5124 // We should take hci_dev_lock() early, I think.. conn_flags can change
5125 supported_flags = hdev->conn_flags;
5127 if ((supported_flags | current_flags) != supported_flags) {
5128 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5129 current_flags, supported_flags);
5135 if (cp->addr.type == BDADDR_BREDR) {
5136 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5141 br_params->flags = current_flags;
5142 status = MGMT_STATUS_SUCCESS;
5144 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5145 &cp->addr.bdaddr, cp->addr.type);
5151 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5152 le_addr_type(cp->addr.type));
5154 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5155 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5159 supported_flags = get_params_flags(hdev, params);
5161 if ((supported_flags | current_flags) != supported_flags) {
5162 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5163 current_flags, supported_flags);
5167 params->flags = current_flags;
5168 status = MGMT_STATUS_SUCCESS;
5170 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5173 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5174 hci_update_passive_scan(hdev);
5177 hci_dev_unlock(hdev);
5180 if (status == MGMT_STATUS_SUCCESS)
5181 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5182 supported_flags, current_flags);
5184 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5185 &cp->addr, sizeof(cp->addr));
5188 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5191 struct mgmt_ev_adv_monitor_added ev;
5193 ev.monitor_handle = cpu_to_le16(handle);
5195 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5198 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5200 struct mgmt_ev_adv_monitor_removed ev;
5201 struct mgmt_pending_cmd *cmd;
5202 struct sock *sk_skip = NULL;
5203 struct mgmt_cp_remove_adv_monitor *cp;
5205 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5209 if (cp->monitor_handle)
5213 ev.monitor_handle = cpu_to_le16(handle);
5215 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5218 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5219 void *data, u16 len)
5221 struct adv_monitor *monitor = NULL;
5222 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5225 __u32 supported = 0;
5227 __u16 num_handles = 0;
5228 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5230 BT_DBG("request for %s", hdev->name);
5234 if (msft_monitor_supported(hdev))
5235 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5237 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5238 handles[num_handles++] = monitor->handle;
5240 hci_dev_unlock(hdev);
5242 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5243 rp = kmalloc(rp_size, GFP_KERNEL);
5247 /* All supported features are currently enabled */
5248 enabled = supported;
5250 rp->supported_features = cpu_to_le32(supported);
5251 rp->enabled_features = cpu_to_le32(enabled);
5252 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5253 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5254 rp->num_handles = cpu_to_le16(num_handles);
5256 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5258 err = mgmt_cmd_complete(sk, hdev->id,
5259 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5260 MGMT_STATUS_SUCCESS, rp, rp_size);
5267 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5268 void *data, int status)
5270 struct mgmt_rp_add_adv_patterns_monitor rp;
5271 struct mgmt_pending_cmd *cmd = data;
5272 struct adv_monitor *monitor = cmd->user_data;
5276 rp.monitor_handle = cpu_to_le16(monitor->handle);
5279 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5280 hdev->adv_monitors_cnt++;
5281 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5282 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5283 hci_update_passive_scan(hdev);
5286 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5287 mgmt_status(status), &rp, sizeof(rp));
5288 mgmt_pending_remove(cmd);
5290 hci_dev_unlock(hdev);
5291 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5292 rp.monitor_handle, status);
5295 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5297 struct mgmt_pending_cmd *cmd = data;
5298 struct adv_monitor *monitor = cmd->user_data;
5300 return hci_add_adv_monitor(hdev, monitor);
5303 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5304 struct adv_monitor *m, u8 status,
5305 void *data, u16 len, u16 op)
5307 struct mgmt_pending_cmd *cmd;
5315 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5316 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5317 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5318 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5319 status = MGMT_STATUS_BUSY;
5323 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5325 status = MGMT_STATUS_NO_RESOURCES;
5330 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5331 mgmt_add_adv_patterns_monitor_complete);
5334 status = MGMT_STATUS_NO_RESOURCES;
5336 status = MGMT_STATUS_FAILED;
5341 hci_dev_unlock(hdev);
5346 hci_free_adv_monitor(hdev, m);
5347 hci_dev_unlock(hdev);
5348 return mgmt_cmd_status(sk, hdev->id, op, status);
5351 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5352 struct mgmt_adv_rssi_thresholds *rssi)
5355 m->rssi.low_threshold = rssi->low_threshold;
5356 m->rssi.low_threshold_timeout =
5357 __le16_to_cpu(rssi->low_threshold_timeout);
5358 m->rssi.high_threshold = rssi->high_threshold;
5359 m->rssi.high_threshold_timeout =
5360 __le16_to_cpu(rssi->high_threshold_timeout);
5361 m->rssi.sampling_period = rssi->sampling_period;
5363 /* Default values. These numbers are the least constricting
5364 * parameters for MSFT API to work, so it behaves as if there
5365 * are no rssi parameter to consider. May need to be changed
5366 * if other API are to be supported.
5368 m->rssi.low_threshold = -127;
5369 m->rssi.low_threshold_timeout = 60;
5370 m->rssi.high_threshold = -127;
5371 m->rssi.high_threshold_timeout = 0;
5372 m->rssi.sampling_period = 0;
5376 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5377 struct mgmt_adv_pattern *patterns)
5379 u8 offset = 0, length = 0;
5380 struct adv_pattern *p = NULL;
5383 for (i = 0; i < pattern_count; i++) {
5384 offset = patterns[i].offset;
5385 length = patterns[i].length;
5386 if (offset >= HCI_MAX_AD_LENGTH ||
5387 length > HCI_MAX_AD_LENGTH ||
5388 (offset + length) > HCI_MAX_AD_LENGTH)
5389 return MGMT_STATUS_INVALID_PARAMS;
5391 p = kmalloc(sizeof(*p), GFP_KERNEL);
5393 return MGMT_STATUS_NO_RESOURCES;
5395 p->ad_type = patterns[i].ad_type;
5396 p->offset = patterns[i].offset;
5397 p->length = patterns[i].length;
5398 memcpy(p->value, patterns[i].value, p->length);
5400 INIT_LIST_HEAD(&p->list);
5401 list_add(&p->list, &m->patterns);
5404 return MGMT_STATUS_SUCCESS;
5407 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5408 void *data, u16 len)
5410 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5411 struct adv_monitor *m = NULL;
5412 u8 status = MGMT_STATUS_SUCCESS;
5413 size_t expected_size = sizeof(*cp);
5415 BT_DBG("request for %s", hdev->name);
5417 if (len <= sizeof(*cp)) {
5418 status = MGMT_STATUS_INVALID_PARAMS;
5422 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5423 if (len != expected_size) {
5424 status = MGMT_STATUS_INVALID_PARAMS;
5428 m = kzalloc(sizeof(*m), GFP_KERNEL);
5430 status = MGMT_STATUS_NO_RESOURCES;
5434 INIT_LIST_HEAD(&m->patterns);
5436 parse_adv_monitor_rssi(m, NULL);
5437 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5440 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5441 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5444 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5445 void *data, u16 len)
5447 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5448 struct adv_monitor *m = NULL;
5449 u8 status = MGMT_STATUS_SUCCESS;
5450 size_t expected_size = sizeof(*cp);
5452 BT_DBG("request for %s", hdev->name);
5454 if (len <= sizeof(*cp)) {
5455 status = MGMT_STATUS_INVALID_PARAMS;
5459 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5460 if (len != expected_size) {
5461 status = MGMT_STATUS_INVALID_PARAMS;
5465 m = kzalloc(sizeof(*m), GFP_KERNEL);
5467 status = MGMT_STATUS_NO_RESOURCES;
5471 INIT_LIST_HEAD(&m->patterns);
5473 parse_adv_monitor_rssi(m, &cp->rssi);
5474 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5477 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5478 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5481 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5482 void *data, int status)
5484 struct mgmt_rp_remove_adv_monitor rp;
5485 struct mgmt_pending_cmd *cmd = data;
5486 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5490 rp.monitor_handle = cp->monitor_handle;
5493 hci_update_passive_scan(hdev);
5495 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5496 mgmt_status(status), &rp, sizeof(rp));
5497 mgmt_pending_remove(cmd);
5499 hci_dev_unlock(hdev);
5500 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5501 rp.monitor_handle, status);
5504 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5506 struct mgmt_pending_cmd *cmd = data;
5507 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5508 u16 handle = __le16_to_cpu(cp->monitor_handle);
5511 return hci_remove_all_adv_monitor(hdev);
5513 return hci_remove_single_adv_monitor(hdev, handle);
5516 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5517 void *data, u16 len)
5519 struct mgmt_pending_cmd *cmd;
5524 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5525 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5526 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5527 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5528 status = MGMT_STATUS_BUSY;
5532 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5534 status = MGMT_STATUS_NO_RESOURCES;
5538 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5539 mgmt_remove_adv_monitor_complete);
5542 mgmt_pending_remove(cmd);
5545 status = MGMT_STATUS_NO_RESOURCES;
5547 status = MGMT_STATUS_FAILED;
5552 hci_dev_unlock(hdev);
5557 hci_dev_unlock(hdev);
5558 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5562 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5564 struct mgmt_rp_read_local_oob_data mgmt_rp;
5565 size_t rp_size = sizeof(mgmt_rp);
5566 struct mgmt_pending_cmd *cmd = data;
5567 struct sk_buff *skb = cmd->skb;
5568 u8 status = mgmt_status(err);
5572 status = MGMT_STATUS_FAILED;
5573 else if (IS_ERR(skb))
5574 status = mgmt_status(PTR_ERR(skb));
5576 status = mgmt_status(skb->data[0]);
5579 bt_dev_dbg(hdev, "status %d", status);
5582 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5586 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5588 if (!bredr_sc_enabled(hdev)) {
5589 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5591 if (skb->len < sizeof(*rp)) {
5592 mgmt_cmd_status(cmd->sk, hdev->id,
5593 MGMT_OP_READ_LOCAL_OOB_DATA,
5594 MGMT_STATUS_FAILED);
5598 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5599 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5601 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5603 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5605 if (skb->len < sizeof(*rp)) {
5606 mgmt_cmd_status(cmd->sk, hdev->id,
5607 MGMT_OP_READ_LOCAL_OOB_DATA,
5608 MGMT_STATUS_FAILED);
5612 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5613 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5615 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5616 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5619 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5620 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5623 if (skb && !IS_ERR(skb))
5626 mgmt_pending_free(cmd);
5629 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5631 struct mgmt_pending_cmd *cmd = data;
5633 if (bredr_sc_enabled(hdev))
5634 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5636 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5638 if (IS_ERR(cmd->skb))
5639 return PTR_ERR(cmd->skb);
5644 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5645 void *data, u16 data_len)
5647 struct mgmt_pending_cmd *cmd;
5650 bt_dev_dbg(hdev, "sock %p", sk);
5654 if (!hdev_is_powered(hdev)) {
5655 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5656 MGMT_STATUS_NOT_POWERED);
5660 if (!lmp_ssp_capable(hdev)) {
5661 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5662 MGMT_STATUS_NOT_SUPPORTED);
5666 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5670 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5671 read_local_oob_data_complete);
5674 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5675 MGMT_STATUS_FAILED);
5678 mgmt_pending_free(cmd);
5682 hci_dev_unlock(hdev);
5686 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5687 void *data, u16 len)
5689 struct mgmt_addr_info *addr = data;
5692 bt_dev_dbg(hdev, "sock %p", sk);
5694 if (!bdaddr_type_is_valid(addr->type))
5695 return mgmt_cmd_complete(sk, hdev->id,
5696 MGMT_OP_ADD_REMOTE_OOB_DATA,
5697 MGMT_STATUS_INVALID_PARAMS,
5698 addr, sizeof(*addr));
5702 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5703 struct mgmt_cp_add_remote_oob_data *cp = data;
5706 if (cp->addr.type != BDADDR_BREDR) {
5707 err = mgmt_cmd_complete(sk, hdev->id,
5708 MGMT_OP_ADD_REMOTE_OOB_DATA,
5709 MGMT_STATUS_INVALID_PARAMS,
5710 &cp->addr, sizeof(cp->addr));
5714 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5715 cp->addr.type, cp->hash,
5716 cp->rand, NULL, NULL);
5718 status = MGMT_STATUS_FAILED;
5720 status = MGMT_STATUS_SUCCESS;
5722 err = mgmt_cmd_complete(sk, hdev->id,
5723 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5724 &cp->addr, sizeof(cp->addr));
5725 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5726 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5727 u8 *rand192, *hash192, *rand256, *hash256;
5730 if (bdaddr_type_is_le(cp->addr.type)) {
5731 /* Enforce zero-valued 192-bit parameters as
5732 * long as legacy SMP OOB isn't implemented.
5734 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5735 memcmp(cp->hash192, ZERO_KEY, 16)) {
5736 err = mgmt_cmd_complete(sk, hdev->id,
5737 MGMT_OP_ADD_REMOTE_OOB_DATA,
5738 MGMT_STATUS_INVALID_PARAMS,
5739 addr, sizeof(*addr));
5746 /* In case one of the P-192 values is set to zero,
5747 * then just disable OOB data for P-192.
5749 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5750 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5754 rand192 = cp->rand192;
5755 hash192 = cp->hash192;
5759 /* In case one of the P-256 values is set to zero, then just
5760 * disable OOB data for P-256.
5762 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5763 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5767 rand256 = cp->rand256;
5768 hash256 = cp->hash256;
5771 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5772 cp->addr.type, hash192, rand192,
5775 status = MGMT_STATUS_FAILED;
5777 status = MGMT_STATUS_SUCCESS;
5779 err = mgmt_cmd_complete(sk, hdev->id,
5780 MGMT_OP_ADD_REMOTE_OOB_DATA,
5781 status, &cp->addr, sizeof(cp->addr));
5783 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5785 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5786 MGMT_STATUS_INVALID_PARAMS);
5790 hci_dev_unlock(hdev);
5794 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5795 void *data, u16 len)
5797 struct mgmt_cp_remove_remote_oob_data *cp = data;
5801 bt_dev_dbg(hdev, "sock %p", sk);
5803 if (cp->addr.type != BDADDR_BREDR)
5804 return mgmt_cmd_complete(sk, hdev->id,
5805 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5806 MGMT_STATUS_INVALID_PARAMS,
5807 &cp->addr, sizeof(cp->addr));
5811 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5812 hci_remote_oob_data_clear(hdev);
5813 status = MGMT_STATUS_SUCCESS;
5817 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5819 status = MGMT_STATUS_INVALID_PARAMS;
5821 status = MGMT_STATUS_SUCCESS;
5824 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5825 status, &cp->addr, sizeof(cp->addr));
5827 hci_dev_unlock(hdev);
5831 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5833 struct mgmt_pending_cmd *cmd;
5835 bt_dev_dbg(hdev, "status %u", status);
5839 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5841 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5844 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5847 cmd->cmd_complete(cmd, mgmt_status(status));
5848 mgmt_pending_remove(cmd);
5851 hci_dev_unlock(hdev);
5854 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5855 uint8_t *mgmt_status)
5858 case DISCOV_TYPE_LE:
5859 *mgmt_status = mgmt_le_support(hdev);
5863 case DISCOV_TYPE_INTERLEAVED:
5864 *mgmt_status = mgmt_le_support(hdev);
5868 case DISCOV_TYPE_BREDR:
5869 *mgmt_status = mgmt_bredr_support(hdev);
5874 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5881 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5883 struct mgmt_pending_cmd *cmd = data;
5885 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5886 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5887 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5890 bt_dev_dbg(hdev, "err %d", err);
5892 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5894 mgmt_pending_remove(cmd);
5896 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5900 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5902 return hci_start_discovery_sync(hdev);
5905 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5906 u16 op, void *data, u16 len)
5908 struct mgmt_cp_start_discovery *cp = data;
5909 struct mgmt_pending_cmd *cmd;
5913 bt_dev_dbg(hdev, "sock %p", sk);
5917 if (!hdev_is_powered(hdev)) {
5918 err = mgmt_cmd_complete(sk, hdev->id, op,
5919 MGMT_STATUS_NOT_POWERED,
5920 &cp->type, sizeof(cp->type));
5924 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5925 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5926 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5927 &cp->type, sizeof(cp->type));
5931 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5932 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5933 &cp->type, sizeof(cp->type));
5937 /* Can't start discovery when it is paused */
5938 if (hdev->discovery_paused) {
5939 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5940 &cp->type, sizeof(cp->type));
5944 /* Clear the discovery filter first to free any previously
5945 * allocated memory for the UUID list.
5947 hci_discovery_filter_clear(hdev);
5949 hdev->discovery.type = cp->type;
5950 hdev->discovery.report_invalid_rssi = false;
5951 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5952 hdev->discovery.limited = true;
5954 hdev->discovery.limited = false;
5956 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5962 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5963 start_discovery_complete);
5965 mgmt_pending_remove(cmd);
5969 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5972 hci_dev_unlock(hdev);
5976 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5977 void *data, u16 len)
5979 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5983 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5984 void *data, u16 len)
5986 return start_discovery_internal(sk, hdev,
5987 MGMT_OP_START_LIMITED_DISCOVERY,
5991 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5992 void *data, u16 len)
5994 struct mgmt_cp_start_service_discovery *cp = data;
5995 struct mgmt_pending_cmd *cmd;
5996 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5997 u16 uuid_count, expected_len;
6001 bt_dev_dbg(hdev, "sock %p", sk);
6005 if (!hdev_is_powered(hdev)) {
6006 err = mgmt_cmd_complete(sk, hdev->id,
6007 MGMT_OP_START_SERVICE_DISCOVERY,
6008 MGMT_STATUS_NOT_POWERED,
6009 &cp->type, sizeof(cp->type));
6013 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6014 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6015 err = mgmt_cmd_complete(sk, hdev->id,
6016 MGMT_OP_START_SERVICE_DISCOVERY,
6017 MGMT_STATUS_BUSY, &cp->type,
6022 if (hdev->discovery_paused) {
6023 err = mgmt_cmd_complete(sk, hdev->id,
6024 MGMT_OP_START_SERVICE_DISCOVERY,
6025 MGMT_STATUS_BUSY, &cp->type,
6030 uuid_count = __le16_to_cpu(cp->uuid_count);
6031 if (uuid_count > max_uuid_count) {
6032 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6034 err = mgmt_cmd_complete(sk, hdev->id,
6035 MGMT_OP_START_SERVICE_DISCOVERY,
6036 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6041 expected_len = sizeof(*cp) + uuid_count * 16;
6042 if (expected_len != len) {
6043 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6045 err = mgmt_cmd_complete(sk, hdev->id,
6046 MGMT_OP_START_SERVICE_DISCOVERY,
6047 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6052 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6053 err = mgmt_cmd_complete(sk, hdev->id,
6054 MGMT_OP_START_SERVICE_DISCOVERY,
6055 status, &cp->type, sizeof(cp->type));
6059 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6066 /* Clear the discovery filter first to free any previously
6067 * allocated memory for the UUID list.
6069 hci_discovery_filter_clear(hdev);
6071 hdev->discovery.result_filtering = true;
6072 hdev->discovery.type = cp->type;
6073 hdev->discovery.rssi = cp->rssi;
6074 hdev->discovery.uuid_count = uuid_count;
6076 if (uuid_count > 0) {
6077 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6079 if (!hdev->discovery.uuids) {
6080 err = mgmt_cmd_complete(sk, hdev->id,
6081 MGMT_OP_START_SERVICE_DISCOVERY,
6083 &cp->type, sizeof(cp->type));
6084 mgmt_pending_remove(cmd);
6089 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6090 start_discovery_complete);
6092 mgmt_pending_remove(cmd);
6096 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6099 hci_dev_unlock(hdev);
6103 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6105 struct mgmt_pending_cmd *cmd;
6107 bt_dev_dbg(hdev, "status %u", status);
6111 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6113 cmd->cmd_complete(cmd, mgmt_status(status));
6114 mgmt_pending_remove(cmd);
6117 hci_dev_unlock(hdev);
6120 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6122 struct mgmt_pending_cmd *cmd = data;
6124 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6127 bt_dev_dbg(hdev, "err %d", err);
6129 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6131 mgmt_pending_remove(cmd);
6134 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6137 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6139 return hci_stop_discovery_sync(hdev);
6142 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6145 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6146 struct mgmt_pending_cmd *cmd;
6149 bt_dev_dbg(hdev, "sock %p", sk);
6153 if (!hci_discovery_active(hdev)) {
6154 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6155 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6156 sizeof(mgmt_cp->type));
6160 if (hdev->discovery.type != mgmt_cp->type) {
6161 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6162 MGMT_STATUS_INVALID_PARAMS,
6163 &mgmt_cp->type, sizeof(mgmt_cp->type));
6167 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6173 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6174 stop_discovery_complete);
6176 mgmt_pending_remove(cmd);
6180 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6183 hci_dev_unlock(hdev);
6187 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6190 struct mgmt_cp_confirm_name *cp = data;
6191 struct inquiry_entry *e;
6194 bt_dev_dbg(hdev, "sock %p", sk);
6198 if (!hci_discovery_active(hdev)) {
6199 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6200 MGMT_STATUS_FAILED, &cp->addr,
6205 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6207 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6208 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6213 if (cp->name_known) {
6214 e->name_state = NAME_KNOWN;
6217 e->name_state = NAME_NEEDED;
6218 hci_inquiry_cache_update_resolve(hdev, e);
6221 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6222 &cp->addr, sizeof(cp->addr));
6225 hci_dev_unlock(hdev);
6229 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6232 struct mgmt_cp_block_device *cp = data;
6236 bt_dev_dbg(hdev, "sock %p", sk);
6238 if (!bdaddr_type_is_valid(cp->addr.type))
6239 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6240 MGMT_STATUS_INVALID_PARAMS,
6241 &cp->addr, sizeof(cp->addr));
6245 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6248 status = MGMT_STATUS_FAILED;
6252 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6254 status = MGMT_STATUS_SUCCESS;
6257 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6258 &cp->addr, sizeof(cp->addr));
6260 hci_dev_unlock(hdev);
6265 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6268 struct mgmt_cp_unblock_device *cp = data;
6272 bt_dev_dbg(hdev, "sock %p", sk);
6274 if (!bdaddr_type_is_valid(cp->addr.type))
6275 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6276 MGMT_STATUS_INVALID_PARAMS,
6277 &cp->addr, sizeof(cp->addr));
6281 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6284 status = MGMT_STATUS_INVALID_PARAMS;
6288 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6290 status = MGMT_STATUS_SUCCESS;
6293 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6294 &cp->addr, sizeof(cp->addr));
6296 hci_dev_unlock(hdev);
6301 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6303 return hci_update_eir_sync(hdev);
6306 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6309 struct mgmt_cp_set_device_id *cp = data;
6313 bt_dev_dbg(hdev, "sock %p", sk);
6315 source = __le16_to_cpu(cp->source);
6317 if (source > 0x0002)
6318 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6319 MGMT_STATUS_INVALID_PARAMS);
6323 hdev->devid_source = source;
6324 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6325 hdev->devid_product = __le16_to_cpu(cp->product);
6326 hdev->devid_version = __le16_to_cpu(cp->version);
6328 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6331 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6333 hci_dev_unlock(hdev);
6338 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6341 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6343 bt_dev_dbg(hdev, "status %d", err);
6346 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6348 struct cmd_lookup match = { NULL, hdev };
6350 struct adv_info *adv_instance;
6351 u8 status = mgmt_status(err);
6354 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6355 cmd_status_rsp, &status);
6359 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6360 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6362 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6364 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6367 new_settings(hdev, match.sk);
6372 /* If "Set Advertising" was just disabled and instance advertising was
6373 * set up earlier, then re-enable multi-instance advertising.
6375 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6376 list_empty(&hdev->adv_instances))
6379 instance = hdev->cur_adv_instance;
6381 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6382 struct adv_info, list);
6386 instance = adv_instance->instance;
6389 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6391 enable_advertising_instance(hdev, err);
6394 static int set_adv_sync(struct hci_dev *hdev, void *data)
6396 struct mgmt_pending_cmd *cmd = data;
6397 struct mgmt_mode *cp = cmd->param;
6400 if (cp->val == 0x02)
6401 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6403 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6405 cancel_adv_timeout(hdev);
6408 /* Switch to instance "0" for the Set Advertising setting.
6409 * We cannot use update_[adv|scan_rsp]_data() here as the
6410 * HCI_ADVERTISING flag is not yet set.
6412 hdev->cur_adv_instance = 0x00;
6414 if (ext_adv_capable(hdev)) {
6415 hci_start_ext_adv_sync(hdev, 0x00);
6417 hci_update_adv_data_sync(hdev, 0x00);
6418 hci_update_scan_rsp_data_sync(hdev, 0x00);
6419 hci_enable_advertising_sync(hdev);
6422 hci_disable_advertising_sync(hdev);
6428 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6431 struct mgmt_mode *cp = data;
6432 struct mgmt_pending_cmd *cmd;
6436 bt_dev_dbg(hdev, "sock %p", sk);
6438 status = mgmt_le_support(hdev);
6440 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6443 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6444 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6445 MGMT_STATUS_INVALID_PARAMS);
6447 if (hdev->advertising_paused)
6448 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6455 /* The following conditions are ones which mean that we should
6456 * not do any HCI communication but directly send a mgmt
6457 * response to user space (after toggling the flag if
6460 if (!hdev_is_powered(hdev) ||
6461 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6462 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6463 hci_dev_test_flag(hdev, HCI_MESH) ||
6464 hci_conn_num(hdev, LE_LINK) > 0 ||
6465 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6466 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6470 hdev->cur_adv_instance = 0x00;
6471 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6472 if (cp->val == 0x02)
6473 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6475 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6477 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6478 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6481 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6486 err = new_settings(hdev, sk);
6491 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6492 pending_find(MGMT_OP_SET_LE, hdev)) {
6493 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6498 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6502 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6503 set_advertising_complete);
6506 mgmt_pending_remove(cmd);
6509 hci_dev_unlock(hdev);
6513 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6514 void *data, u16 len)
6516 struct mgmt_cp_set_static_address *cp = data;
6519 bt_dev_dbg(hdev, "sock %p", sk);
6521 if (!lmp_le_capable(hdev))
6522 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6523 MGMT_STATUS_NOT_SUPPORTED);
6525 if (hdev_is_powered(hdev))
6526 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6527 MGMT_STATUS_REJECTED);
6529 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6530 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6531 return mgmt_cmd_status(sk, hdev->id,
6532 MGMT_OP_SET_STATIC_ADDRESS,
6533 MGMT_STATUS_INVALID_PARAMS);
6535 /* Two most significant bits shall be set */
6536 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6537 return mgmt_cmd_status(sk, hdev->id,
6538 MGMT_OP_SET_STATIC_ADDRESS,
6539 MGMT_STATUS_INVALID_PARAMS);
6544 bacpy(&hdev->static_addr, &cp->bdaddr);
6546 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6550 err = new_settings(hdev, sk);
6553 hci_dev_unlock(hdev);
6557 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6558 void *data, u16 len)
6560 struct mgmt_cp_set_scan_params *cp = data;
6561 __u16 interval, window;
6564 bt_dev_dbg(hdev, "sock %p", sk);
6566 if (!lmp_le_capable(hdev))
6567 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6568 MGMT_STATUS_NOT_SUPPORTED);
6570 interval = __le16_to_cpu(cp->interval);
6572 if (interval < 0x0004 || interval > 0x4000)
6573 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6574 MGMT_STATUS_INVALID_PARAMS);
6576 window = __le16_to_cpu(cp->window);
6578 if (window < 0x0004 || window > 0x4000)
6579 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6580 MGMT_STATUS_INVALID_PARAMS);
6582 if (window > interval)
6583 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6584 MGMT_STATUS_INVALID_PARAMS);
6588 hdev->le_scan_interval = interval;
6589 hdev->le_scan_window = window;
6591 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6594 /* If background scan is running, restart it so new parameters are
6597 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6598 hdev->discovery.state == DISCOVERY_STOPPED)
6599 hci_update_passive_scan(hdev);
6601 hci_dev_unlock(hdev);
6606 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6608 struct mgmt_pending_cmd *cmd = data;
6610 bt_dev_dbg(hdev, "err %d", err);
6613 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6616 struct mgmt_mode *cp = cmd->param;
6619 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6621 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6623 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6624 new_settings(hdev, cmd->sk);
6627 mgmt_pending_free(cmd);
6630 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6632 struct mgmt_pending_cmd *cmd = data;
6633 struct mgmt_mode *cp = cmd->param;
6635 return hci_write_fast_connectable_sync(hdev, cp->val);
6638 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6639 void *data, u16 len)
6641 struct mgmt_mode *cp = data;
6642 struct mgmt_pending_cmd *cmd;
6645 bt_dev_dbg(hdev, "sock %p", sk);
6647 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6648 hdev->hci_ver < BLUETOOTH_VER_1_2)
6649 return mgmt_cmd_status(sk, hdev->id,
6650 MGMT_OP_SET_FAST_CONNECTABLE,
6651 MGMT_STATUS_NOT_SUPPORTED);
6653 if (cp->val != 0x00 && cp->val != 0x01)
6654 return mgmt_cmd_status(sk, hdev->id,
6655 MGMT_OP_SET_FAST_CONNECTABLE,
6656 MGMT_STATUS_INVALID_PARAMS);
6660 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6661 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6665 if (!hdev_is_powered(hdev)) {
6666 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6667 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6668 new_settings(hdev, sk);
6672 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6677 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6678 fast_connectable_complete);
6681 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6682 MGMT_STATUS_FAILED);
6685 mgmt_pending_free(cmd);
6689 hci_dev_unlock(hdev);
6694 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6696 struct mgmt_pending_cmd *cmd = data;
6698 bt_dev_dbg(hdev, "err %d", err);
6701 u8 mgmt_err = mgmt_status(err);
6703 /* We need to restore the flag if related HCI commands
6706 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6708 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6710 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6711 new_settings(hdev, cmd->sk);
6714 mgmt_pending_free(cmd);
6717 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6721 status = hci_write_fast_connectable_sync(hdev, false);
6724 status = hci_update_scan_sync(hdev);
6726 /* Since only the advertising data flags will change, there
6727 * is no need to update the scan response data.
6730 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6735 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6737 struct mgmt_mode *cp = data;
6738 struct mgmt_pending_cmd *cmd;
6741 bt_dev_dbg(hdev, "sock %p", sk);
6743 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6744 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6745 MGMT_STATUS_NOT_SUPPORTED);
6747 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6749 MGMT_STATUS_REJECTED);
6751 if (cp->val != 0x00 && cp->val != 0x01)
6752 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6753 MGMT_STATUS_INVALID_PARAMS);
6757 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6758 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6762 if (!hdev_is_powered(hdev)) {
6764 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6765 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6766 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6767 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6768 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6771 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6773 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6777 err = new_settings(hdev, sk);
6781 /* Reject disabling when powered on */
6783 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6784 MGMT_STATUS_REJECTED);
6787 /* When configuring a dual-mode controller to operate
6788 * with LE only and using a static address, then switching
6789 * BR/EDR back on is not allowed.
6791 * Dual-mode controllers shall operate with the public
6792 * address as its identity address for BR/EDR and LE. So
6793 * reject the attempt to create an invalid configuration.
6795 * The same restrictions applies when secure connections
6796 * has been enabled. For BR/EDR this is a controller feature
6797 * while for LE it is a host stack feature. This means that
6798 * switching BR/EDR back on when secure connections has been
6799 * enabled is not a supported transaction.
6801 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6802 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6803 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6804 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6805 MGMT_STATUS_REJECTED);
6810 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6814 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6815 set_bredr_complete);
6818 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6819 MGMT_STATUS_FAILED);
6821 mgmt_pending_free(cmd);
6826 /* We need to flip the bit already here so that
6827 * hci_req_update_adv_data generates the correct flags.
6829 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6832 hci_dev_unlock(hdev);
6836 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6838 struct mgmt_pending_cmd *cmd = data;
6839 struct mgmt_mode *cp;
6841 bt_dev_dbg(hdev, "err %d", err);
6844 u8 mgmt_err = mgmt_status(err);
6846 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6854 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6855 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6858 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6859 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6862 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6863 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6867 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6868 new_settings(hdev, cmd->sk);
6871 mgmt_pending_free(cmd);
6874 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6876 struct mgmt_pending_cmd *cmd = data;
6877 struct mgmt_mode *cp = cmd->param;
6880 /* Force write of val */
6881 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6883 return hci_write_sc_support_sync(hdev, val);
6886 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6887 void *data, u16 len)
6889 struct mgmt_mode *cp = data;
6890 struct mgmt_pending_cmd *cmd;
6894 bt_dev_dbg(hdev, "sock %p", sk);
6896 if (!lmp_sc_capable(hdev) &&
6897 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6898 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6899 MGMT_STATUS_NOT_SUPPORTED);
6901 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6902 lmp_sc_capable(hdev) &&
6903 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6904 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6905 MGMT_STATUS_REJECTED);
6907 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6908 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6909 MGMT_STATUS_INVALID_PARAMS);
6913 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6914 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6918 changed = !hci_dev_test_and_set_flag(hdev,
6920 if (cp->val == 0x02)
6921 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6923 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6925 changed = hci_dev_test_and_clear_flag(hdev,
6927 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6930 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6935 err = new_settings(hdev, sk);
6942 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6943 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6944 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6948 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6952 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6953 set_secure_conn_complete);
6956 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6957 MGMT_STATUS_FAILED);
6959 mgmt_pending_free(cmd);
6963 hci_dev_unlock(hdev);
6967 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6968 void *data, u16 len)
6970 struct mgmt_mode *cp = data;
6971 bool changed, use_changed;
6974 bt_dev_dbg(hdev, "sock %p", sk);
6976 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6977 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6978 MGMT_STATUS_INVALID_PARAMS);
6983 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6985 changed = hci_dev_test_and_clear_flag(hdev,
6986 HCI_KEEP_DEBUG_KEYS);
6988 if (cp->val == 0x02)
6989 use_changed = !hci_dev_test_and_set_flag(hdev,
6990 HCI_USE_DEBUG_KEYS);
6992 use_changed = hci_dev_test_and_clear_flag(hdev,
6993 HCI_USE_DEBUG_KEYS);
6995 if (hdev_is_powered(hdev) && use_changed &&
6996 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6997 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6998 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6999 sizeof(mode), &mode);
7002 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7007 err = new_settings(hdev, sk);
7010 hci_dev_unlock(hdev);
7014 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7017 struct mgmt_cp_set_privacy *cp = cp_data;
7021 bt_dev_dbg(hdev, "sock %p", sk);
7023 if (!lmp_le_capable(hdev))
7024 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7025 MGMT_STATUS_NOT_SUPPORTED);
7027 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7028 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7029 MGMT_STATUS_INVALID_PARAMS);
7031 if (hdev_is_powered(hdev))
7032 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7033 MGMT_STATUS_REJECTED);
7037 /* If user space supports this command it is also expected to
7038 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7040 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7043 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7044 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7045 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7046 hci_adv_instances_set_rpa_expired(hdev, true);
7047 if (cp->privacy == 0x02)
7048 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7050 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7052 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7053 memset(hdev->irk, 0, sizeof(hdev->irk));
7054 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7055 hci_adv_instances_set_rpa_expired(hdev, false);
7056 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7059 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7064 err = new_settings(hdev, sk);
7067 hci_dev_unlock(hdev);
7071 static bool irk_is_valid(struct mgmt_irk_info *irk)
7073 switch (irk->addr.type) {
7074 case BDADDR_LE_PUBLIC:
7077 case BDADDR_LE_RANDOM:
7078 /* Two most significant bits shall be set */
7079 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7087 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7090 struct mgmt_cp_load_irks *cp = cp_data;
7091 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7092 sizeof(struct mgmt_irk_info));
7093 u16 irk_count, expected_len;
7096 bt_dev_dbg(hdev, "sock %p", sk);
7098 if (!lmp_le_capable(hdev))
7099 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7100 MGMT_STATUS_NOT_SUPPORTED);
7102 irk_count = __le16_to_cpu(cp->irk_count);
7103 if (irk_count > max_irk_count) {
7104 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7106 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7107 MGMT_STATUS_INVALID_PARAMS);
7110 expected_len = struct_size(cp, irks, irk_count);
7111 if (expected_len != len) {
7112 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7115 MGMT_STATUS_INVALID_PARAMS);
7118 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7120 for (i = 0; i < irk_count; i++) {
7121 struct mgmt_irk_info *key = &cp->irks[i];
7123 if (!irk_is_valid(key))
7124 return mgmt_cmd_status(sk, hdev->id,
7126 MGMT_STATUS_INVALID_PARAMS);
7131 hci_smp_irks_clear(hdev);
7133 for (i = 0; i < irk_count; i++) {
7134 struct mgmt_irk_info *irk = &cp->irks[i];
7136 if (hci_is_blocked_key(hdev,
7137 HCI_BLOCKED_KEY_TYPE_IRK,
7139 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7144 hci_add_irk(hdev, &irk->addr.bdaddr,
7145 le_addr_type(irk->addr.type), irk->val,
7149 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7151 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7153 hci_dev_unlock(hdev);
7159 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
7160 void *data, u16 len)
7162 struct mgmt_cp_set_advertising_params *cp = data;
7167 BT_DBG("%s", hdev->name);
7169 if (!lmp_le_capable(hdev))
7170 return mgmt_cmd_status(sk, hdev->id,
7171 MGMT_OP_SET_ADVERTISING_PARAMS,
7172 MGMT_STATUS_NOT_SUPPORTED);
7174 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7175 return mgmt_cmd_status(sk, hdev->id,
7176 MGMT_OP_SET_ADVERTISING_PARAMS,
7179 min_interval = __le16_to_cpu(cp->interval_min);
7180 max_interval = __le16_to_cpu(cp->interval_max);
7182 if (min_interval > max_interval ||
7183 min_interval < 0x0020 || max_interval > 0x4000)
7184 return mgmt_cmd_status(sk, hdev->id,
7185 MGMT_OP_SET_ADVERTISING_PARAMS,
7186 MGMT_STATUS_INVALID_PARAMS);
7190 hdev->le_adv_min_interval = min_interval;
7191 hdev->le_adv_max_interval = max_interval;
7192 hdev->adv_filter_policy = cp->filter_policy;
7193 hdev->adv_type = cp->type;
7195 err = mgmt_cmd_complete(sk, hdev->id,
7196 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
7198 hci_dev_unlock(hdev);
7203 static void set_advertising_data_complete(struct hci_dev *hdev,
7204 u8 status, u16 opcode)
7206 struct mgmt_cp_set_advertising_data *cp;
7207 struct mgmt_pending_cmd *cmd;
7209 BT_DBG("status 0x%02x", status);
7213 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
7220 mgmt_cmd_status(cmd->sk, hdev->id,
7221 MGMT_OP_SET_ADVERTISING_DATA,
7222 mgmt_status(status));
7224 mgmt_cmd_complete(cmd->sk, hdev->id,
7225 MGMT_OP_SET_ADVERTISING_DATA, 0,
7228 mgmt_pending_remove(cmd);
7231 hci_dev_unlock(hdev);
7234 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
7235 void *data, u16 len)
7237 struct mgmt_pending_cmd *cmd;
7238 struct hci_request req;
7239 struct mgmt_cp_set_advertising_data *cp = data;
7240 struct hci_cp_le_set_adv_data adv;
7243 BT_DBG("%s", hdev->name);
7245 if (!lmp_le_capable(hdev)) {
7246 return mgmt_cmd_status(sk, hdev->id,
7247 MGMT_OP_SET_ADVERTISING_DATA,
7248 MGMT_STATUS_NOT_SUPPORTED);
7253 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
7254 err = mgmt_cmd_status(sk, hdev->id,
7255 MGMT_OP_SET_ADVERTISING_DATA,
7260 if (len > HCI_MAX_AD_LENGTH) {
7261 err = mgmt_cmd_status(sk, hdev->id,
7262 MGMT_OP_SET_ADVERTISING_DATA,
7263 MGMT_STATUS_INVALID_PARAMS);
7267 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
7274 hci_req_init(&req, hdev);
7276 memset(&adv, 0, sizeof(adv));
7277 memcpy(adv.data, cp->data, len);
7280 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
7282 err = hci_req_run(&req, set_advertising_data_complete);
7284 mgmt_pending_remove(cmd);
7287 hci_dev_unlock(hdev);
7292 /* Adv White List feature */
7293 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7295 struct mgmt_cp_add_dev_white_list *cp;
7296 struct mgmt_pending_cmd *cmd;
7298 BT_DBG("status 0x%02x", status);
7302 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
7309 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7310 mgmt_status(status));
7312 mgmt_cmd_complete(cmd->sk, hdev->id,
7313 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
7315 mgmt_pending_remove(cmd);
7318 hci_dev_unlock(hdev);
7321 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
7322 void *data, u16 len)
7324 struct mgmt_pending_cmd *cmd;
7325 struct mgmt_cp_add_dev_white_list *cp = data;
7326 struct hci_request req;
7329 BT_DBG("%s", hdev->name);
7331 if (!lmp_le_capable(hdev))
7332 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7333 MGMT_STATUS_NOT_SUPPORTED);
7335 if (!hdev_is_powered(hdev))
7336 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7337 MGMT_STATUS_REJECTED);
7341 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
7342 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7347 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
7353 hci_req_init(&req, hdev);
7355 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
7357 err = hci_req_run(&req, add_white_list_complete);
7359 mgmt_pending_remove(cmd);
7364 hci_dev_unlock(hdev);
7369 static void remove_from_white_list_complete(struct hci_dev *hdev,
7370 u8 status, u16 opcode)
7372 struct mgmt_cp_remove_dev_from_white_list *cp;
7373 struct mgmt_pending_cmd *cmd;
7375 BT_DBG("status 0x%02x", status);
7379 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
7386 mgmt_cmd_status(cmd->sk, hdev->id,
7387 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7388 mgmt_status(status));
7390 mgmt_cmd_complete(cmd->sk, hdev->id,
7391 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
7394 mgmt_pending_remove(cmd);
7397 hci_dev_unlock(hdev);
7400 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
7401 void *data, u16 len)
7403 struct mgmt_pending_cmd *cmd;
7404 struct mgmt_cp_remove_dev_from_white_list *cp = data;
7405 struct hci_request req;
7408 BT_DBG("%s", hdev->name);
7410 if (!lmp_le_capable(hdev))
7411 return mgmt_cmd_status(sk, hdev->id,
7412 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7413 MGMT_STATUS_NOT_SUPPORTED);
7415 if (!hdev_is_powered(hdev))
7416 return mgmt_cmd_status(sk, hdev->id,
7417 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7418 MGMT_STATUS_REJECTED);
7422 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
7423 err = mgmt_cmd_status(sk, hdev->id,
7424 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7429 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7436 hci_req_init(&req, hdev);
7438 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
7440 err = hci_req_run(&req, remove_from_white_list_complete);
7442 mgmt_pending_remove(cmd);
7447 hci_dev_unlock(hdev);
7452 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
7455 struct mgmt_pending_cmd *cmd;
7457 BT_DBG("status 0x%02x", status);
7461 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
7466 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7467 mgmt_status(status));
7469 mgmt_cmd_complete(cmd->sk, hdev->id,
7470 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7473 mgmt_pending_remove(cmd);
7476 hci_dev_unlock(hdev);
7479 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
7480 void *data, u16 len)
7482 struct mgmt_pending_cmd *cmd;
7483 struct hci_request req;
7486 BT_DBG("%s", hdev->name);
7488 if (!lmp_le_capable(hdev))
7489 return mgmt_cmd_status(sk, hdev->id,
7490 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7491 MGMT_STATUS_NOT_SUPPORTED);
7493 if (!hdev_is_powered(hdev))
7494 return mgmt_cmd_status(sk, hdev->id,
7495 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7496 MGMT_STATUS_REJECTED);
7500 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
7501 err = mgmt_cmd_status(sk, hdev->id,
7502 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7507 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7514 hci_req_init(&req, hdev);
7516 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
7518 err = hci_req_run(&req, clear_white_list_complete);
7520 mgmt_pending_remove(cmd);
7525 hci_dev_unlock(hdev);
7530 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
7533 struct mgmt_cp_set_scan_rsp_data *cp;
7534 struct mgmt_pending_cmd *cmd;
7536 BT_DBG("status 0x%02x", status);
7540 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
7547 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7548 mgmt_status(status));
7550 mgmt_cmd_complete(cmd->sk, hdev->id,
7551 MGMT_OP_SET_SCAN_RSP_DATA, 0,
7554 mgmt_pending_remove(cmd);
7557 hci_dev_unlock(hdev);
7560 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
7563 struct mgmt_pending_cmd *cmd;
7564 struct hci_request req;
7565 struct mgmt_cp_set_scan_rsp_data *cp = data;
7566 struct hci_cp_le_set_scan_rsp_data rsp;
7569 BT_DBG("%s", hdev->name);
7571 if (!lmp_le_capable(hdev))
7572 return mgmt_cmd_status(sk, hdev->id,
7573 MGMT_OP_SET_SCAN_RSP_DATA,
7574 MGMT_STATUS_NOT_SUPPORTED);
7578 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
7579 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7584 if (len > HCI_MAX_AD_LENGTH) {
7585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7586 MGMT_STATUS_INVALID_PARAMS);
7590 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
7596 hci_req_init(&req, hdev);
7598 memset(&rsp, 0, sizeof(rsp));
7599 memcpy(rsp.data, cp->data, len);
7602 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
7604 err = hci_req_run(&req, set_scan_rsp_data_complete);
7606 mgmt_pending_remove(cmd);
7609 hci_dev_unlock(hdev);
7614 static void set_rssi_threshold_complete(struct hci_dev *hdev,
7615 u8 status, u16 opcode)
7617 struct mgmt_pending_cmd *cmd;
7619 BT_DBG("status 0x%02x", status);
7623 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7628 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7629 mgmt_status(status));
7631 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7634 mgmt_pending_remove(cmd);
7637 hci_dev_unlock(hdev);
7640 static void set_rssi_disable_complete(struct hci_dev *hdev,
7641 u8 status, u16 opcode)
7643 struct mgmt_pending_cmd *cmd;
7645 BT_DBG("status 0x%02x", status);
7649 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7654 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7655 mgmt_status(status));
7657 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7660 mgmt_pending_remove(cmd);
7663 hci_dev_unlock(hdev);
7666 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
7667 void *data, u16 len)
7670 struct hci_cp_set_rssi_threshold th = { 0, };
7671 struct mgmt_cp_set_enable_rssi *cp = data;
7672 struct hci_conn *conn;
7673 struct mgmt_pending_cmd *cmd;
7674 struct hci_request req;
7679 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7681 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7682 MGMT_STATUS_FAILED);
7686 if (!lmp_le_capable(hdev)) {
7687 mgmt_pending_remove(cmd);
7688 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7689 MGMT_STATUS_NOT_SUPPORTED);
7693 if (!hdev_is_powered(hdev)) {
7694 BT_DBG("%s", hdev->name);
7695 mgmt_pending_remove(cmd);
7696 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7697 MGMT_STATUS_NOT_POWERED);
7701 if (cp->link_type == 0x01)
7702 dest_type = LE_LINK;
7704 dest_type = ACL_LINK;
7706 /* Get LE/ACL link handle info */
7707 conn = hci_conn_hash_lookup_ba(hdev,
7708 dest_type, &cp->bdaddr);
7711 err = mgmt_cmd_complete(sk, hdev->id,
7712 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
7713 mgmt_pending_remove(cmd);
7717 hci_req_init(&req, hdev);
7719 th.hci_le_ext_opcode = 0x0B;
7721 th.conn_handle = conn->handle;
7722 th.alert_mask = 0x07;
7723 th.low_th = cp->low_th;
7724 th.in_range_th = cp->in_range_th;
7725 th.high_th = cp->high_th;
7727 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7728 err = hci_req_run(&req, set_rssi_threshold_complete);
7731 mgmt_pending_remove(cmd);
7732 BT_ERR("Error in requesting hci_req_run");
7737 hci_dev_unlock(hdev);
7741 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
7742 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7744 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
7745 struct mgmt_cp_set_enable_rssi *cp = data;
7746 struct mgmt_pending_cmd *cmd;
7751 mgmt_rp.status = rp->status;
7752 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7753 mgmt_rp.bt_address = cp->bdaddr;
7754 mgmt_rp.link_type = cp->link_type;
7756 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7757 MGMT_STATUS_SUCCESS, &mgmt_rp,
7758 sizeof(struct mgmt_cc_rsp_enable_rssi));
7760 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
7761 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7763 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
7764 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7765 &mgmt_rp.bt_address, true);
7769 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7771 mgmt_pending_remove(cmd);
7773 hci_dev_unlock(hdev);
7776 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
7777 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7779 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
7780 struct mgmt_cp_disable_rssi *cp = data;
7781 struct mgmt_pending_cmd *cmd;
7786 mgmt_rp.status = rp->status;
7787 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7788 mgmt_rp.bt_address = cp->bdaddr;
7789 mgmt_rp.link_type = cp->link_type;
7791 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7792 MGMT_STATUS_SUCCESS, &mgmt_rp,
7793 sizeof(struct mgmt_cc_rsp_enable_rssi));
7795 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
7796 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7798 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7799 &mgmt_rp.bt_address, false);
7803 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7805 mgmt_pending_remove(cmd);
7807 hci_dev_unlock(hdev);
7810 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
7811 void *data, u16 len)
7813 struct mgmt_pending_cmd *cmd;
7814 struct hci_request req;
7815 struct hci_cp_set_enable_rssi cp_en = { 0, };
7818 BT_DBG("Set Disable RSSI.");
7820 cp_en.hci_le_ext_opcode = 0x01;
7821 cp_en.le_enable_cs_Features = 0x00;
7822 cp_en.data[0] = 0x00;
7823 cp_en.data[1] = 0x00;
7824 cp_en.data[2] = 0x00;
7828 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7830 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7831 MGMT_STATUS_FAILED);
7835 if (!lmp_le_capable(hdev)) {
7836 mgmt_pending_remove(cmd);
7837 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7838 MGMT_STATUS_NOT_SUPPORTED);
7842 if (!hdev_is_powered(hdev)) {
7843 BT_DBG("%s", hdev->name);
7844 mgmt_pending_remove(cmd);
7845 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7846 MGMT_STATUS_NOT_POWERED);
7850 hci_req_init(&req, hdev);
7852 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7853 sizeof(struct hci_cp_set_enable_rssi),
7854 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7855 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7857 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7858 err = hci_req_run(&req, set_rssi_disable_complete);
7861 mgmt_pending_remove(cmd);
7862 BT_ERR("Error in requesting hci_req_run");
7867 hci_dev_unlock(hdev);
7871 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
7873 struct hci_cc_rsp_enable_rssi *rp = response;
7874 struct mgmt_pending_cmd *cmd_enable = NULL;
7875 struct mgmt_pending_cmd *cmd_disable = NULL;
7876 struct mgmt_cp_set_enable_rssi *cp_en;
7877 struct mgmt_cp_disable_rssi *cp_dis;
7880 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7881 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7882 hci_dev_unlock(hdev);
7885 BT_DBG("Enable Request");
7888 BT_DBG("Disable Request");
7891 cp_en = cmd_enable->param;
7896 switch (rp->le_ext_opcode) {
7898 BT_DBG("RSSI enabled.. Setting Threshold...");
7899 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
7900 cp_en, sizeof(*cp_en));
7904 BT_DBG("Sending RSSI enable success");
7905 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
7906 cp_en, rp, rp->status);
7910 } else if (cmd_disable) {
7911 cp_dis = cmd_disable->param;
7916 switch (rp->le_ext_opcode) {
7918 BT_DBG("Sending RSSI disable success");
7919 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7920 cp_dis, rp, rp->status);
7925 * Only unset RSSI Threshold values for the Link if
7926 * RSSI is monitored for other BREDR or LE Links
7928 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
7929 BT_DBG("Unset Threshold. Other links being monitored");
7930 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7931 cp_dis, rp, rp->status);
7933 BT_DBG("Unset Threshold. Disabling...");
7934 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
7935 cp_dis, sizeof(*cp_dis));
7942 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7945 struct mgmt_pending_cmd *cmd;
7947 BT_DBG("status 0x%02x", status);
7951 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7956 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7957 mgmt_status(status));
7959 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7962 mgmt_pending_remove(cmd);
7965 hci_dev_unlock(hdev);
7968 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7969 void *data, u16 len)
7971 struct mgmt_pending_cmd *cmd;
7972 struct hci_request req;
7973 struct mgmt_cp_set_enable_rssi *cp = data;
7974 struct hci_cp_set_enable_rssi cp_en = { 0, };
7977 BT_DBG("Set Enable RSSI.");
7979 cp_en.hci_le_ext_opcode = 0x01;
7980 cp_en.le_enable_cs_Features = 0x04;
7981 cp_en.data[0] = 0x00;
7982 cp_en.data[1] = 0x00;
7983 cp_en.data[2] = 0x00;
7987 if (!lmp_le_capable(hdev)) {
7988 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7989 MGMT_STATUS_NOT_SUPPORTED);
7993 if (!hdev_is_powered(hdev)) {
7994 BT_DBG("%s", hdev->name);
7995 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7996 MGMT_STATUS_NOT_POWERED);
8000 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
8001 BT_DBG("%s", hdev->name);
8002 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8007 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
8010 BT_DBG("%s", hdev->name);
8015 /* If RSSI is already enabled directly set Threshold values */
8016 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
8017 hci_dev_unlock(hdev);
8018 BT_DBG("RSSI Enabled. Directly set Threshold");
8019 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
8023 hci_req_init(&req, hdev);
8025 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
8026 sizeof(struct hci_cp_set_enable_rssi),
8027 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
8028 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
8030 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
8031 err = hci_req_run(&req, set_rssi_enable_complete);
8034 mgmt_pending_remove(cmd);
8035 BT_ERR("Error in requesting hci_req_run");
8040 hci_dev_unlock(hdev);
8045 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8047 struct mgmt_pending_cmd *cmd;
8049 BT_DBG("status 0x%02x", status);
8053 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
8057 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8058 MGMT_STATUS_SUCCESS, &status, 1);
8060 mgmt_pending_remove(cmd);
8063 hci_dev_unlock(hdev);
8066 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
8069 struct mgmt_pending_cmd *cmd;
8070 struct hci_request req;
8071 struct mgmt_cp_get_raw_rssi *cp = data;
8072 struct hci_cp_get_raw_rssi hci_cp;
8074 struct hci_conn *conn;
8078 BT_DBG("Get Raw RSSI.");
8082 if (!lmp_le_capable(hdev)) {
8083 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8084 MGMT_STATUS_NOT_SUPPORTED);
8088 if (cp->link_type == 0x01)
8089 dest_type = LE_LINK;
8091 dest_type = ACL_LINK;
8093 /* Get LE/BREDR link handle info */
8094 conn = hci_conn_hash_lookup_ba(hdev,
8095 dest_type, &cp->bt_address);
8097 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8098 MGMT_STATUS_NOT_CONNECTED);
8101 hci_cp.conn_handle = conn->handle;
8103 if (!hdev_is_powered(hdev)) {
8104 BT_DBG("%s", hdev->name);
8105 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8106 MGMT_STATUS_NOT_POWERED);
8110 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
8111 BT_DBG("%s", hdev->name);
8112 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8117 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
8119 BT_DBG("%s", hdev->name);
8124 hci_req_init(&req, hdev);
8126 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
8127 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
8128 err = hci_req_run(&req, get_raw_rssi_complete);
8131 mgmt_pending_remove(cmd);
8132 BT_ERR("Error in requesting hci_req_run");
8136 hci_dev_unlock(hdev);
8141 void mgmt_raw_rssi_response(struct hci_dev *hdev,
8142 struct hci_cc_rp_get_raw_rssi *rp, int success)
8144 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
8145 struct hci_conn *conn;
8147 mgmt_rp.status = rp->status;
8148 mgmt_rp.rssi_dbm = rp->rssi_dbm;
8150 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
8154 bacpy(&mgmt_rp.bt_address, &conn->dst);
8155 if (conn->type == LE_LINK)
8156 mgmt_rp.link_type = 0x01;
8158 mgmt_rp.link_type = 0x00;
8160 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
8161 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
8164 static void set_disable_threshold_complete(struct hci_dev *hdev,
8165 u8 status, u16 opcode)
8167 struct mgmt_pending_cmd *cmd;
8169 BT_DBG("status 0x%02x", status);
8173 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
8177 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8178 MGMT_STATUS_SUCCESS, &status, 1);
8180 mgmt_pending_remove(cmd);
8183 hci_dev_unlock(hdev);
8186 /** Removes monitoring for a link*/
8187 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
8188 void *data, u16 len)
8191 struct hci_cp_set_rssi_threshold th = { 0, };
8192 struct mgmt_cp_disable_rssi *cp = data;
8193 struct hci_conn *conn;
8194 struct mgmt_pending_cmd *cmd;
8195 struct hci_request req;
8198 BT_DBG("Set Disable RSSI.");
8202 if (!lmp_le_capable(hdev)) {
8203 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8204 MGMT_STATUS_NOT_SUPPORTED);
8208 /* Get LE/ACL link handle info*/
8209 if (cp->link_type == 0x01)
8210 dest_type = LE_LINK;
8212 dest_type = ACL_LINK;
8214 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
8216 err = mgmt_cmd_complete(sk, hdev->id,
8217 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
8221 th.hci_le_ext_opcode = 0x0B;
8223 th.conn_handle = conn->handle;
8224 th.alert_mask = 0x00;
8226 th.in_range_th = 0x00;
8229 if (!hdev_is_powered(hdev)) {
8230 BT_DBG("%s", hdev->name);
8231 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8236 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
8237 BT_DBG("%s", hdev->name);
8238 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8243 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
8246 BT_DBG("%s", hdev->name);
8251 hci_req_init(&req, hdev);
8253 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
8254 err = hci_req_run(&req, set_disable_threshold_complete);
8256 mgmt_pending_remove(cmd);
8257 BT_ERR("Error in requesting hci_req_run");
8262 hci_dev_unlock(hdev);
8267 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
8268 s8 alert_type, s8 rssi_dbm)
8270 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
8271 struct hci_conn *conn;
8273 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
8274 conn_handle, alert_type, rssi_dbm);
8276 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
8279 BT_ERR("RSSI alert Error: Device not found for handle");
8282 bacpy(&mgmt_ev.bdaddr, &conn->dst);
8284 if (conn->type == LE_LINK)
8285 mgmt_ev.link_type = 0x01;
8287 mgmt_ev.link_type = 0x00;
8289 mgmt_ev.alert_type = alert_type;
8290 mgmt_ev.rssi_dbm = rssi_dbm;
8292 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
8293 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
8297 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
8299 struct mgmt_pending_cmd *cmd;
8303 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8305 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8309 type = hdev->le_discovery.type;
8311 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8312 mgmt_status(status), &type, sizeof(type));
8313 mgmt_pending_remove(cmd);
8318 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
8321 unsigned long timeout = 0;
8323 BT_DBG("status %d", status);
8327 mgmt_start_le_discovery_failed(hdev, status);
8328 hci_dev_unlock(hdev);
8333 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
8334 hci_dev_unlock(hdev);
8336 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
8337 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
8342 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
8345 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
8346 void *data, u16 len)
8348 struct mgmt_cp_start_le_discovery *cp = data;
8349 struct mgmt_pending_cmd *cmd;
8350 struct hci_cp_le_set_scan_param param_cp;
8351 struct hci_cp_le_set_scan_enable enable_cp;
8352 struct hci_request req;
8353 u8 status, own_addr_type;
8356 BT_DBG("%s", hdev->name);
8358 if (!hdev_is_powered(hdev)) {
8359 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8360 MGMT_STATUS_NOT_POWERED);
8364 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
8365 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8370 if (cp->type != DISCOV_TYPE_LE) {
8371 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8372 MGMT_STATUS_INVALID_PARAMS);
8376 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
8382 hdev->le_discovery.type = cp->type;
8384 hci_req_init(&req, hdev);
8386 status = mgmt_le_support(hdev);
8388 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8390 mgmt_pending_remove(cmd);
8394 /* If controller is scanning, it means the background scanning
8395 * is running. Thus, we should temporarily stop it in order to
8396 * set the discovery scanning parameters.
8398 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
8399 hci_req_add_le_scan_disable(&req, false);
8401 memset(¶m_cp, 0, sizeof(param_cp));
8403 /* All active scans will be done with either a resolvable
8404 * private address (when privacy feature has been enabled)
8405 * or unresolvable private address.
8407 err = hci_update_random_address_sync(hdev, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
8409 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8410 MGMT_STATUS_FAILED);
8411 mgmt_pending_remove(cmd);
8415 param_cp.type = hdev->le_scan_type;
8416 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
8417 param_cp.window = cpu_to_le16(hdev->le_scan_window);
8418 param_cp.own_address_type = own_addr_type;
8419 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
8422 memset(&enable_cp, 0, sizeof(enable_cp));
8423 enable_cp.enable = LE_SCAN_ENABLE;
8424 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
8426 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
8429 err = hci_req_run(&req, start_le_discovery_complete);
8431 mgmt_pending_remove(cmd);
8433 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
8439 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
8441 struct mgmt_pending_cmd *cmd;
8444 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8448 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8449 mgmt_status(status), &hdev->le_discovery.type,
8450 sizeof(hdev->le_discovery.type));
8451 mgmt_pending_remove(cmd);
8456 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
8459 BT_DBG("status %d", status);
8464 mgmt_stop_le_discovery_failed(hdev, status);
8468 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8471 hci_dev_unlock(hdev);
8474 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
8475 void *data, u16 len)
8477 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
8478 struct mgmt_pending_cmd *cmd;
8479 struct hci_request req;
8482 BT_DBG("%s", hdev->name);
8486 if (!hci_le_discovery_active(hdev)) {
8487 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8488 MGMT_STATUS_REJECTED, &mgmt_cp->type,
8489 sizeof(mgmt_cp->type));
8493 if (hdev->le_discovery.type != mgmt_cp->type) {
8494 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8495 MGMT_STATUS_INVALID_PARAMS,
8496 &mgmt_cp->type, sizeof(mgmt_cp->type));
8500 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
8506 hci_req_init(&req, hdev);
8508 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
8509 BT_DBG("unknown le discovery state %u",
8510 hdev->le_discovery.state);
8512 mgmt_pending_remove(cmd);
8513 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8514 MGMT_STATUS_FAILED, &mgmt_cp->type,
8515 sizeof(mgmt_cp->type));
8519 cancel_delayed_work(&hdev->le_scan_disable);
8520 hci_req_add_le_scan_disable(&req, false);
8522 err = hci_req_run(&req, stop_le_discovery_complete);
8524 mgmt_pending_remove(cmd);
8526 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
8529 hci_dev_unlock(hdev);
8533 /* Separate LE discovery */
8534 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
8536 struct mgmt_ev_discovering ev;
8537 struct mgmt_pending_cmd *cmd;
8539 BT_DBG("%s le discovering %u", hdev->name, discovering);
8542 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8544 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8547 u8 type = hdev->le_discovery.type;
8549 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
8551 mgmt_pending_remove(cmd);
8554 memset(&ev, 0, sizeof(ev));
8555 ev.type = hdev->le_discovery.type;
8556 ev.discovering = discovering;
8558 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8561 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
8562 void *data, u16 len)
8566 BT_DBG("%s", hdev->name);
8570 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
8572 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
8574 hci_dev_unlock(hdev);
8579 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
8584 if (min > max || min < 6 || max > 3200)
8587 if (to_multiplier < 10 || to_multiplier > 3200)
8590 if (max >= to_multiplier * 8)
8593 max_latency = (to_multiplier * 8 / max) - 1;
8595 if (latency > 499 || latency > max_latency)
8601 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
8604 struct mgmt_cp_le_conn_update *cp = data;
8606 struct hci_conn *conn;
8607 u16 min, max, latency, supervision_timeout;
8610 if (!hdev_is_powered(hdev))
8611 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8612 MGMT_STATUS_NOT_POWERED);
8614 min = __le16_to_cpu(cp->conn_interval_min);
8615 max = __le16_to_cpu(cp->conn_interval_max);
8616 latency = __le16_to_cpu(cp->conn_latency);
8617 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
8619 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
8620 min, max, latency, supervision_timeout);
8622 err = check_le_conn_update_param(min, max, latency,
8623 supervision_timeout);
8626 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8627 MGMT_STATUS_INVALID_PARAMS);
8631 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
8633 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8634 MGMT_STATUS_NOT_CONNECTED);
8635 hci_dev_unlock(hdev);
8639 hci_dev_unlock(hdev);
8641 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
8643 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
8647 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
8650 struct mgmt_cp_set_manufacturer_data *cp;
8651 struct mgmt_pending_cmd *cmd;
8653 BT_DBG("status 0x%02x", status);
8657 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
8664 mgmt_cmd_status(cmd->sk, hdev->id,
8665 MGMT_OP_SET_MANUFACTURER_DATA,
8666 mgmt_status(status));
8668 mgmt_cmd_complete(cmd->sk, hdev->id,
8669 MGMT_OP_SET_MANUFACTURER_DATA, 0,
8672 mgmt_pending_remove(cmd);
8675 hci_dev_unlock(hdev);
8678 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
8679 void *data, u16 len)
8681 struct mgmt_pending_cmd *cmd;
8682 struct hci_request req;
8683 struct mgmt_cp_set_manufacturer_data *cp = data;
8684 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
8688 BT_DBG("%s", hdev->name);
8690 if (!lmp_bredr_capable(hdev))
8691 return mgmt_cmd_status(sk, hdev->id,
8692 MGMT_OP_SET_MANUFACTURER_DATA,
8693 MGMT_STATUS_NOT_SUPPORTED);
8695 if (cp->data[0] == 0 ||
8696 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
8697 return mgmt_cmd_status(sk, hdev->id,
8698 MGMT_OP_SET_MANUFACTURER_DATA,
8699 MGMT_STATUS_INVALID_PARAMS);
8701 if (cp->data[1] != 0xFF)
8702 return mgmt_cmd_status(sk, hdev->id,
8703 MGMT_OP_SET_MANUFACTURER_DATA,
8704 MGMT_STATUS_NOT_SUPPORTED);
8708 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
8709 err = mgmt_cmd_status(sk, hdev->id,
8710 MGMT_OP_SET_MANUFACTURER_DATA,
8715 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
8722 hci_req_init(&req, hdev);
8724 /* if new data is same as previous data then return command
8727 if (hdev->manufacturer_len == cp->data[0] - 1 &&
8728 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
8729 mgmt_pending_remove(cmd);
8730 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
8731 0, cp, sizeof(*cp));
8736 old_len = hdev->manufacturer_len;
8738 memcpy(old_data, hdev->manufacturer_data, old_len);
8740 hdev->manufacturer_len = cp->data[0] - 1;
8741 if (hdev->manufacturer_len > 0)
8742 memcpy(hdev->manufacturer_data, cp->data + 2,
8743 hdev->manufacturer_len);
8745 hci_update_eir_sync(hdev);
8747 err = hci_req_run(&req, set_manufacturer_data_complete);
8749 mgmt_pending_remove(cmd);
8754 hci_dev_unlock(hdev);
8759 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
8760 hdev->manufacturer_len = old_len;
8761 if (hdev->manufacturer_len > 0)
8762 memcpy(hdev->manufacturer_data, old_data,
8763 hdev->manufacturer_len);
8764 hci_dev_unlock(hdev);
8768 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
8769 void *data, u16 len)
8771 struct mgmt_cp_le_set_scan_params *cp = data;
8772 __u16 interval, window;
8775 BT_DBG("%s", hdev->name);
8777 if (!lmp_le_capable(hdev))
8778 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8779 MGMT_STATUS_NOT_SUPPORTED);
8781 interval = __le16_to_cpu(cp->interval);
8783 if (interval < 0x0004 || interval > 0x4000)
8784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8785 MGMT_STATUS_INVALID_PARAMS);
8787 window = __le16_to_cpu(cp->window);
8789 if (window < 0x0004 || window > 0x4000)
8790 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8791 MGMT_STATUS_INVALID_PARAMS);
8793 if (window > interval)
8794 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8795 MGMT_STATUS_INVALID_PARAMS);
8799 hdev->le_scan_type = cp->type;
8800 hdev->le_scan_interval = interval;
8801 hdev->le_scan_window = window;
8803 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
8806 /* If background scan is running, restart it so new parameters are
8809 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
8810 hdev->discovery.state == DISCOVERY_STOPPED) {
8811 struct hci_request req;
8813 hci_req_init(&req, hdev);
8815 hci_req_add_le_scan_disable(&req, false);
8816 hci_req_add_le_passive_scan(&req);
8818 hci_req_run(&req, NULL);
8821 hci_dev_unlock(hdev);
8826 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
8828 struct mgmt_ev_hardware_error ev;
8830 ev.error_code = err_code;
8831 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
8834 void mgmt_tx_timeout_error(struct hci_dev *hdev)
8836 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
8838 #endif /* TIZEN_BT */
8840 static bool ltk_is_valid(struct mgmt_ltk_info *key)
8842 if (key->initiator != 0x00 && key->initiator != 0x01)
8845 switch (key->addr.type) {
8846 case BDADDR_LE_PUBLIC:
8849 case BDADDR_LE_RANDOM:
8850 /* Two most significant bits shall be set */
8851 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
8859 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
8860 void *cp_data, u16 len)
8862 struct mgmt_cp_load_long_term_keys *cp = cp_data;
8863 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
8864 sizeof(struct mgmt_ltk_info));
8865 u16 key_count, expected_len;
8868 bt_dev_dbg(hdev, "sock %p", sk);
8870 if (!lmp_le_capable(hdev))
8871 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8872 MGMT_STATUS_NOT_SUPPORTED);
8874 key_count = __le16_to_cpu(cp->key_count);
8875 if (key_count > max_key_count) {
8876 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
8878 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8879 MGMT_STATUS_INVALID_PARAMS);
8882 expected_len = struct_size(cp, keys, key_count);
8883 if (expected_len != len) {
8884 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
8886 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8887 MGMT_STATUS_INVALID_PARAMS);
8890 bt_dev_dbg(hdev, "key_count %u", key_count);
8892 for (i = 0; i < key_count; i++) {
8893 struct mgmt_ltk_info *key = &cp->keys[i];
8895 if (!ltk_is_valid(key))
8896 return mgmt_cmd_status(sk, hdev->id,
8897 MGMT_OP_LOAD_LONG_TERM_KEYS,
8898 MGMT_STATUS_INVALID_PARAMS);
8903 hci_smp_ltks_clear(hdev);
8905 for (i = 0; i < key_count; i++) {
8906 struct mgmt_ltk_info *key = &cp->keys[i];
8907 u8 type, authenticated;
8909 if (hci_is_blocked_key(hdev,
8910 HCI_BLOCKED_KEY_TYPE_LTK,
8912 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
8917 switch (key->type) {
8918 case MGMT_LTK_UNAUTHENTICATED:
8919 authenticated = 0x00;
8920 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8922 case MGMT_LTK_AUTHENTICATED:
8923 authenticated = 0x01;
8924 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
8926 case MGMT_LTK_P256_UNAUTH:
8927 authenticated = 0x00;
8928 type = SMP_LTK_P256;
8930 case MGMT_LTK_P256_AUTH:
8931 authenticated = 0x01;
8932 type = SMP_LTK_P256;
8934 case MGMT_LTK_P256_DEBUG:
8935 authenticated = 0x00;
8936 type = SMP_LTK_P256_DEBUG;
8942 hci_add_ltk(hdev, &key->addr.bdaddr,
8943 le_addr_type(key->addr.type), type, authenticated,
8944 key->val, key->enc_size, key->ediv, key->rand);
8947 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
8950 hci_dev_unlock(hdev);
8955 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
8957 struct mgmt_pending_cmd *cmd = data;
8958 struct hci_conn *conn = cmd->user_data;
8959 struct mgmt_cp_get_conn_info *cp = cmd->param;
8960 struct mgmt_rp_get_conn_info rp;
8963 bt_dev_dbg(hdev, "err %d", err);
8965 memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
8967 status = mgmt_status(err);
8968 if (status == MGMT_STATUS_SUCCESS) {
8969 rp.rssi = conn->rssi;
8970 rp.tx_power = conn->tx_power;
8971 rp.max_tx_power = conn->max_tx_power;
8973 rp.rssi = HCI_RSSI_INVALID;
8974 rp.tx_power = HCI_TX_POWER_INVALID;
8975 rp.max_tx_power = HCI_TX_POWER_INVALID;
8978 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
8981 mgmt_pending_free(cmd);
8984 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
8986 struct mgmt_pending_cmd *cmd = data;
8987 struct mgmt_cp_get_conn_info *cp = cmd->param;
8988 struct hci_conn *conn;
8992 /* Make sure we are still connected */
8993 if (cp->addr.type == BDADDR_BREDR)
8994 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8997 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
8999 if (!conn || conn->state != BT_CONNECTED)
9000 return MGMT_STATUS_NOT_CONNECTED;
9002 cmd->user_data = conn;
9003 handle = cpu_to_le16(conn->handle);
9005 /* Refresh RSSI each time */
9006 err = hci_read_rssi_sync(hdev, handle);
9008 /* For LE links TX power does not change thus we don't need to
9009 * query for it once value is known.
9011 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
9012 conn->tx_power == HCI_TX_POWER_INVALID))
9013 err = hci_read_tx_power_sync(hdev, handle, 0x00);
9015 /* Max TX power needs to be read only once per connection */
9016 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
9017 err = hci_read_tx_power_sync(hdev, handle, 0x01);
9022 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
9025 struct mgmt_cp_get_conn_info *cp = data;
9026 struct mgmt_rp_get_conn_info rp;
9027 struct hci_conn *conn;
9028 unsigned long conn_info_age;
9031 bt_dev_dbg(hdev, "sock %p", sk);
9033 memset(&rp, 0, sizeof(rp));
9034 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9035 rp.addr.type = cp->addr.type;
9037 if (!bdaddr_type_is_valid(cp->addr.type))
9038 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9039 MGMT_STATUS_INVALID_PARAMS,
9044 if (!hdev_is_powered(hdev)) {
9045 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9046 MGMT_STATUS_NOT_POWERED, &rp,
9051 if (cp->addr.type == BDADDR_BREDR)
9052 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9055 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9057 if (!conn || conn->state != BT_CONNECTED) {
9058 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9059 MGMT_STATUS_NOT_CONNECTED, &rp,
9064 /* To avoid client trying to guess when to poll again for information we
9065 * calculate conn info age as random value between min/max set in hdev.
9067 conn_info_age = hdev->conn_info_min_age +
9068 prandom_u32_max(hdev->conn_info_max_age -
9069 hdev->conn_info_min_age);
9071 /* Query controller to refresh cached values if they are too old or were
9074 if (time_after(jiffies, conn->conn_info_timestamp +
9075 msecs_to_jiffies(conn_info_age)) ||
9076 !conn->conn_info_timestamp) {
9077 struct mgmt_pending_cmd *cmd;
9079 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
9084 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
9085 cmd, get_conn_info_complete);
9089 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9090 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9093 mgmt_pending_free(cmd);
9098 conn->conn_info_timestamp = jiffies;
9100 /* Cache is valid, just reply with values cached in hci_conn */
9101 rp.rssi = conn->rssi;
9102 rp.tx_power = conn->tx_power;
9103 rp.max_tx_power = conn->max_tx_power;
9105 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9106 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9110 hci_dev_unlock(hdev);
9114 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
9116 struct mgmt_pending_cmd *cmd = data;
9117 struct mgmt_cp_get_clock_info *cp = cmd->param;
9118 struct mgmt_rp_get_clock_info rp;
9119 struct hci_conn *conn = cmd->user_data;
9120 u8 status = mgmt_status(err);
9122 bt_dev_dbg(hdev, "err %d", err);
9124 memset(&rp, 0, sizeof(rp));
9125 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9126 rp.addr.type = cp->addr.type;
9131 rp.local_clock = cpu_to_le32(hdev->clock);
9134 rp.piconet_clock = cpu_to_le32(conn->clock);
9135 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
9139 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
9142 mgmt_pending_free(cmd);
9145 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
9147 struct mgmt_pending_cmd *cmd = data;
9148 struct mgmt_cp_get_clock_info *cp = cmd->param;
9149 struct hci_cp_read_clock hci_cp;
9150 struct hci_conn *conn;
9152 memset(&hci_cp, 0, sizeof(hci_cp));
9153 hci_read_clock_sync(hdev, &hci_cp);
9155 /* Make sure connection still exists */
9156 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
9157 if (!conn || conn->state != BT_CONNECTED)
9158 return MGMT_STATUS_NOT_CONNECTED;
9160 cmd->user_data = conn;
9161 hci_cp.handle = cpu_to_le16(conn->handle);
9162 hci_cp.which = 0x01; /* Piconet clock */
9164 return hci_read_clock_sync(hdev, &hci_cp);
9167 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
9170 struct mgmt_cp_get_clock_info *cp = data;
9171 struct mgmt_rp_get_clock_info rp;
9172 struct mgmt_pending_cmd *cmd;
9173 struct hci_conn *conn;
9176 bt_dev_dbg(hdev, "sock %p", sk);
9178 memset(&rp, 0, sizeof(rp));
9179 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9180 rp.addr.type = cp->addr.type;
9182 if (cp->addr.type != BDADDR_BREDR)
9183 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9184 MGMT_STATUS_INVALID_PARAMS,
9189 if (!hdev_is_powered(hdev)) {
9190 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9191 MGMT_STATUS_NOT_POWERED, &rp,
9196 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9197 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9199 if (!conn || conn->state != BT_CONNECTED) {
9200 err = mgmt_cmd_complete(sk, hdev->id,
9201 MGMT_OP_GET_CLOCK_INFO,
9202 MGMT_STATUS_NOT_CONNECTED,
9210 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
9214 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
9215 get_clock_info_complete);
9218 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9219 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9222 mgmt_pending_free(cmd);
9227 hci_dev_unlock(hdev);
9231 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
9233 struct hci_conn *conn;
9235 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
9239 if (conn->dst_type != type)
9242 if (conn->state != BT_CONNECTED)
9248 /* This function requires the caller holds hdev->lock */
9249 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
9250 u8 addr_type, u8 auto_connect)
9252 struct hci_conn_params *params;
9254 params = hci_conn_params_add(hdev, addr, addr_type);
9258 if (params->auto_connect == auto_connect)
9261 list_del_init(¶ms->action);
9263 switch (auto_connect) {
9264 case HCI_AUTO_CONN_DISABLED:
9265 case HCI_AUTO_CONN_LINK_LOSS:
9266 /* If auto connect is being disabled when we're trying to
9267 * connect to device, keep connecting.
9269 if (params->explicit_connect)
9270 list_add(¶ms->action, &hdev->pend_le_conns);
9272 case HCI_AUTO_CONN_REPORT:
9273 if (params->explicit_connect)
9274 list_add(¶ms->action, &hdev->pend_le_conns);
9276 list_add(¶ms->action, &hdev->pend_le_reports);
9278 case HCI_AUTO_CONN_DIRECT:
9279 case HCI_AUTO_CONN_ALWAYS:
9280 if (!is_connected(hdev, addr, addr_type))
9281 list_add(¶ms->action, &hdev->pend_le_conns);
9285 params->auto_connect = auto_connect;
9287 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
9288 addr, addr_type, auto_connect);
9293 static void device_added(struct sock *sk, struct hci_dev *hdev,
9294 bdaddr_t *bdaddr, u8 type, u8 action)
9296 struct mgmt_ev_device_added ev;
9298 bacpy(&ev.addr.bdaddr, bdaddr);
9299 ev.addr.type = type;
9302 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
9305 static int add_device_sync(struct hci_dev *hdev, void *data)
9307 return hci_update_passive_scan_sync(hdev);
9310 static int add_device(struct sock *sk, struct hci_dev *hdev,
9311 void *data, u16 len)
9313 struct mgmt_cp_add_device *cp = data;
9314 u8 auto_conn, addr_type;
9315 struct hci_conn_params *params;
9317 u32 current_flags = 0;
9318 u32 supported_flags;
9320 bt_dev_dbg(hdev, "sock %p", sk);
9322 if (!bdaddr_type_is_valid(cp->addr.type) ||
9323 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
9324 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9325 MGMT_STATUS_INVALID_PARAMS,
9326 &cp->addr, sizeof(cp->addr));
9328 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
9329 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9330 MGMT_STATUS_INVALID_PARAMS,
9331 &cp->addr, sizeof(cp->addr));
9335 if (cp->addr.type == BDADDR_BREDR) {
9336 /* Only incoming connections action is supported for now */
9337 if (cp->action != 0x01) {
9338 err = mgmt_cmd_complete(sk, hdev->id,
9340 MGMT_STATUS_INVALID_PARAMS,
9341 &cp->addr, sizeof(cp->addr));
9345 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
9351 hci_update_scan(hdev);
9356 addr_type = le_addr_type(cp->addr.type);
9358 if (cp->action == 0x02)
9359 auto_conn = HCI_AUTO_CONN_ALWAYS;
9360 else if (cp->action == 0x01)
9361 auto_conn = HCI_AUTO_CONN_DIRECT;
9363 auto_conn = HCI_AUTO_CONN_REPORT;
9365 /* Kernel internally uses conn_params with resolvable private
9366 * address, but Add Device allows only identity addresses.
9367 * Make sure it is enforced before calling
9368 * hci_conn_params_lookup.
9370 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9371 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9372 MGMT_STATUS_INVALID_PARAMS,
9373 &cp->addr, sizeof(cp->addr));
9377 /* If the connection parameters don't exist for this device,
9378 * they will be created and configured with defaults.
9380 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
9382 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9383 MGMT_STATUS_FAILED, &cp->addr,
9387 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9390 current_flags = params->flags;
9393 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
9398 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
9399 supported_flags = hdev->conn_flags;
9400 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
9401 supported_flags, current_flags);
9403 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9404 MGMT_STATUS_SUCCESS, &cp->addr,
9408 hci_dev_unlock(hdev);
9412 static void device_removed(struct sock *sk, struct hci_dev *hdev,
9413 bdaddr_t *bdaddr, u8 type)
9415 struct mgmt_ev_device_removed ev;
9417 bacpy(&ev.addr.bdaddr, bdaddr);
9418 ev.addr.type = type;
9420 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
9423 static int remove_device_sync(struct hci_dev *hdev, void *data)
9425 return hci_update_passive_scan_sync(hdev);
9428 static int remove_device(struct sock *sk, struct hci_dev *hdev,
9429 void *data, u16 len)
9431 struct mgmt_cp_remove_device *cp = data;
9434 bt_dev_dbg(hdev, "sock %p", sk);
9438 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9439 struct hci_conn_params *params;
9442 if (!bdaddr_type_is_valid(cp->addr.type)) {
9443 err = mgmt_cmd_complete(sk, hdev->id,
9444 MGMT_OP_REMOVE_DEVICE,
9445 MGMT_STATUS_INVALID_PARAMS,
9446 &cp->addr, sizeof(cp->addr));
9450 if (cp->addr.type == BDADDR_BREDR) {
9451 err = hci_bdaddr_list_del(&hdev->accept_list,
9455 err = mgmt_cmd_complete(sk, hdev->id,
9456 MGMT_OP_REMOVE_DEVICE,
9457 MGMT_STATUS_INVALID_PARAMS,
9463 hci_update_scan(hdev);
9465 device_removed(sk, hdev, &cp->addr.bdaddr,
9470 addr_type = le_addr_type(cp->addr.type);
9472 /* Kernel internally uses conn_params with resolvable private
9473 * address, but Remove Device allows only identity addresses.
9474 * Make sure it is enforced before calling
9475 * hci_conn_params_lookup.
9477 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9478 err = mgmt_cmd_complete(sk, hdev->id,
9479 MGMT_OP_REMOVE_DEVICE,
9480 MGMT_STATUS_INVALID_PARAMS,
9481 &cp->addr, sizeof(cp->addr));
9485 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9488 err = mgmt_cmd_complete(sk, hdev->id,
9489 MGMT_OP_REMOVE_DEVICE,
9490 MGMT_STATUS_INVALID_PARAMS,
9491 &cp->addr, sizeof(cp->addr));
9495 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
9496 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
9497 err = mgmt_cmd_complete(sk, hdev->id,
9498 MGMT_OP_REMOVE_DEVICE,
9499 MGMT_STATUS_INVALID_PARAMS,
9500 &cp->addr, sizeof(cp->addr));
9504 list_del(¶ms->action);
9505 list_del(¶ms->list);
9508 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
9510 struct hci_conn_params *p, *tmp;
9511 struct bdaddr_list *b, *btmp;
9513 if (cp->addr.type) {
9514 err = mgmt_cmd_complete(sk, hdev->id,
9515 MGMT_OP_REMOVE_DEVICE,
9516 MGMT_STATUS_INVALID_PARAMS,
9517 &cp->addr, sizeof(cp->addr));
9521 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
9522 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
9527 hci_update_scan(hdev);
9529 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
9530 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
9532 device_removed(sk, hdev, &p->addr, p->addr_type);
9533 if (p->explicit_connect) {
9534 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
9537 list_del(&p->action);
9542 bt_dev_dbg(hdev, "All LE connection parameters were removed");
9545 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
9548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
9549 MGMT_STATUS_SUCCESS, &cp->addr,
9552 hci_dev_unlock(hdev);
9556 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
9559 struct mgmt_cp_load_conn_param *cp = data;
9560 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
9561 sizeof(struct mgmt_conn_param));
9562 u16 param_count, expected_len;
9565 if (!lmp_le_capable(hdev))
9566 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9567 MGMT_STATUS_NOT_SUPPORTED);
9569 param_count = __le16_to_cpu(cp->param_count);
9570 if (param_count > max_param_count) {
9571 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
9573 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9574 MGMT_STATUS_INVALID_PARAMS);
9577 expected_len = struct_size(cp, params, param_count);
9578 if (expected_len != len) {
9579 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
9581 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9582 MGMT_STATUS_INVALID_PARAMS);
9585 bt_dev_dbg(hdev, "param_count %u", param_count);
9589 hci_conn_params_clear_disabled(hdev);
9591 for (i = 0; i < param_count; i++) {
9592 struct mgmt_conn_param *param = &cp->params[i];
9593 struct hci_conn_params *hci_param;
9594 u16 min, max, latency, timeout;
9597 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
9600 if (param->addr.type == BDADDR_LE_PUBLIC) {
9601 addr_type = ADDR_LE_DEV_PUBLIC;
9602 } else if (param->addr.type == BDADDR_LE_RANDOM) {
9603 addr_type = ADDR_LE_DEV_RANDOM;
9605 bt_dev_err(hdev, "ignoring invalid connection parameters");
9609 min = le16_to_cpu(param->min_interval);
9610 max = le16_to_cpu(param->max_interval);
9611 latency = le16_to_cpu(param->latency);
9612 timeout = le16_to_cpu(param->timeout);
9614 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
9615 min, max, latency, timeout);
9617 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
9618 bt_dev_err(hdev, "ignoring invalid connection parameters");
9622 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
9625 bt_dev_err(hdev, "failed to add connection parameters");
9629 hci_param->conn_min_interval = min;
9630 hci_param->conn_max_interval = max;
9631 hci_param->conn_latency = latency;
9632 hci_param->supervision_timeout = timeout;
9635 hci_dev_unlock(hdev);
9637 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
9641 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
9642 void *data, u16 len)
9644 struct mgmt_cp_set_external_config *cp = data;
9648 bt_dev_dbg(hdev, "sock %p", sk);
9650 if (hdev_is_powered(hdev))
9651 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9652 MGMT_STATUS_REJECTED);
9654 if (cp->config != 0x00 && cp->config != 0x01)
9655 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9656 MGMT_STATUS_INVALID_PARAMS);
9658 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
9659 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9660 MGMT_STATUS_NOT_SUPPORTED);
9665 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
9667 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
9669 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
9676 err = new_options(hdev, sk);
9678 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
9679 mgmt_index_removed(hdev);
9681 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
9682 hci_dev_set_flag(hdev, HCI_CONFIG);
9683 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9685 queue_work(hdev->req_workqueue, &hdev->power_on);
9687 set_bit(HCI_RAW, &hdev->flags);
9688 mgmt_index_added(hdev);
9693 hci_dev_unlock(hdev);
9697 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
9698 void *data, u16 len)
9700 struct mgmt_cp_set_public_address *cp = data;
9704 bt_dev_dbg(hdev, "sock %p", sk);
9706 if (hdev_is_powered(hdev))
9707 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9708 MGMT_STATUS_REJECTED);
9710 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
9711 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9712 MGMT_STATUS_INVALID_PARAMS);
9714 if (!hdev->set_bdaddr)
9715 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9716 MGMT_STATUS_NOT_SUPPORTED);
9720 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
9721 bacpy(&hdev->public_addr, &cp->bdaddr);
9723 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
9730 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
9731 err = new_options(hdev, sk);
9733 if (is_configured(hdev)) {
9734 mgmt_index_removed(hdev);
9736 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
9738 hci_dev_set_flag(hdev, HCI_CONFIG);
9739 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9741 queue_work(hdev->req_workqueue, &hdev->power_on);
9745 hci_dev_unlock(hdev);
9750 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
9754 struct mgmt_ev_device_name_update *ev = (void *)buf;
9760 bacpy(&ev->addr.bdaddr, bdaddr);
9761 ev->addr.type = BDADDR_BREDR;
9763 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9766 ev->eir_len = cpu_to_le16(eir_len);
9768 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
9769 sizeof(*ev) + eir_len, NULL);
9772 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9773 u8 link_type, u8 addr_type, u8 status)
9775 struct mgmt_ev_conn_update_failed ev;
9777 bacpy(&ev.addr.bdaddr, bdaddr);
9778 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9781 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
9782 &ev, sizeof(ev), NULL);
9785 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
9786 u8 link_type, u8 addr_type, u16 conn_interval,
9787 u16 conn_latency, u16 supervision_timeout)
9789 struct mgmt_ev_conn_updated ev;
9791 bacpy(&ev.addr.bdaddr, bdaddr);
9792 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9793 ev.conn_interval = cpu_to_le16(conn_interval);
9794 ev.conn_latency = cpu_to_le16(conn_latency);
9795 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
9797 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
9798 &ev, sizeof(ev), NULL);
9802 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
9805 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
9806 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
9807 u8 *h192, *r192, *h256, *r256;
9808 struct mgmt_pending_cmd *cmd = data;
9809 struct sk_buff *skb = cmd->skb;
9810 u8 status = mgmt_status(err);
9813 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
9818 status = MGMT_STATUS_FAILED;
9819 else if (IS_ERR(skb))
9820 status = mgmt_status(PTR_ERR(skb));
9822 status = mgmt_status(skb->data[0]);
9825 bt_dev_dbg(hdev, "status %u", status);
9827 mgmt_cp = cmd->param;
9830 status = mgmt_status(status);
9837 } else if (!bredr_sc_enabled(hdev)) {
9838 struct hci_rp_read_local_oob_data *rp;
9840 if (skb->len != sizeof(*rp)) {
9841 status = MGMT_STATUS_FAILED;
9844 status = MGMT_STATUS_SUCCESS;
9845 rp = (void *)skb->data;
9847 eir_len = 5 + 18 + 18;
9854 struct hci_rp_read_local_oob_ext_data *rp;
9856 if (skb->len != sizeof(*rp)) {
9857 status = MGMT_STATUS_FAILED;
9860 status = MGMT_STATUS_SUCCESS;
9861 rp = (void *)skb->data;
9863 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
9864 eir_len = 5 + 18 + 18;
9868 eir_len = 5 + 18 + 18 + 18 + 18;
9878 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
9885 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
9886 hdev->dev_class, 3);
9889 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9890 EIR_SSP_HASH_C192, h192, 16);
9891 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9892 EIR_SSP_RAND_R192, r192, 16);
9896 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9897 EIR_SSP_HASH_C256, h256, 16);
9898 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
9899 EIR_SSP_RAND_R256, r256, 16);
9903 mgmt_rp->type = mgmt_cp->type;
9904 mgmt_rp->eir_len = cpu_to_le16(eir_len);
9906 err = mgmt_cmd_complete(cmd->sk, hdev->id,
9907 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
9908 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
9909 if (err < 0 || status)
9912 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
9914 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
9915 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
9916 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
9918 if (skb && !IS_ERR(skb))
9922 mgmt_pending_remove(cmd);
9925 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
9926 struct mgmt_cp_read_local_oob_ext_data *cp)
9928 struct mgmt_pending_cmd *cmd;
9931 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
9936 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
9937 read_local_oob_ext_data_complete);
9940 mgmt_pending_remove(cmd);
9947 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
9948 void *data, u16 data_len)
9950 struct mgmt_cp_read_local_oob_ext_data *cp = data;
9951 struct mgmt_rp_read_local_oob_ext_data *rp;
9954 u8 status, flags, role, addr[7], hash[16], rand[16];
9957 bt_dev_dbg(hdev, "sock %p", sk);
9959 if (hdev_is_powered(hdev)) {
9961 case BIT(BDADDR_BREDR):
9962 status = mgmt_bredr_support(hdev);
9968 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
9969 status = mgmt_le_support(hdev);
9973 eir_len = 9 + 3 + 18 + 18 + 3;
9976 status = MGMT_STATUS_INVALID_PARAMS;
9981 status = MGMT_STATUS_NOT_POWERED;
9985 rp_len = sizeof(*rp) + eir_len;
9986 rp = kmalloc(rp_len, GFP_ATOMIC);
9990 if (!status && !lmp_ssp_capable(hdev)) {
9991 status = MGMT_STATUS_NOT_SUPPORTED;
10001 switch (cp->type) {
10002 case BIT(BDADDR_BREDR):
10003 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10004 err = read_local_ssp_oob_req(hdev, sk, cp);
10005 hci_dev_unlock(hdev);
10009 status = MGMT_STATUS_FAILED;
10012 eir_len = eir_append_data(rp->eir, eir_len,
10014 hdev->dev_class, 3);
10017 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10018 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
10019 smp_generate_oob(hdev, hash, rand) < 0) {
10020 hci_dev_unlock(hdev);
10021 status = MGMT_STATUS_FAILED;
10025 /* This should return the active RPA, but since the RPA
10026 * is only programmed on demand, it is really hard to fill
10027 * this in at the moment. For now disallow retrieving
10028 * local out-of-band data when privacy is in use.
10030 * Returning the identity address will not help here since
10031 * pairing happens before the identity resolving key is
10032 * known and thus the connection establishment happens
10033 * based on the RPA and not the identity address.
10035 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
10036 hci_dev_unlock(hdev);
10037 status = MGMT_STATUS_REJECTED;
10041 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
10042 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
10043 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
10044 bacmp(&hdev->static_addr, BDADDR_ANY))) {
10045 memcpy(addr, &hdev->static_addr, 6);
10048 memcpy(addr, &hdev->bdaddr, 6);
10052 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
10053 addr, sizeof(addr));
10055 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
10060 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
10061 &role, sizeof(role));
10063 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
10064 eir_len = eir_append_data(rp->eir, eir_len,
10066 hash, sizeof(hash));
10068 eir_len = eir_append_data(rp->eir, eir_len,
10070 rand, sizeof(rand));
10073 flags = mgmt_get_adv_discov_flags(hdev);
10075 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
10076 flags |= LE_AD_NO_BREDR;
10078 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
10079 &flags, sizeof(flags));
10083 hci_dev_unlock(hdev);
10085 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
10087 status = MGMT_STATUS_SUCCESS;
10090 rp->type = cp->type;
10091 rp->eir_len = cpu_to_le16(eir_len);
10093 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
10094 status, rp, sizeof(*rp) + eir_len);
10095 if (err < 0 || status)
10098 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10099 rp, sizeof(*rp) + eir_len,
10100 HCI_MGMT_OOB_DATA_EVENTS, sk);
10108 static u32 get_supported_adv_flags(struct hci_dev *hdev)
10112 flags |= MGMT_ADV_FLAG_CONNECTABLE;
10113 flags |= MGMT_ADV_FLAG_DISCOV;
10114 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
10115 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
10116 flags |= MGMT_ADV_FLAG_APPEARANCE;
10117 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
10118 flags |= MGMT_ADV_PARAM_DURATION;
10119 flags |= MGMT_ADV_PARAM_TIMEOUT;
10120 flags |= MGMT_ADV_PARAM_INTERVALS;
10121 flags |= MGMT_ADV_PARAM_TX_POWER;
10122 flags |= MGMT_ADV_PARAM_SCAN_RSP;
10124 /* In extended adv TX_POWER returned from Set Adv Param
10125 * will be always valid.
10127 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
10128 flags |= MGMT_ADV_FLAG_TX_POWER;
10130 if (ext_adv_capable(hdev)) {
10131 flags |= MGMT_ADV_FLAG_SEC_1M;
10132 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
10133 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
10135 if (hdev->le_features[1] & HCI_LE_PHY_2M)
10136 flags |= MGMT_ADV_FLAG_SEC_2M;
10138 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
10139 flags |= MGMT_ADV_FLAG_SEC_CODED;
10145 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
10146 void *data, u16 data_len)
10148 struct mgmt_rp_read_adv_features *rp;
10151 struct adv_info *adv_instance;
10152 u32 supported_flags;
10155 bt_dev_dbg(hdev, "sock %p", sk);
10157 if (!lmp_le_capable(hdev))
10158 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10159 MGMT_STATUS_REJECTED);
10161 hci_dev_lock(hdev);
10163 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
10164 rp = kmalloc(rp_len, GFP_ATOMIC);
10166 hci_dev_unlock(hdev);
10170 supported_flags = get_supported_adv_flags(hdev);
10172 rp->supported_flags = cpu_to_le32(supported_flags);
10173 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
10174 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
10175 rp->max_instances = hdev->le_num_of_adv_sets;
10176 rp->num_instances = hdev->adv_instance_cnt;
10178 instance = rp->instance;
10179 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
10180 /* Only instances 1-le_num_of_adv_sets are externally visible */
10181 if (adv_instance->instance <= hdev->adv_instance_cnt) {
10182 *instance = adv_instance->instance;
10185 rp->num_instances--;
10190 hci_dev_unlock(hdev);
10192 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10193 MGMT_STATUS_SUCCESS, rp, rp_len);
10200 static u8 calculate_name_len(struct hci_dev *hdev)
10202 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
10204 return eir_append_local_name(hdev, buf, 0);
10207 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
10210 u8 max_len = HCI_MAX_AD_LENGTH;
10213 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
10214 MGMT_ADV_FLAG_LIMITED_DISCOV |
10215 MGMT_ADV_FLAG_MANAGED_FLAGS))
10218 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
10221 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
10222 max_len -= calculate_name_len(hdev);
10224 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
10231 static bool flags_managed(u32 adv_flags)
10233 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
10234 MGMT_ADV_FLAG_LIMITED_DISCOV |
10235 MGMT_ADV_FLAG_MANAGED_FLAGS);
10238 static bool tx_power_managed(u32 adv_flags)
10240 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
10243 static bool name_managed(u32 adv_flags)
10245 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
10248 static bool appearance_managed(u32 adv_flags)
10250 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
10253 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
10254 u8 len, bool is_adv_data)
10259 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
10264 /* Make sure that the data is correctly formatted. */
10265 for (i = 0; i < len; i += (cur_len + 1)) {
10271 if (data[i + 1] == EIR_FLAGS &&
10272 (!is_adv_data || flags_managed(adv_flags)))
10275 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
10278 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
10281 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
10284 if (data[i + 1] == EIR_APPEARANCE &&
10285 appearance_managed(adv_flags))
10288 /* If the current field length would exceed the total data
10289 * length, then it's invalid.
10291 if (i + cur_len >= len)
10298 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
10300 u32 supported_flags, phy_flags;
10302 /* The current implementation only supports a subset of the specified
10303 * flags. Also need to check mutual exclusiveness of sec flags.
10305 supported_flags = get_supported_adv_flags(hdev);
10306 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
10307 if (adv_flags & ~supported_flags ||
10308 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
10314 static bool adv_busy(struct hci_dev *hdev)
10316 return pending_find(MGMT_OP_SET_LE, hdev);
10319 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
10322 struct adv_info *adv, *n;
10324 bt_dev_dbg(hdev, "err %d", err);
10326 hci_dev_lock(hdev);
10328 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
10335 adv->pending = false;
10339 instance = adv->instance;
10341 if (hdev->cur_adv_instance == instance)
10342 cancel_adv_timeout(hdev);
10344 hci_remove_adv_instance(hdev, instance);
10345 mgmt_advertising_removed(sk, hdev, instance);
10348 hci_dev_unlock(hdev);
10351 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
10353 struct mgmt_pending_cmd *cmd = data;
10354 struct mgmt_cp_add_advertising *cp = cmd->param;
10355 struct mgmt_rp_add_advertising rp;
10357 memset(&rp, 0, sizeof(rp));
10359 rp.instance = cp->instance;
10362 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10365 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10366 mgmt_status(err), &rp, sizeof(rp));
10368 add_adv_complete(hdev, cmd->sk, cp->instance, err);
10370 mgmt_pending_free(cmd);
10373 static int add_advertising_sync(struct hci_dev *hdev, void *data)
10375 struct mgmt_pending_cmd *cmd = data;
10376 struct mgmt_cp_add_advertising *cp = cmd->param;
10378 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
10381 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
10382 void *data, u16 data_len)
10384 struct mgmt_cp_add_advertising *cp = data;
10385 struct mgmt_rp_add_advertising rp;
10388 u16 timeout, duration;
10389 unsigned int prev_instance_cnt;
10390 u8 schedule_instance = 0;
10391 struct adv_info *adv, *next_instance;
10393 struct mgmt_pending_cmd *cmd;
10395 bt_dev_dbg(hdev, "sock %p", sk);
10397 status = mgmt_le_support(hdev);
10399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10402 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10403 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10404 MGMT_STATUS_INVALID_PARAMS);
10406 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
10407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10408 MGMT_STATUS_INVALID_PARAMS);
10410 flags = __le32_to_cpu(cp->flags);
10411 timeout = __le16_to_cpu(cp->timeout);
10412 duration = __le16_to_cpu(cp->duration);
10414 if (!requested_adv_flags_are_valid(hdev, flags))
10415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10416 MGMT_STATUS_INVALID_PARAMS);
10418 hci_dev_lock(hdev);
10420 if (timeout && !hdev_is_powered(hdev)) {
10421 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10422 MGMT_STATUS_REJECTED);
10426 if (adv_busy(hdev)) {
10427 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10432 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
10433 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
10434 cp->scan_rsp_len, false)) {
10435 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10436 MGMT_STATUS_INVALID_PARAMS);
10440 prev_instance_cnt = hdev->adv_instance_cnt;
10442 adv = hci_add_adv_instance(hdev, cp->instance, flags,
10443 cp->adv_data_len, cp->data,
10445 cp->data + cp->adv_data_len,
10447 HCI_ADV_TX_POWER_NO_PREFERENCE,
10448 hdev->le_adv_min_interval,
10449 hdev->le_adv_max_interval, 0);
10451 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10452 MGMT_STATUS_FAILED);
10456 /* Only trigger an advertising added event if a new instance was
10459 if (hdev->adv_instance_cnt > prev_instance_cnt)
10460 mgmt_advertising_added(sk, hdev, cp->instance);
10462 if (hdev->cur_adv_instance == cp->instance) {
10463 /* If the currently advertised instance is being changed then
10464 * cancel the current advertising and schedule the next
10465 * instance. If there is only one instance then the overridden
10466 * advertising data will be visible right away.
10468 cancel_adv_timeout(hdev);
10470 next_instance = hci_get_next_instance(hdev, cp->instance);
10472 schedule_instance = next_instance->instance;
10473 } else if (!hdev->adv_instance_timeout) {
10474 /* Immediately advertise the new instance if no other
10475 * instance is currently being advertised.
10477 schedule_instance = cp->instance;
10480 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
10481 * there is no instance to be advertised then we have no HCI
10482 * communication to make. Simply return.
10484 if (!hdev_is_powered(hdev) ||
10485 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10486 !schedule_instance) {
10487 rp.instance = cp->instance;
10488 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10489 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10493 /* We're good to go, update advertising data, parameters, and start
10496 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
10503 cp->instance = schedule_instance;
10505 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
10506 add_advertising_complete);
10508 mgmt_pending_free(cmd);
10511 hci_dev_unlock(hdev);
10516 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
10519 struct mgmt_pending_cmd *cmd = data;
10520 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
10521 struct mgmt_rp_add_ext_adv_params rp;
10522 struct adv_info *adv;
10525 BT_DBG("%s", hdev->name);
10527 hci_dev_lock(hdev);
10529 adv = hci_find_adv_instance(hdev, cp->instance);
10533 rp.instance = cp->instance;
10534 rp.tx_power = adv->tx_power;
10536 /* While we're at it, inform userspace of the available space for this
10537 * advertisement, given the flags that will be used.
10539 flags = __le32_to_cpu(cp->flags);
10540 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10541 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10544 /* If this advertisement was previously advertising and we
10545 * failed to update it, we signal that it has been removed and
10546 * delete its structure
10549 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
10551 hci_remove_adv_instance(hdev, cp->instance);
10553 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10556 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10557 mgmt_status(err), &rp, sizeof(rp));
10562 mgmt_pending_free(cmd);
10564 hci_dev_unlock(hdev);
10567 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
10569 struct mgmt_pending_cmd *cmd = data;
10570 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
10572 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
10575 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
10576 void *data, u16 data_len)
10578 struct mgmt_cp_add_ext_adv_params *cp = data;
10579 struct mgmt_rp_add_ext_adv_params rp;
10580 struct mgmt_pending_cmd *cmd = NULL;
10581 struct adv_info *adv;
10582 u32 flags, min_interval, max_interval;
10583 u16 timeout, duration;
10588 BT_DBG("%s", hdev->name);
10590 status = mgmt_le_support(hdev);
10592 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10595 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10596 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10597 MGMT_STATUS_INVALID_PARAMS);
10599 /* The purpose of breaking add_advertising into two separate MGMT calls
10600 * for params and data is to allow more parameters to be added to this
10601 * structure in the future. For this reason, we verify that we have the
10602 * bare minimum structure we know of when the interface was defined. Any
10603 * extra parameters we don't know about will be ignored in this request.
10605 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
10606 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10607 MGMT_STATUS_INVALID_PARAMS);
10609 flags = __le32_to_cpu(cp->flags);
10611 if (!requested_adv_flags_are_valid(hdev, flags))
10612 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10613 MGMT_STATUS_INVALID_PARAMS);
10615 hci_dev_lock(hdev);
10617 /* In new interface, we require that we are powered to register */
10618 if (!hdev_is_powered(hdev)) {
10619 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10620 MGMT_STATUS_REJECTED);
10624 if (adv_busy(hdev)) {
10625 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10630 /* Parse defined parameters from request, use defaults otherwise */
10631 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
10632 __le16_to_cpu(cp->timeout) : 0;
10634 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
10635 __le16_to_cpu(cp->duration) :
10636 hdev->def_multi_adv_rotation_duration;
10638 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10639 __le32_to_cpu(cp->min_interval) :
10640 hdev->le_adv_min_interval;
10642 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10643 __le32_to_cpu(cp->max_interval) :
10644 hdev->le_adv_max_interval;
10646 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
10648 HCI_ADV_TX_POWER_NO_PREFERENCE;
10650 /* Create advertising instance with no advertising or response data */
10651 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
10652 timeout, duration, tx_power, min_interval,
10656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10657 MGMT_STATUS_FAILED);
10661 /* Submit request for advertising params if ext adv available */
10662 if (ext_adv_capable(hdev)) {
10663 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
10667 hci_remove_adv_instance(hdev, cp->instance);
10671 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
10672 add_ext_adv_params_complete);
10674 mgmt_pending_free(cmd);
10676 rp.instance = cp->instance;
10677 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
10678 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10679 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10680 err = mgmt_cmd_complete(sk, hdev->id,
10681 MGMT_OP_ADD_EXT_ADV_PARAMS,
10682 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10686 hci_dev_unlock(hdev);
10691 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
10693 struct mgmt_pending_cmd *cmd = data;
10694 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
10695 struct mgmt_rp_add_advertising rp;
10697 add_adv_complete(hdev, cmd->sk, cp->instance, err);
10699 memset(&rp, 0, sizeof(rp));
10701 rp.instance = cp->instance;
10704 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10707 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10708 mgmt_status(err), &rp, sizeof(rp));
10710 mgmt_pending_free(cmd);
10713 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
10715 struct mgmt_pending_cmd *cmd = data;
10716 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
10719 if (ext_adv_capable(hdev)) {
10720 err = hci_update_adv_data_sync(hdev, cp->instance);
10724 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
10728 return hci_enable_ext_advertising_sync(hdev, cp->instance);
10731 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
10734 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
10737 struct mgmt_cp_add_ext_adv_data *cp = data;
10738 struct mgmt_rp_add_ext_adv_data rp;
10739 u8 schedule_instance = 0;
10740 struct adv_info *next_instance;
10741 struct adv_info *adv_instance;
10743 struct mgmt_pending_cmd *cmd;
10745 BT_DBG("%s", hdev->name);
10747 hci_dev_lock(hdev);
10749 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10751 if (!adv_instance) {
10752 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10753 MGMT_STATUS_INVALID_PARAMS);
10757 /* In new interface, we require that we are powered to register */
10758 if (!hdev_is_powered(hdev)) {
10759 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10760 MGMT_STATUS_REJECTED);
10761 goto clear_new_instance;
10764 if (adv_busy(hdev)) {
10765 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10767 goto clear_new_instance;
10770 /* Validate new data */
10771 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
10772 cp->adv_data_len, true) ||
10773 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
10774 cp->adv_data_len, cp->scan_rsp_len, false)) {
10775 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10776 MGMT_STATUS_INVALID_PARAMS);
10777 goto clear_new_instance;
10780 /* Set the data in the advertising instance */
10781 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
10782 cp->data, cp->scan_rsp_len,
10783 cp->data + cp->adv_data_len);
10785 /* If using software rotation, determine next instance to use */
10786 if (hdev->cur_adv_instance == cp->instance) {
10787 /* If the currently advertised instance is being changed
10788 * then cancel the current advertising and schedule the
10789 * next instance. If there is only one instance then the
10790 * overridden advertising data will be visible right
10793 cancel_adv_timeout(hdev);
10795 next_instance = hci_get_next_instance(hdev, cp->instance);
10797 schedule_instance = next_instance->instance;
10798 } else if (!hdev->adv_instance_timeout) {
10799 /* Immediately advertise the new instance if no other
10800 * instance is currently being advertised.
10802 schedule_instance = cp->instance;
10805 /* If the HCI_ADVERTISING flag is set or there is no instance to
10806 * be advertised then we have no HCI communication to make.
10809 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
10810 if (adv_instance->pending) {
10811 mgmt_advertising_added(sk, hdev, cp->instance);
10812 adv_instance->pending = false;
10814 rp.instance = cp->instance;
10815 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10816 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10820 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
10824 goto clear_new_instance;
10827 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
10828 add_ext_adv_data_complete);
10830 mgmt_pending_free(cmd);
10831 goto clear_new_instance;
10834 /* We were successful in updating data, so trigger advertising_added
10835 * event if this is an instance that wasn't previously advertising. If
10836 * a failure occurs in the requests we initiated, we will remove the
10837 * instance again in add_advertising_complete
10839 if (adv_instance->pending)
10840 mgmt_advertising_added(sk, hdev, cp->instance);
10844 clear_new_instance:
10845 hci_remove_adv_instance(hdev, cp->instance);
10848 hci_dev_unlock(hdev);
10853 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
10856 struct mgmt_pending_cmd *cmd = data;
10857 struct mgmt_cp_remove_advertising *cp = cmd->param;
10858 struct mgmt_rp_remove_advertising rp;
10860 bt_dev_dbg(hdev, "err %d", err);
10862 memset(&rp, 0, sizeof(rp));
10863 rp.instance = cp->instance;
10866 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10869 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10870 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10872 mgmt_pending_free(cmd);
10875 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
10877 struct mgmt_pending_cmd *cmd = data;
10878 struct mgmt_cp_remove_advertising *cp = cmd->param;
10881 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
10885 if (list_empty(&hdev->adv_instances))
10886 err = hci_disable_advertising_sync(hdev);
10891 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
10892 void *data, u16 data_len)
10894 struct mgmt_cp_remove_advertising *cp = data;
10895 struct mgmt_pending_cmd *cmd;
10898 bt_dev_dbg(hdev, "sock %p", sk);
10900 hci_dev_lock(hdev);
10902 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
10903 err = mgmt_cmd_status(sk, hdev->id,
10904 MGMT_OP_REMOVE_ADVERTISING,
10905 MGMT_STATUS_INVALID_PARAMS);
10909 if (pending_find(MGMT_OP_SET_LE, hdev)) {
10910 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10915 if (list_empty(&hdev->adv_instances)) {
10916 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
10917 MGMT_STATUS_INVALID_PARAMS);
10921 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
10928 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
10929 remove_advertising_complete);
10931 mgmt_pending_free(cmd);
10934 hci_dev_unlock(hdev);
10939 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
10940 void *data, u16 data_len)
10942 struct mgmt_cp_get_adv_size_info *cp = data;
10943 struct mgmt_rp_get_adv_size_info rp;
10944 u32 flags, supported_flags;
10946 bt_dev_dbg(hdev, "sock %p", sk);
10948 if (!lmp_le_capable(hdev))
10949 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10950 MGMT_STATUS_REJECTED);
10952 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10953 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10954 MGMT_STATUS_INVALID_PARAMS);
10956 flags = __le32_to_cpu(cp->flags);
10958 /* The current implementation only supports a subset of the specified
10961 supported_flags = get_supported_adv_flags(hdev);
10962 if (flags & ~supported_flags)
10963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10964 MGMT_STATUS_INVALID_PARAMS);
10966 rp.instance = cp->instance;
10967 rp.flags = cp->flags;
10968 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10969 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10971 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
10972 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10975 static const struct hci_mgmt_handler mgmt_handlers[] = {
10976 { NULL }, /* 0x0000 (no command) */
10977 { read_version, MGMT_READ_VERSION_SIZE,
10979 HCI_MGMT_UNTRUSTED },
10980 { read_commands, MGMT_READ_COMMANDS_SIZE,
10982 HCI_MGMT_UNTRUSTED },
10983 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
10985 HCI_MGMT_UNTRUSTED },
10986 { read_controller_info, MGMT_READ_INFO_SIZE,
10987 HCI_MGMT_UNTRUSTED },
10988 { set_powered, MGMT_SETTING_SIZE },
10989 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
10990 { set_connectable, MGMT_SETTING_SIZE },
10991 { set_fast_connectable, MGMT_SETTING_SIZE },
10992 { set_bondable, MGMT_SETTING_SIZE },
10993 { set_link_security, MGMT_SETTING_SIZE },
10994 { set_ssp, MGMT_SETTING_SIZE },
10995 { set_hs, MGMT_SETTING_SIZE },
10996 { set_le, MGMT_SETTING_SIZE },
10997 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
10998 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
10999 { add_uuid, MGMT_ADD_UUID_SIZE },
11000 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
11001 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
11002 HCI_MGMT_VAR_LEN },
11003 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
11004 HCI_MGMT_VAR_LEN },
11005 { disconnect, MGMT_DISCONNECT_SIZE },
11006 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
11007 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
11008 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
11009 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
11010 { pair_device, MGMT_PAIR_DEVICE_SIZE },
11011 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
11012 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
11013 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
11014 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
11015 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
11016 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
11017 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
11018 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
11019 HCI_MGMT_VAR_LEN },
11020 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
11021 { start_discovery, MGMT_START_DISCOVERY_SIZE },
11022 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
11023 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
11024 { block_device, MGMT_BLOCK_DEVICE_SIZE },
11025 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
11026 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
11027 { set_advertising, MGMT_SETTING_SIZE },
11028 { set_bredr, MGMT_SETTING_SIZE },
11029 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
11030 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
11031 { set_secure_conn, MGMT_SETTING_SIZE },
11032 { set_debug_keys, MGMT_SETTING_SIZE },
11033 { set_privacy, MGMT_SET_PRIVACY_SIZE },
11034 { load_irks, MGMT_LOAD_IRKS_SIZE,
11035 HCI_MGMT_VAR_LEN },
11036 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
11037 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
11038 { add_device, MGMT_ADD_DEVICE_SIZE },
11039 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
11040 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
11041 HCI_MGMT_VAR_LEN },
11042 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
11044 HCI_MGMT_UNTRUSTED },
11045 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
11046 HCI_MGMT_UNCONFIGURED |
11047 HCI_MGMT_UNTRUSTED },
11048 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
11049 HCI_MGMT_UNCONFIGURED },
11050 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
11051 HCI_MGMT_UNCONFIGURED },
11052 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
11053 HCI_MGMT_VAR_LEN },
11054 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
11055 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
11057 HCI_MGMT_UNTRUSTED },
11058 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
11059 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
11060 HCI_MGMT_VAR_LEN },
11061 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
11062 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
11063 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
11064 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
11065 HCI_MGMT_UNTRUSTED },
11066 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
11067 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
11068 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
11069 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
11070 HCI_MGMT_VAR_LEN },
11071 { set_wideband_speech, MGMT_SETTING_SIZE },
11072 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
11073 HCI_MGMT_UNTRUSTED },
11074 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
11075 HCI_MGMT_UNTRUSTED |
11076 HCI_MGMT_HDEV_OPTIONAL },
11077 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
11079 HCI_MGMT_HDEV_OPTIONAL },
11080 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
11081 HCI_MGMT_UNTRUSTED },
11082 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
11083 HCI_MGMT_VAR_LEN },
11084 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
11085 HCI_MGMT_UNTRUSTED },
11086 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
11087 HCI_MGMT_VAR_LEN },
11088 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
11089 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
11090 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
11091 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
11092 HCI_MGMT_VAR_LEN },
11093 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
11094 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
11095 HCI_MGMT_VAR_LEN },
11096 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
11097 HCI_MGMT_VAR_LEN },
11098 { add_adv_patterns_monitor_rssi,
11099 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
11100 HCI_MGMT_VAR_LEN },
11101 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
11102 HCI_MGMT_VAR_LEN },
11103 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
11104 { mesh_send, MGMT_MESH_SEND_SIZE,
11105 HCI_MGMT_VAR_LEN },
11106 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
11110 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
11111 { NULL }, /* 0x0000 (no command) */
11112 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
11113 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
11114 HCI_MGMT_VAR_LEN },
11115 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
11116 HCI_MGMT_VAR_LEN },
11117 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
11118 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
11119 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
11120 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
11121 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
11122 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
11123 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
11124 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
11125 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
11126 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
11127 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
11128 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
11132 void mgmt_index_added(struct hci_dev *hdev)
11134 struct mgmt_ev_ext_index ev;
11136 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11139 switch (hdev->dev_type) {
11141 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11142 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
11143 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11146 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
11147 HCI_MGMT_INDEX_EVENTS);
11158 ev.bus = hdev->bus;
11160 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
11161 HCI_MGMT_EXT_INDEX_EVENTS);
11164 void mgmt_index_removed(struct hci_dev *hdev)
11166 struct mgmt_ev_ext_index ev;
11167 u8 status = MGMT_STATUS_INVALID_INDEX;
11169 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11172 switch (hdev->dev_type) {
11174 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11176 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11177 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
11178 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11181 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
11182 HCI_MGMT_INDEX_EVENTS);
11193 ev.bus = hdev->bus;
11195 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
11196 HCI_MGMT_EXT_INDEX_EVENTS);
11198 /* Cancel any remaining timed work */
11199 if (!hci_dev_test_flag(hdev, HCI_MGMT))
11201 cancel_delayed_work_sync(&hdev->discov_off);
11202 cancel_delayed_work_sync(&hdev->service_cache);
11203 cancel_delayed_work_sync(&hdev->rpa_expired);
11206 void mgmt_power_on(struct hci_dev *hdev, int err)
11208 struct cmd_lookup match = { NULL, hdev };
11210 bt_dev_dbg(hdev, "err %d", err);
11212 hci_dev_lock(hdev);
11215 restart_le_actions(hdev);
11216 hci_update_passive_scan(hdev);
11219 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11221 new_settings(hdev, match.sk);
11224 sock_put(match.sk);
11226 hci_dev_unlock(hdev);
11229 void __mgmt_power_off(struct hci_dev *hdev)
11231 struct cmd_lookup match = { NULL, hdev };
11232 u8 status, zero_cod[] = { 0, 0, 0 };
11234 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11236 /* If the power off is because of hdev unregistration let
11237 * use the appropriate INVALID_INDEX status. Otherwise use
11238 * NOT_POWERED. We cover both scenarios here since later in
11239 * mgmt_index_removed() any hci_conn callbacks will have already
11240 * been triggered, potentially causing misleading DISCONNECTED
11241 * status responses.
11243 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
11244 status = MGMT_STATUS_INVALID_INDEX;
11246 status = MGMT_STATUS_NOT_POWERED;
11248 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11250 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
11251 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
11252 zero_cod, sizeof(zero_cod),
11253 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11254 ext_info_changed(hdev, NULL);
11257 new_settings(hdev, match.sk);
11260 sock_put(match.sk);
11263 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
11265 struct mgmt_pending_cmd *cmd;
11268 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11272 if (err == -ERFKILL)
11273 status = MGMT_STATUS_RFKILLED;
11275 status = MGMT_STATUS_FAILED;
11277 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
11279 mgmt_pending_remove(cmd);
11282 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
11285 struct mgmt_ev_new_link_key ev;
11287 memset(&ev, 0, sizeof(ev));
11289 ev.store_hint = persistent;
11290 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11291 ev.key.addr.type = BDADDR_BREDR;
11292 ev.key.type = key->type;
11293 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
11294 ev.key.pin_len = key->pin_len;
11296 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
11299 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
11301 switch (ltk->type) {
11303 case SMP_LTK_RESPONDER:
11304 if (ltk->authenticated)
11305 return MGMT_LTK_AUTHENTICATED;
11306 return MGMT_LTK_UNAUTHENTICATED;
11308 if (ltk->authenticated)
11309 return MGMT_LTK_P256_AUTH;
11310 return MGMT_LTK_P256_UNAUTH;
11311 case SMP_LTK_P256_DEBUG:
11312 return MGMT_LTK_P256_DEBUG;
11315 return MGMT_LTK_UNAUTHENTICATED;
11318 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
11320 struct mgmt_ev_new_long_term_key ev;
11322 memset(&ev, 0, sizeof(ev));
11324 /* Devices using resolvable or non-resolvable random addresses
11325 * without providing an identity resolving key don't require
11326 * to store long term keys. Their addresses will change the
11327 * next time around.
11329 * Only when a remote device provides an identity address
11330 * make sure the long term key is stored. If the remote
11331 * identity is known, the long term keys are internally
11332 * mapped to the identity address. So allow static random
11333 * and public addresses here.
11335 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11336 (key->bdaddr.b[5] & 0xc0) != 0xc0)
11337 ev.store_hint = 0x00;
11339 ev.store_hint = persistent;
11341 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11342 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
11343 ev.key.type = mgmt_ltk_type(key);
11344 ev.key.enc_size = key->enc_size;
11345 ev.key.ediv = key->ediv;
11346 ev.key.rand = key->rand;
11348 if (key->type == SMP_LTK)
11349 ev.key.initiator = 1;
11351 /* Make sure we copy only the significant bytes based on the
11352 * encryption key size, and set the rest of the value to zeroes.
11354 memcpy(ev.key.val, key->val, key->enc_size);
11355 memset(ev.key.val + key->enc_size, 0,
11356 sizeof(ev.key.val) - key->enc_size);
11358 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
11361 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
11363 struct mgmt_ev_new_irk ev;
11365 memset(&ev, 0, sizeof(ev));
11367 ev.store_hint = persistent;
11369 bacpy(&ev.rpa, &irk->rpa);
11370 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
11371 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
11372 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
11374 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
11377 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
11380 struct mgmt_ev_new_csrk ev;
11382 memset(&ev, 0, sizeof(ev));
11384 /* Devices using resolvable or non-resolvable random addresses
11385 * without providing an identity resolving key don't require
11386 * to store signature resolving keys. Their addresses will change
11387 * the next time around.
11389 * Only when a remote device provides an identity address
11390 * make sure the signature resolving key is stored. So allow
11391 * static random and public addresses here.
11393 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11394 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
11395 ev.store_hint = 0x00;
11397 ev.store_hint = persistent;
11399 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
11400 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
11401 ev.key.type = csrk->type;
11402 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
11404 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
11407 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
11408 u8 bdaddr_type, u8 store_hint, u16 min_interval,
11409 u16 max_interval, u16 latency, u16 timeout)
11411 struct mgmt_ev_new_conn_param ev;
11413 if (!hci_is_identity_address(bdaddr, bdaddr_type))
11416 memset(&ev, 0, sizeof(ev));
11417 bacpy(&ev.addr.bdaddr, bdaddr);
11418 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
11419 ev.store_hint = store_hint;
11420 ev.min_interval = cpu_to_le16(min_interval);
11421 ev.max_interval = cpu_to_le16(max_interval);
11422 ev.latency = cpu_to_le16(latency);
11423 ev.timeout = cpu_to_le16(timeout);
11425 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
11428 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
11429 u8 *name, u8 name_len)
11431 struct sk_buff *skb;
11432 struct mgmt_ev_device_connected *ev;
11436 /* allocate buff for LE or BR/EDR adv */
11437 if (conn->le_adv_data_len > 0)
11438 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11439 sizeof(*ev) + conn->le_adv_data_len);
11441 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11442 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
11443 eir_precalc_len(sizeof(conn->dev_class)));
11445 ev = skb_put(skb, sizeof(*ev));
11446 bacpy(&ev->addr.bdaddr, &conn->dst);
11447 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11450 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
11452 ev->flags = __cpu_to_le32(flags);
11454 /* We must ensure that the EIR Data fields are ordered and
11455 * unique. Keep it simple for now and avoid the problem by not
11456 * adding any BR/EDR data to the LE adv.
11458 if (conn->le_adv_data_len > 0) {
11459 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
11460 eir_len = conn->le_adv_data_len;
11463 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
11465 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
11466 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
11467 conn->dev_class, sizeof(conn->dev_class));
11470 ev->eir_len = cpu_to_le16(eir_len);
11472 mgmt_event_skb(skb, NULL);
11475 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
11477 struct sock **sk = data;
11479 cmd->cmd_complete(cmd, 0);
11484 mgmt_pending_remove(cmd);
11487 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
11489 struct hci_dev *hdev = data;
11490 struct mgmt_cp_unpair_device *cp = cmd->param;
11492 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
11494 cmd->cmd_complete(cmd, 0);
11495 mgmt_pending_remove(cmd);
11498 bool mgmt_powering_down(struct hci_dev *hdev)
11500 struct mgmt_pending_cmd *cmd;
11501 struct mgmt_mode *cp;
11503 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11514 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
11515 u8 link_type, u8 addr_type, u8 reason,
11516 bool mgmt_connected)
11518 struct mgmt_ev_device_disconnected ev;
11519 struct sock *sk = NULL;
11521 /* The connection is still in hci_conn_hash so test for 1
11522 * instead of 0 to know if this is the last one.
11524 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11525 cancel_delayed_work(&hdev->power_off);
11526 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11529 if (!mgmt_connected)
11532 if (link_type != ACL_LINK && link_type != LE_LINK)
11535 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
11537 bacpy(&ev.addr.bdaddr, bdaddr);
11538 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11539 ev.reason = reason;
11541 /* Report disconnects due to suspend */
11542 if (hdev->suspended)
11543 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
11545 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
11550 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11554 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
11555 u8 link_type, u8 addr_type, u8 status)
11557 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
11558 struct mgmt_cp_disconnect *cp;
11559 struct mgmt_pending_cmd *cmd;
11561 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11564 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
11570 if (bacmp(bdaddr, &cp->addr.bdaddr))
11573 if (cp->addr.type != bdaddr_type)
11576 cmd->cmd_complete(cmd, mgmt_status(status));
11577 mgmt_pending_remove(cmd);
11580 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11581 u8 addr_type, u8 status)
11583 struct mgmt_ev_connect_failed ev;
11585 /* The connection is still in hci_conn_hash so test for 1
11586 * instead of 0 to know if this is the last one.
11588 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11589 cancel_delayed_work(&hdev->power_off);
11590 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11593 bacpy(&ev.addr.bdaddr, bdaddr);
11594 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11595 ev.status = mgmt_status(status);
11597 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
11600 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
11602 struct mgmt_ev_pin_code_request ev;
11604 bacpy(&ev.addr.bdaddr, bdaddr);
11605 ev.addr.type = BDADDR_BREDR;
11606 ev.secure = secure;
11608 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
11611 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11614 struct mgmt_pending_cmd *cmd;
11616 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
11620 cmd->cmd_complete(cmd, mgmt_status(status));
11621 mgmt_pending_remove(cmd);
11624 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11627 struct mgmt_pending_cmd *cmd;
11629 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
11633 cmd->cmd_complete(cmd, mgmt_status(status));
11634 mgmt_pending_remove(cmd);
11637 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11638 u8 link_type, u8 addr_type, u32 value,
11641 struct mgmt_ev_user_confirm_request ev;
11643 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11645 bacpy(&ev.addr.bdaddr, bdaddr);
11646 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11647 ev.confirm_hint = confirm_hint;
11648 ev.value = cpu_to_le32(value);
11650 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
11654 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11655 u8 link_type, u8 addr_type)
11657 struct mgmt_ev_user_passkey_request ev;
11659 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11661 bacpy(&ev.addr.bdaddr, bdaddr);
11662 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11664 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
11668 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11669 u8 link_type, u8 addr_type, u8 status,
11672 struct mgmt_pending_cmd *cmd;
11674 cmd = pending_find(opcode, hdev);
11678 cmd->cmd_complete(cmd, mgmt_status(status));
11679 mgmt_pending_remove(cmd);
11684 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11685 u8 link_type, u8 addr_type, u8 status)
11687 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11688 status, MGMT_OP_USER_CONFIRM_REPLY);
11691 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11692 u8 link_type, u8 addr_type, u8 status)
11694 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11696 MGMT_OP_USER_CONFIRM_NEG_REPLY);
11699 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11700 u8 link_type, u8 addr_type, u8 status)
11702 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11703 status, MGMT_OP_USER_PASSKEY_REPLY);
11706 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11707 u8 link_type, u8 addr_type, u8 status)
11709 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11711 MGMT_OP_USER_PASSKEY_NEG_REPLY);
11714 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
11715 u8 link_type, u8 addr_type, u32 passkey,
11718 struct mgmt_ev_passkey_notify ev;
11720 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11722 bacpy(&ev.addr.bdaddr, bdaddr);
11723 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11724 ev.passkey = __cpu_to_le32(passkey);
11725 ev.entered = entered;
11727 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
11730 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
11732 struct mgmt_ev_auth_failed ev;
11733 struct mgmt_pending_cmd *cmd;
11734 u8 status = mgmt_status(hci_status);
11736 bacpy(&ev.addr.bdaddr, &conn->dst);
11737 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11738 ev.status = status;
11740 cmd = find_pairing(conn);
11742 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
11743 cmd ? cmd->sk : NULL);
11746 cmd->cmd_complete(cmd, status);
11747 mgmt_pending_remove(cmd);
11751 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
11753 struct cmd_lookup match = { NULL, hdev };
11757 u8 mgmt_err = mgmt_status(status);
11758 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
11759 cmd_status_rsp, &mgmt_err);
11763 if (test_bit(HCI_AUTH, &hdev->flags))
11764 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
11766 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
11768 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
11772 new_settings(hdev, match.sk);
11775 sock_put(match.sk);
11778 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11780 struct cmd_lookup *match = data;
11782 if (match->sk == NULL) {
11783 match->sk = cmd->sk;
11784 sock_hold(match->sk);
11788 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11791 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11793 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11794 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11795 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11798 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11799 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11800 ext_info_changed(hdev, NULL);
11804 sock_put(match.sk);
11807 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11809 struct mgmt_cp_set_local_name ev;
11810 struct mgmt_pending_cmd *cmd;
11815 memset(&ev, 0, sizeof(ev));
11816 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11817 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11819 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11821 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11823 /* If this is a HCI command related to powering on the
11824 * HCI dev don't send any mgmt signals.
11826 if (pending_find(MGMT_OP_SET_POWERED, hdev))
11830 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11831 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11832 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
11835 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11839 for (i = 0; i < uuid_count; i++) {
11840 if (!memcmp(uuid, uuids[i], 16))
11847 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
11851 while (parsed < eir_len) {
11852 u8 field_len = eir[0];
11856 if (field_len == 0)
11859 if (eir_len - parsed < field_len + 1)
11863 case EIR_UUID16_ALL:
11864 case EIR_UUID16_SOME:
11865 for (i = 0; i + 3 <= field_len; i += 2) {
11866 memcpy(uuid, bluetooth_base_uuid, 16);
11867 uuid[13] = eir[i + 3];
11868 uuid[12] = eir[i + 2];
11869 if (has_uuid(uuid, uuid_count, uuids))
11873 case EIR_UUID32_ALL:
11874 case EIR_UUID32_SOME:
11875 for (i = 0; i + 5 <= field_len; i += 4) {
11876 memcpy(uuid, bluetooth_base_uuid, 16);
11877 uuid[15] = eir[i + 5];
11878 uuid[14] = eir[i + 4];
11879 uuid[13] = eir[i + 3];
11880 uuid[12] = eir[i + 2];
11881 if (has_uuid(uuid, uuid_count, uuids))
11885 case EIR_UUID128_ALL:
11886 case EIR_UUID128_SOME:
11887 for (i = 0; i + 17 <= field_len; i += 16) {
11888 memcpy(uuid, eir + i + 2, 16);
11889 if (has_uuid(uuid, uuid_count, uuids))
11895 parsed += field_len + 1;
11896 eir += field_len + 1;
11902 static void restart_le_scan(struct hci_dev *hdev)
11904 /* If controller is not scanning we are done. */
11905 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
11908 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
11909 hdev->discovery.scan_start +
11910 hdev->discovery.scan_duration))
11913 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
11914 DISCOV_LE_RESTART_DELAY);
11917 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
11918 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
11920 /* If a RSSI threshold has been specified, and
11921 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
11922 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
11923 * is set, let it through for further processing, as we might need to
11924 * restart the scan.
11926 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
11927 * the results are also dropped.
11929 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11930 (rssi == HCI_RSSI_INVALID ||
11931 (rssi < hdev->discovery.rssi &&
11932 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
11935 if (hdev->discovery.uuid_count != 0) {
11936 /* If a list of UUIDs is provided in filter, results with no
11937 * matching UUID should be dropped.
11939 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
11940 hdev->discovery.uuids) &&
11941 !eir_has_uuids(scan_rsp, scan_rsp_len,
11942 hdev->discovery.uuid_count,
11943 hdev->discovery.uuids))
11947 /* If duplicate filtering does not report RSSI changes, then restart
11948 * scanning to ensure updated result with updated RSSI values.
11950 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
11951 restart_le_scan(hdev);
11953 /* Validate RSSI value against the RSSI threshold once more. */
11954 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
11955 rssi < hdev->discovery.rssi)
11962 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
11963 bdaddr_t *bdaddr, u8 addr_type)
11965 struct mgmt_ev_adv_monitor_device_lost ev;
11967 ev.monitor_handle = cpu_to_le16(handle);
11968 bacpy(&ev.addr.bdaddr, bdaddr);
11969 ev.addr.type = addr_type;
11971 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
11975 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
11976 struct sk_buff *skb,
11977 struct sock *skip_sk,
11980 struct sk_buff *advmon_skb;
11981 size_t advmon_skb_len;
11982 __le16 *monitor_handle;
11987 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
11988 sizeof(struct mgmt_ev_device_found)) + skb->len;
11989 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
11994 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
11995 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
11996 * store monitor_handle of the matched monitor.
11998 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
11999 *monitor_handle = cpu_to_le16(handle);
12000 skb_put_data(advmon_skb, skb->data, skb->len);
12002 mgmt_event_skb(advmon_skb, skip_sk);
12005 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
12006 bdaddr_t *bdaddr, bool report_device,
12007 struct sk_buff *skb,
12008 struct sock *skip_sk)
12010 struct monitored_device *dev, *tmp;
12011 bool matched = false;
12012 bool notified = false;
12014 /* We have received the Advertisement Report because:
12015 * 1. the kernel has initiated active discovery
12016 * 2. if not, we have pend_le_reports > 0 in which case we are doing
12018 * 3. if none of the above is true, we have one or more active
12019 * Advertisement Monitor
12021 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
12022 * and report ONLY one advertisement per device for the matched Monitor
12023 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12025 * For case 3, since we are not active scanning and all advertisements
12026 * received are due to a matched Advertisement Monitor, report all
12027 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12029 if (report_device && !hdev->advmon_pend_notify) {
12030 mgmt_event_skb(skb, skip_sk);
12034 hdev->advmon_pend_notify = false;
12036 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
12037 if (!bacmp(&dev->bdaddr, bdaddr)) {
12040 if (!dev->notified) {
12041 mgmt_send_adv_monitor_device_found(hdev, skb,
12045 dev->notified = true;
12049 if (!dev->notified)
12050 hdev->advmon_pend_notify = true;
12053 if (!report_device &&
12054 ((matched && !notified) || !msft_monitor_supported(hdev))) {
12055 /* Handle 0 indicates that we are not active scanning and this
12056 * is a subsequent advertisement report for an already matched
12057 * Advertisement Monitor or the controller offloading support
12058 * is not available.
12060 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
12064 mgmt_event_skb(skb, skip_sk);
12069 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
12070 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
12071 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12074 struct sk_buff *skb;
12075 struct mgmt_ev_mesh_device_found *ev;
12078 if (!hdev->mesh_ad_types[0])
12081 /* Scan for requested AD types */
12083 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
12084 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12085 if (!hdev->mesh_ad_types[j])
12088 if (hdev->mesh_ad_types[j] == eir[i + 1])
12094 if (scan_rsp_len > 0) {
12095 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
12096 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12097 if (!hdev->mesh_ad_types[j])
12100 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
12109 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
12110 sizeof(*ev) + eir_len + scan_rsp_len);
12114 ev = skb_put(skb, sizeof(*ev));
12116 bacpy(&ev->addr.bdaddr, bdaddr);
12117 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
12119 ev->flags = cpu_to_le32(flags);
12120 ev->instant = cpu_to_le64(instant);
12123 /* Copy EIR or advertising data into event */
12124 skb_put_data(skb, eir, eir_len);
12126 if (scan_rsp_len > 0)
12127 /* Append scan response data to event */
12128 skb_put_data(skb, scan_rsp, scan_rsp_len);
12130 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12132 mgmt_event_skb(skb, NULL);
12135 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12136 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
12137 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12140 struct sk_buff *skb;
12141 struct mgmt_ev_device_found *ev;
12142 bool report_device = hci_discovery_active(hdev);
12144 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
12145 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
12146 eir, eir_len, scan_rsp, scan_rsp_len,
12149 /* Don't send events for a non-kernel initiated discovery. With
12150 * LE one exception is if we have pend_le_reports > 0 in which
12151 * case we're doing passive scanning and want these events.
12153 if (!hci_discovery_active(hdev)) {
12154 if (link_type == ACL_LINK)
12156 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
12157 report_device = true;
12158 else if (!hci_is_adv_monitoring(hdev))
12162 if (hdev->discovery.result_filtering) {
12163 /* We are using service discovery */
12164 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
12169 if (hdev->discovery.limited) {
12170 /* Check for limited discoverable bit */
12172 if (!(dev_class[1] & 0x20))
12175 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
12176 if (!flags || !(flags[0] & LE_AD_LIMITED))
12181 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
12182 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12183 sizeof(*ev) + eir_len + scan_rsp_len + 5);
12187 ev = skb_put(skb, sizeof(*ev));
12189 /* In case of device discovery with BR/EDR devices (pre 1.2), the
12190 * RSSI value was reported as 0 when not available. This behavior
12191 * is kept when using device discovery. This is required for full
12192 * backwards compatibility with the API.
12194 * However when using service discovery, the value 127 will be
12195 * returned when the RSSI is not available.
12197 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
12198 link_type == ACL_LINK)
12201 bacpy(&ev->addr.bdaddr, bdaddr);
12202 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12204 ev->flags = cpu_to_le32(flags);
12207 /* Copy EIR or advertising data into event */
12208 skb_put_data(skb, eir, eir_len);
12210 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
12213 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
12215 skb_put_data(skb, eir_cod, sizeof(eir_cod));
12218 if (scan_rsp_len > 0)
12219 /* Append scan response data to event */
12220 skb_put_data(skb, scan_rsp, scan_rsp_len);
12222 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12224 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
12227 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12228 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
12230 struct sk_buff *skb;
12231 struct mgmt_ev_device_found *ev;
12235 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12236 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
12238 ev = skb_put(skb, sizeof(*ev));
12239 bacpy(&ev->addr.bdaddr, bdaddr);
12240 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12244 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
12246 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
12248 ev->eir_len = cpu_to_le16(eir_len);
12249 ev->flags = cpu_to_le32(flags);
12251 mgmt_event_skb(skb, NULL);
12254 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
12256 struct mgmt_ev_discovering ev;
12258 bt_dev_dbg(hdev, "discovering %u", discovering);
12260 memset(&ev, 0, sizeof(ev));
12261 ev.type = hdev->discovery.type;
12262 ev.discovering = discovering;
12264 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
12267 void mgmt_suspending(struct hci_dev *hdev, u8 state)
12269 struct mgmt_ev_controller_suspend ev;
12271 ev.suspend_state = state;
12272 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
12275 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
12278 struct mgmt_ev_controller_resume ev;
12280 ev.wake_reason = reason;
12282 bacpy(&ev.addr.bdaddr, bdaddr);
12283 ev.addr.type = addr_type;
12285 memset(&ev.addr, 0, sizeof(ev.addr));
12288 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
12291 static struct hci_mgmt_chan chan = {
12292 .channel = HCI_CHANNEL_CONTROL,
12293 .handler_count = ARRAY_SIZE(mgmt_handlers),
12294 .handlers = mgmt_handlers,
12296 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
12297 .tizen_handlers = tizen_mgmt_handlers,
12299 .hdev_init = mgmt_init_hdev,
12302 int mgmt_init(void)
12304 return hci_mgmt_chan_register(&chan);
12307 void mgmt_exit(void)
12309 hci_mgmt_chan_unregister(&chan);
12312 void mgmt_cleanup(struct sock *sk)
12314 struct mgmt_mesh_tx *mesh_tx;
12315 struct hci_dev *hdev;
12317 read_lock(&hci_dev_list_lock);
12319 list_for_each_entry(hdev, &hci_dev_list, list) {
12321 mesh_tx = mgmt_mesh_next(hdev, sk);
12324 mesh_send_complete(hdev, mesh_tx, true);
12328 read_unlock(&hci_dev_list_lock);