2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
39 #include "hci_request.h"
41 #include "mgmt_util.h"
42 #include "mgmt_config.h"
45 #define MGMT_VERSION 1
46 #define MGMT_REVISION 18
/* Opcodes accepted from trusted (privileged) management sockets;
 * read_commands() returns this list verbatim for HCI_SOCK_TRUSTED clients.
 * NOTE(review): extraction artifact - original line numbers are embedded in
 * every line and several entries plus the closing brace are missing;
 * preserved byte-for-byte.
 */
48 static const u16 mgmt_commands[] = {
49 MGMT_OP_READ_INDEX_LIST,
52 MGMT_OP_SET_DISCOVERABLE,
53 MGMT_OP_SET_CONNECTABLE,
54 MGMT_OP_SET_FAST_CONNECTABLE,
56 MGMT_OP_SET_LINK_SECURITY,
60 MGMT_OP_SET_DEV_CLASS,
61 MGMT_OP_SET_LOCAL_NAME,
64 MGMT_OP_LOAD_LINK_KEYS,
65 MGMT_OP_LOAD_LONG_TERM_KEYS,
67 MGMT_OP_GET_CONNECTIONS,
68 MGMT_OP_PIN_CODE_REPLY,
69 MGMT_OP_PIN_CODE_NEG_REPLY,
70 MGMT_OP_SET_IO_CAPABILITY,
72 MGMT_OP_CANCEL_PAIR_DEVICE,
73 MGMT_OP_UNPAIR_DEVICE,
74 MGMT_OP_USER_CONFIRM_REPLY,
75 MGMT_OP_USER_CONFIRM_NEG_REPLY,
76 MGMT_OP_USER_PASSKEY_REPLY,
77 MGMT_OP_USER_PASSKEY_NEG_REPLY,
78 MGMT_OP_READ_LOCAL_OOB_DATA,
79 MGMT_OP_ADD_REMOTE_OOB_DATA,
80 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
81 MGMT_OP_START_DISCOVERY,
82 MGMT_OP_STOP_DISCOVERY,
85 MGMT_OP_UNBLOCK_DEVICE,
86 MGMT_OP_SET_DEVICE_ID,
87 MGMT_OP_SET_ADVERTISING,
89 MGMT_OP_SET_STATIC_ADDRESS,
90 MGMT_OP_SET_SCAN_PARAMS,
91 MGMT_OP_SET_SECURE_CONN,
92 MGMT_OP_SET_DEBUG_KEYS,
95 MGMT_OP_GET_CONN_INFO,
96 MGMT_OP_GET_CLOCK_INFO,
98 MGMT_OP_REMOVE_DEVICE,
99 MGMT_OP_LOAD_CONN_PARAM,
100 MGMT_OP_READ_UNCONF_INDEX_LIST,
101 MGMT_OP_READ_CONFIG_INFO,
102 MGMT_OP_SET_EXTERNAL_CONFIG,
103 MGMT_OP_SET_PUBLIC_ADDRESS,
104 MGMT_OP_START_SERVICE_DISCOVERY,
105 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
106 MGMT_OP_READ_EXT_INDEX_LIST,
107 MGMT_OP_READ_ADV_FEATURES,
108 MGMT_OP_ADD_ADVERTISING,
109 MGMT_OP_REMOVE_ADVERTISING,
110 MGMT_OP_GET_ADV_SIZE_INFO,
111 MGMT_OP_START_LIMITED_DISCOVERY,
112 MGMT_OP_READ_EXT_INFO,
113 MGMT_OP_SET_APPEARANCE,
114 MGMT_OP_SET_BLOCKED_KEYS,
115 MGMT_OP_SET_WIDEBAND_SPEECH,
116 MGMT_OP_READ_SECURITY_INFO,
117 MGMT_OP_READ_EXP_FEATURES_INFO,
118 MGMT_OP_SET_EXP_FEATURE,
119 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
120 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
121 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
122 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
123 MGMT_OP_GET_DEVICE_FLAGS,
124 MGMT_OP_SET_DEVICE_FLAGS,
125 MGMT_OP_READ_ADV_MONITOR_FEATURES,
126 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
127 MGMT_OP_REMOVE_ADV_MONITOR,
/* Events delivered to trusted management sockets; read_commands() reports
 * this list to HCI_SOCK_TRUSTED clients.
 * NOTE(review): garbled extraction - embedded line numbers and missing
 * entries/closing brace preserved byte-for-byte.
 */
130 static const u16 mgmt_events[] = {
131 MGMT_EV_CONTROLLER_ERROR,
133 MGMT_EV_INDEX_REMOVED,
134 MGMT_EV_NEW_SETTINGS,
135 MGMT_EV_CLASS_OF_DEV_CHANGED,
136 MGMT_EV_LOCAL_NAME_CHANGED,
137 MGMT_EV_NEW_LINK_KEY,
138 MGMT_EV_NEW_LONG_TERM_KEY,
139 MGMT_EV_DEVICE_CONNECTED,
140 MGMT_EV_DEVICE_DISCONNECTED,
141 MGMT_EV_CONNECT_FAILED,
142 MGMT_EV_PIN_CODE_REQUEST,
143 MGMT_EV_USER_CONFIRM_REQUEST,
144 MGMT_EV_USER_PASSKEY_REQUEST,
146 MGMT_EV_DEVICE_FOUND,
148 MGMT_EV_DEVICE_BLOCKED,
149 MGMT_EV_DEVICE_UNBLOCKED,
150 MGMT_EV_DEVICE_UNPAIRED,
151 MGMT_EV_PASSKEY_NOTIFY,
154 MGMT_EV_DEVICE_ADDED,
155 MGMT_EV_DEVICE_REMOVED,
156 MGMT_EV_NEW_CONN_PARAM,
157 MGMT_EV_UNCONF_INDEX_ADDED,
158 MGMT_EV_UNCONF_INDEX_REMOVED,
159 MGMT_EV_NEW_CONFIG_OPTIONS,
160 MGMT_EV_EXT_INDEX_ADDED,
161 MGMT_EV_EXT_INDEX_REMOVED,
162 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
163 MGMT_EV_ADVERTISING_ADDED,
164 MGMT_EV_ADVERTISING_REMOVED,
165 MGMT_EV_EXT_INFO_CHANGED,
166 MGMT_EV_PHY_CONFIGURATION_CHANGED,
167 MGMT_EV_EXP_FEATURE_CHANGED,
168 MGMT_EV_DEVICE_FLAGS_CHANGED,
169 MGMT_EV_CONTROLLER_SUSPEND,
170 MGMT_EV_CONTROLLER_RESUME,
/* Read-only opcode subset available to untrusted sockets - all entries are
 * READ_* informational commands; nothing here mutates controller state.
 */
173 static const u16 mgmt_untrusted_commands[] = {
174 MGMT_OP_READ_INDEX_LIST,
176 MGMT_OP_READ_UNCONF_INDEX_LIST,
177 MGMT_OP_READ_CONFIG_INFO,
178 MGMT_OP_READ_EXT_INDEX_LIST,
179 MGMT_OP_READ_EXT_INFO,
180 MGMT_OP_READ_SECURITY_INFO,
181 MGMT_OP_READ_EXP_FEATURES_INFO,
182 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
183 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Event subset delivered to untrusted sockets - index/name/config change
 * notifications only, no security-sensitive events (keys, pairing, etc.).
 */
186 static const u16 mgmt_untrusted_events[] = {
188 MGMT_EV_INDEX_REMOVED,
189 MGMT_EV_NEW_SETTINGS,
190 MGMT_EV_CLASS_OF_DEV_CHANGED,
191 MGMT_EV_LOCAL_NAME_CHANGED,
192 MGMT_EV_UNCONF_INDEX_ADDED,
193 MGMT_EV_UNCONF_INDEX_REMOVED,
194 MGMT_EV_NEW_CONFIG_OPTIONS,
195 MGMT_EV_EXT_INDEX_ADDED,
196 MGMT_EV_EXT_INDEX_REMOVED,
197 MGMT_EV_EXT_INFO_CHANGED,
198 MGMT_EV_EXP_FEATURE_CHANGED,
199 MGMT_EV_ADV_MONITOR_ADDED,
200 MGMT_EV_ADV_MONITOR_REMOVED,
203 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
205 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
206 "\x00\x00\x00\x00\x00\x00\x00\x00"
208 /* HCI to MGMT error code conversion table */
/* Indexed directly by the raw HCI status byte; mgmt_status() falls back to
 * MGMT_STATUS_FAILED for any code beyond the end of this table.
 */
209 static const u8 mgmt_status_table[] = {
211 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
212 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
213 MGMT_STATUS_FAILED, /* Hardware Failure */
214 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
215 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
216 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
217 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
218 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
219 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
220 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
221 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
222 MGMT_STATUS_BUSY, /* Command Disallowed */
223 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
224 MGMT_STATUS_REJECTED, /* Rejected Security */
225 MGMT_STATUS_REJECTED, /* Rejected Personal */
226 MGMT_STATUS_TIMEOUT, /* Host Timeout */
227 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
228 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
229 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
230 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
231 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
232 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
233 MGMT_STATUS_BUSY, /* Repeated Attempts */
234 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
235 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
236 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
237 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
238 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
239 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
240 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
241 MGMT_STATUS_FAILED, /* Unspecified Error */
242 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
243 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
244 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
245 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
246 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
247 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
248 MGMT_STATUS_FAILED, /* Unit Link Key Used */
249 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
250 MGMT_STATUS_TIMEOUT, /* Instant Passed */
251 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
252 MGMT_STATUS_FAILED, /* Transaction Collision */
253 MGMT_STATUS_FAILED, /* Reserved for future use */
254 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
255 MGMT_STATUS_REJECTED, /* QoS Rejected */
256 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
257 MGMT_STATUS_REJECTED, /* Insufficient Security */
258 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
259 MGMT_STATUS_FAILED, /* Reserved for future use */
260 MGMT_STATUS_BUSY, /* Role Switch Pending */
261 MGMT_STATUS_FAILED, /* Reserved for future use */
262 MGMT_STATUS_FAILED, /* Slot Violation */
263 MGMT_STATUS_FAILED, /* Role Switch Failed */
264 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
265 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
266 MGMT_STATUS_BUSY, /* Host Busy Pairing */
267 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
268 MGMT_STATUS_BUSY, /* Controller Busy */
269 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
270 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
271 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
272 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
273 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
276 static u8 mgmt_status(u8 hci_status)
278 if (hci_status < ARRAY_SIZE(mgmt_status_table))
279 return mgmt_status_table[hci_status];
281 return MGMT_STATUS_FAILED;
284 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
287 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
291 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
292 u16 len, int flag, struct sock *skip_sk)
294 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
298 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
299 struct sock *skip_sk)
301 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
302 HCI_SOCK_TRUSTED, skip_sk);
305 static u8 le_addr_type(u8 mgmt_addr_type)
307 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
308 return ADDR_LE_DEV_PUBLIC;
310 return ADDR_LE_DEV_RANDOM;
313 void mgmt_fill_version_info(void *ver)
315 struct mgmt_rp_read_version *rp = ver;
317 rp->version = MGMT_VERSION;
318 rp->revision = cpu_to_le16(MGMT_REVISION);
321 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
324 struct mgmt_rp_read_version rp;
326 bt_dev_dbg(hdev, "sock %p", sk);
328 mgmt_fill_version_info(&rp);
330 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: return the supported opcode and event
 * lists, chosen by the socket's trust level (trusted vs untrusted tables).
 * NOTE(review): garbled extraction - allocation-failure handling, the else
 * branch markers and kfree lines are missing from this view; bytes kept
 * as-is.
 */
334 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
337 struct mgmt_rp_read_commands *rp;
338 u16 num_commands, num_events;
342 bt_dev_dbg(hdev, "sock %p", sk);
/* Trusted sockets see the full tables; others the read-only subsets. */
344 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
345 num_commands = ARRAY_SIZE(mgmt_commands);
346 num_events = ARRAY_SIZE(mgmt_events);
348 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
349 num_events = ARRAY_SIZE(mgmt_untrusted_events);
/* Reply carries both lists back-to-back as little-endian u16 opcodes. */
352 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
354 rp = kmalloc(rp_size, GFP_KERNEL);
358 rp->num_commands = cpu_to_le16(num_commands);
359 rp->num_events = cpu_to_le16(num_events);
361 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
362 __le16 *opcode = rp->opcodes;
364 for (i = 0; i < num_commands; i++, opcode++)
365 put_unaligned_le16(mgmt_commands[i], opcode);
367 for (i = 0; i < num_events; i++, opcode++)
368 put_unaligned_le16(mgmt_events[i], opcode);
370 __le16 *opcode = rp->opcodes;
372 for (i = 0; i < num_commands; i++, opcode++)
373 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
375 for (i = 0; i < num_events; i++, opcode++)
376 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
379 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: list the ids of all configured primary
 * controllers. Counts under hci_dev_list_lock, allocates the reply with
 * GFP_ATOMIC (lock held), then fills it - skipping devices still in
 * SETUP/CONFIG, user-channel devices and raw-only devices.
 */
386 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
389 struct mgmt_rp_read_index_list *rp;
395 bt_dev_dbg(hdev, "sock %p", sk);
397 read_lock(&hci_dev_list_lock);
/* First pass: upper-bound count of eligible controllers for sizing. */
400 list_for_each_entry(d, &hci_dev_list, list) {
401 if (d->dev_type == HCI_PRIMARY &&
402 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
406 rp_len = sizeof(*rp) + (2 * count);
407 rp = kmalloc(rp_len, GFP_ATOMIC);
409 read_unlock(&hci_dev_list_lock);
/* Second pass: fill the index array, re-checking eligibility. */
414 list_for_each_entry(d, &hci_dev_list, list) {
415 if (hci_dev_test_flag(d, HCI_SETUP) ||
416 hci_dev_test_flag(d, HCI_CONFIG) ||
417 hci_dev_test_flag(d, HCI_USER_CHANNEL))
420 /* Devices marked as raw-only are neither configured
421 * nor unconfigured controllers.
423 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
426 if (d->dev_type == HCI_PRIMARY &&
427 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
428 rp->index[count++] = cpu_to_le16(d->id);
429 bt_dev_dbg(hdev, "Added hci%u", d->id);
/* count may have shrunk in the second pass; recompute reply length. */
433 rp->num_controllers = cpu_to_le16(count);
434 rp_len = sizeof(*rp) + (2 * count);
436 read_unlock(&hci_dev_list_lock);
438 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but for
 * primary controllers that still have HCI_UNCONFIGURED set. Same
 * two-pass count/fill pattern under hci_dev_list_lock.
 */
446 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
447 void *data, u16 data_len)
449 struct mgmt_rp_read_unconf_index_list *rp;
455 bt_dev_dbg(hdev, "sock %p", sk);
457 read_lock(&hci_dev_list_lock);
460 list_for_each_entry(d, &hci_dev_list, list) {
461 if (d->dev_type == HCI_PRIMARY &&
462 hci_dev_test_flag(d, HCI_UNCONFIGURED))
466 rp_len = sizeof(*rp) + (2 * count);
467 rp = kmalloc(rp_len, GFP_ATOMIC);
469 read_unlock(&hci_dev_list_lock);
474 list_for_each_entry(d, &hci_dev_list, list) {
475 if (hci_dev_test_flag(d, HCI_SETUP) ||
476 hci_dev_test_flag(d, HCI_CONFIG) ||
477 hci_dev_test_flag(d, HCI_USER_CHANNEL))
480 /* Devices marked as raw-only are neither configured
481 * nor unconfigured controllers.
483 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
486 if (d->dev_type == HCI_PRIMARY &&
487 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
488 rp->index[count++] = cpu_to_le16(d->id);
489 bt_dev_dbg(hdev, "Added hci%u", d->id);
493 rp->num_controllers = cpu_to_le16(count);
494 rp_len = sizeof(*rp) + (2 * count);
496 read_unlock(&hci_dev_list_lock);
498 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
499 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list primary and AMP controllers
 * with a per-entry type (0x00 configured primary, 0x01 unconfigured
 * primary, 0x02 AMP) and bus. Also switches the calling socket over to
 * extended index events only.
 */
506 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
507 void *data, u16 data_len)
509 struct mgmt_rp_read_ext_index_list *rp;
514 bt_dev_dbg(hdev, "sock %p", sk);
516 read_lock(&hci_dev_list_lock);
519 list_for_each_entry(d, &hci_dev_list, list) {
520 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
/* struct_size() accounts for the flexible entry[] array. */
524 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
526 read_unlock(&hci_dev_list_lock);
531 list_for_each_entry(d, &hci_dev_list, list) {
532 if (hci_dev_test_flag(d, HCI_SETUP) ||
533 hci_dev_test_flag(d, HCI_CONFIG) ||
534 hci_dev_test_flag(d, HCI_USER_CHANNEL))
537 /* Devices marked as raw-only are neither configured
538 * nor unconfigured controllers.
540 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
543 if (d->dev_type == HCI_PRIMARY) {
544 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
545 rp->entry[count].type = 0x01;
547 rp->entry[count].type = 0x00;
548 } else if (d->dev_type == HCI_AMP) {
549 rp->entry[count].type = 0x02;
554 rp->entry[count].bus = d->bus;
555 rp->entry[count++].index = cpu_to_le16(d->id);
556 bt_dev_dbg(hdev, "Added hci%u", d->id);
559 rp->num_controllers = cpu_to_le16(count);
561 read_unlock(&hci_dev_list_lock);
563 /* If this command is called at least once, then all the
564 * default index and unconfigured index events are disabled
565 * and from now on only extended index events are used.
567 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
568 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
569 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
571 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
572 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
573 struct_size(rp, entry, count));
580 static bool is_configured(struct hci_dev *hdev)
582 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
583 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
586 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
587 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
588 !bacmp(&hdev->public_addr, BDADDR_ANY))
594 static __le32 get_missing_options(struct hci_dev *hdev)
598 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
599 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
600 options |= MGMT_OPTION_EXTERNAL_CONFIG;
602 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
603 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
604 !bacmp(&hdev->public_addr, BDADDR_ANY))
605 options |= MGMT_OPTION_PUBLIC_ADDRESS;
607 return cpu_to_le32(options);
610 static int new_options(struct hci_dev *hdev, struct sock *skip)
612 __le32 options = get_missing_options(hdev);
614 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
615 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
618 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
620 __le32 options = get_missing_options(hdev);
622 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus supported and
 * missing configuration options. NOTE(review): the matching hci_dev_lock()
 * line is missing from this extraction (only the unlock is visible).
 */
626 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
627 void *data, u16 data_len)
629 struct mgmt_rp_read_config_info rp;
632 bt_dev_dbg(hdev, "sock %p", sk);
636 memset(&rp, 0, sizeof(rp));
637 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
639 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
640 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* A public address can only be configured if the driver supports it. */
642 if (hdev->set_bdaddr)
643 options |= MGMT_OPTION_PUBLIC_ADDRESS;
645 rp.supported_options = cpu_to_le32(options);
646 rp.missing_options = get_missing_options(hdev);
648 hci_dev_unlock(hdev);
650 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the bitmask of PHYs the controller hardware supports, derived from
 * BR/EDR LMP feature bits and the LE PHY feature bits (2M/Coded).
 */
654 static u32 get_supported_phys(struct hci_dev *hdev)
656 u32 supported_phys = 0;
658 if (lmp_bredr_capable(hdev)) {
/* Basic rate 1-slot is mandatory whenever BR/EDR is present. */
659 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
661 if (hdev->features[0][0] & LMP_3SLOT)
662 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
664 if (hdev->features[0][0] & LMP_5SLOT)
665 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
667 if (lmp_edr_2m_capable(hdev)) {
668 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
670 if (lmp_edr_3slot_capable(hdev))
671 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
673 if (lmp_edr_5slot_capable(hdev))
674 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
676 if (lmp_edr_3m_capable(hdev)) {
677 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
679 if (lmp_edr_3slot_capable(hdev))
680 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
682 if (lmp_edr_5slot_capable(hdev))
683 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
688 if (lmp_le_capable(hdev)) {
/* LE 1M TX/RX is mandatory for any LE controller. */
689 supported_phys |= MGMT_PHY_LE_1M_TX;
690 supported_phys |= MGMT_PHY_LE_1M_RX;
692 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
693 supported_phys |= MGMT_PHY_LE_2M_TX;
694 supported_phys |= MGMT_PHY_LE_2M_RX;
697 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
698 supported_phys |= MGMT_PHY_LE_CODED_TX;
699 supported_phys |= MGMT_PHY_LE_CODED_RX;
703 return supported_phys;
/* Build the bitmask of PHYs currently selected. For BR/EDR the EDR
 * packet-type bits work in reverse: a set HCI_2DHx/HCI_3DHx bit in
 * hdev->pkt_type means that packet type is *excluded*, hence the negated
 * tests below. LE selection comes from the default TX/RX PHY preferences.
 */
706 static u32 get_selected_phys(struct hci_dev *hdev)
708 u32 selected_phys = 0;
710 if (lmp_bredr_capable(hdev)) {
711 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
713 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
714 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
716 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
717 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
719 if (lmp_edr_2m_capable(hdev)) {
720 if (!(hdev->pkt_type & HCI_2DH1))
721 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
723 if (lmp_edr_3slot_capable(hdev) &&
724 !(hdev->pkt_type & HCI_2DH3))
725 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
727 if (lmp_edr_5slot_capable(hdev) &&
728 !(hdev->pkt_type & HCI_2DH5))
729 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
731 if (lmp_edr_3m_capable(hdev)) {
732 if (!(hdev->pkt_type & HCI_3DH1))
733 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
735 if (lmp_edr_3slot_capable(hdev) &&
736 !(hdev->pkt_type & HCI_3DH3))
737 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
739 if (lmp_edr_5slot_capable(hdev) &&
740 !(hdev->pkt_type & HCI_3DH5))
741 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
746 if (lmp_le_capable(hdev)) {
747 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
748 selected_phys |= MGMT_PHY_LE_1M_TX;
750 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
751 selected_phys |= MGMT_PHY_LE_1M_RX;
753 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
754 selected_phys |= MGMT_PHY_LE_2M_TX;
756 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
757 selected_phys |= MGMT_PHY_LE_2M_RX;
759 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
760 selected_phys |= MGMT_PHY_LE_CODED_TX;
762 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
763 selected_phys |= MGMT_PHY_LE_CODED_RX;
766 return selected_phys;
769 static u32 get_configurable_phys(struct hci_dev *hdev)
771 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
772 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the bitmask of MGMT settings this controller can support, based on
 * LMP/LE capability bits and quirks.
 */
775 static u32 get_supported_settings(struct hci_dev *hdev)
/* Settings every controller supports regardless of capabilities. */
779 settings |= MGMT_SETTING_POWERED;
780 settings |= MGMT_SETTING_BONDABLE;
781 settings |= MGMT_SETTING_DEBUG_KEYS;
782 settings |= MGMT_SETTING_CONNECTABLE;
783 settings |= MGMT_SETTING_DISCOVERABLE;
785 if (lmp_bredr_capable(hdev)) {
786 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
787 settings |= MGMT_SETTING_FAST_CONNECTABLE;
788 settings |= MGMT_SETTING_BREDR;
789 settings |= MGMT_SETTING_LINK_SECURITY;
791 if (lmp_ssp_capable(hdev)) {
792 settings |= MGMT_SETTING_SSP;
793 if (IS_ENABLED(CONFIG_BT_HS))
794 settings |= MGMT_SETTING_HS;
797 if (lmp_sc_capable(hdev))
798 settings |= MGMT_SETTING_SECURE_CONN;
800 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
802 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
805 if (lmp_le_capable(hdev)) {
806 settings |= MGMT_SETTING_LE;
807 settings |= MGMT_SETTING_SECURE_CONN;
808 settings |= MGMT_SETTING_PRIVACY;
809 settings |= MGMT_SETTING_STATIC_ADDRESS;
811 /* When the experimental feature for LL Privacy support is
812 * enabled, then advertising is no longer supported.
814 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
815 settings |= MGMT_SETTING_ADVERTISING;
818 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
820 settings |= MGMT_SETTING_CONFIGURATION;
822 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the bitmask of settings currently active, mapping each hdev flag
 * to its corresponding MGMT_SETTING_* bit.
 */
827 static u32 get_current_settings(struct hci_dev *hdev)
831 if (hdev_is_powered(hdev))
832 settings |= MGMT_SETTING_POWERED;
834 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
835 settings |= MGMT_SETTING_CONNECTABLE;
837 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
838 settings |= MGMT_SETTING_FAST_CONNECTABLE;
840 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
841 settings |= MGMT_SETTING_DISCOVERABLE;
843 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
844 settings |= MGMT_SETTING_BONDABLE;
846 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
847 settings |= MGMT_SETTING_BREDR;
849 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
850 settings |= MGMT_SETTING_LE;
852 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
853 settings |= MGMT_SETTING_LINK_SECURITY;
855 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
856 settings |= MGMT_SETTING_SSP;
858 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
859 settings |= MGMT_SETTING_HS;
861 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
862 settings |= MGMT_SETTING_ADVERTISING;
864 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
865 settings |= MGMT_SETTING_SECURE_CONN;
867 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
868 settings |= MGMT_SETTING_DEBUG_KEYS;
870 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
871 settings |= MGMT_SETTING_PRIVACY;
873 /* The current setting for static address has two purposes. The
874 * first is to indicate if the static address will be used and
875 * the second is to indicate if it is actually set.
877 * This means if the static address is not configured, this flag
878 * will never be set. If the address is configured, then if the
879 * address is actually used decides if the flag is set or not.
881 * For single mode LE only controllers and dual-mode controllers
882 * with BR/EDR disabled, the existence of the static address will
885 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
886 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
887 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
888 if (bacmp(&hdev->static_addr, BDADDR_ANY))
889 settings |= MGMT_SETTING_STATIC_ADDRESS;
892 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
893 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
898 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
900 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
903 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
904 struct hci_dev *hdev,
907 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Return the LE advertising flags (LE_AD_GENERAL/LE_AD_LIMITED or 0)
 * matching the current - or pending - discoverable mode.
 */
910 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
912 struct mgmt_pending_cmd *cmd;
914 /* If there's a pending mgmt command the flags will not yet have
915 * their final values, so check for this first.
917 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
919 struct mgmt_mode *cp = cmd->param;
/* cp->val: 0x01 = general discoverable, 0x02 = limited discoverable. */
921 return LE_AD_GENERAL;
922 else if (cp->val == 0x02)
923 return LE_AD_LIMITED;
925 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
926 return LE_AD_LIMITED;
927 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
928 return LE_AD_GENERAL;
/* Return whether the controller is (or is about to become) connectable,
 * preferring the value of a pending SET_CONNECTABLE command over the
 * current flag.
 */
934 bool mgmt_get_connectable(struct hci_dev *hdev)
936 struct mgmt_pending_cmd *cmd;
938 /* If there's a pending mgmt command the flag will not yet have
939 * it's final value, so check for this first.
941 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
943 struct mgmt_mode *cp = cmd->param;
948 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Delayed work: when the service cache expires, refresh the EIR data and
 * class of device via a single HCI request. No-op if HCI_SERVICE_CACHE
 * was not set.
 */
951 static void service_cache_off(struct work_struct *work)
953 struct hci_dev *hdev = container_of(work, struct hci_dev,
955 struct hci_request req;
957 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
960 hci_req_init(&req, hdev);
964 __hci_req_update_eir(&req);
965 __hci_req_update_class(&req);
967 hci_dev_unlock(hdev);
969 hci_req_run(&req, NULL);
/* Delayed work: mark the resolvable private address as expired and, when
 * advertising is active, restart advertising so a fresh RPA gets
 * generated and programmed.
 */
972 static void rpa_expired(struct work_struct *work)
974 struct hci_dev *hdev = container_of(work, struct hci_dev,
976 struct hci_request req;
978 bt_dev_dbg(hdev, "");
980 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
982 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
985 /* The generation of a new RPA and programming it into the
986 * controller happens in the hci_req_enable_advertising()
989 hci_req_init(&req, hdev);
990 if (ext_adv_capable(hdev))
991 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
993 __hci_req_enable_advertising(&req);
994 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialization; guarded by test-and-set of
 * HCI_MGMT so repeated calls are no-ops.
 */
997 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
999 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1002 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1003 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1005 /* Non-mgmt controlled devices get this bit set
1006 * implicitly so that pairing works for them, however
1007 * for mgmt we require user-space to explicitly enable
1010 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: reply with address, HCI version,
 * manufacturer, supported/current settings, device class and names.
 * NOTE(review): the matching hci_dev_lock() line is missing from this
 * extraction (only the unlock is visible).
 */
1013 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1014 void *data, u16 data_len)
1016 struct mgmt_rp_read_info rp;
1018 bt_dev_dbg(hdev, "sock %p", sk);
1022 memset(&rp, 0, sizeof(rp));
1024 bacpy(&rp.bdaddr, &hdev->bdaddr);
1026 rp.version = hdev->hci_ver;
1027 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1029 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1030 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1032 memcpy(rp.dev_class, hdev->dev_class, 3);
1034 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1035 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1037 hci_dev_unlock(hdev);
1039 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Serialize class of device, appearance and the complete/short local
 * names into @eir as EIR structures; returns the number of bytes written.
 * Caller must provide a buffer large enough for all appended fields.
 */
1043 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1048 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1049 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1050 hdev->dev_class, 3);
1052 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1053 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1056 name_len = strlen(hdev->dev_name);
1057 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1058 hdev->dev_name, name_len);
1060 name_len = strlen(hdev->short_name);
1061 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1062 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but the
 * class/name data is packed as EIR structures. Also switches the calling
 * socket from dev-class/local-name events to extended info events.
 */
1067 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1068 void *data, u16 data_len)
1071 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1074 bt_dev_dbg(hdev, "sock %p", sk);
1076 memset(&buf, 0, sizeof(buf));
1080 bacpy(&rp->bdaddr, &hdev->bdaddr);
1082 rp->version = hdev->hci_ver;
1083 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1085 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1086 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1089 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1090 rp->eir_len = cpu_to_le16(eir_len);
1092 hci_dev_unlock(hdev);
1094 /* If this command is called at least once, then the events
1095 * for class of device and local name changes are disabled
1096 * and only the new extended controller information event
1099 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1100 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1101 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1103 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1104 sizeof(*rp) + eir_len);
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (with freshly built EIR payload) to
 * sockets subscribed to extended info events, skipping @skip.
 */
1107 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1110 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1113 memset(buf, 0, sizeof(buf));
1115 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1116 ev->eir_len = cpu_to_le16(eir_len);
1118 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1119 sizeof(*ev) + eir_len,
1120 HCI_MGMT_EXT_INFO_EVENTS, skip);
1123 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1125 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1127 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1131 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1133 bt_dev_dbg(hdev, "status 0x%02x", status);
1135 if (hci_conn_count(hdev) == 0) {
1136 cancel_delayed_work(&hdev->power_off);
1137 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1141 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1143 struct mgmt_ev_advertising_added ev;
1145 ev.instance = instance;
1147 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1150 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1153 struct mgmt_ev_advertising_removed ev;
1155 ev.instance = instance;
1157 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1160 static void cancel_adv_timeout(struct hci_dev *hdev)
1162 if (hdev->adv_instance_timeout) {
1163 hdev->adv_instance_timeout = 0;
1164 cancel_delayed_work(&hdev->adv_instance_expire);
/* Build and run one HCI request that quiesces the controller for power
 * off: disable page/inquiry scan, drop advertising instances, stop
 * discovery, and abort every connection with reason 0x15. Returns the
 * hci_req_run() result (-ENODATA when nothing needed doing).
 */
1168 static int clean_up_hci_state(struct hci_dev *hdev)
1170 struct hci_request req;
1171 struct hci_conn *conn;
1172 bool discov_stopped;
1175 hci_req_init(&req, hdev);
1177 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1178 test_bit(HCI_PSCAN, &hdev->flags)) {
1180 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1183 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1185 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1186 __hci_req_disable_advertising(&req);
1188 discov_stopped = hci_req_stop_discovery(&req);
1190 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1191 /* 0x15 == Terminated due to Power Off */
1192 __hci_abort_conn(&req, conn, 0x15);
1195 err = hci_req_run(&req, clean_up_hci_complete);
1196 if (!err && discov_stopped)
1197 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: validate val (0/1), reject if another
 * SET_POWERED is pending, short-circuit when already in the requested
 * state, then queue power_on work or clean up state and schedule
 * power_off.
 */
1202 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1205 struct mgmt_mode *cp = data;
1206 struct mgmt_pending_cmd *cmd;
1209 bt_dev_dbg(hdev, "sock %p", sk);
1211 if (cp->val != 0x00 && cp->val != 0x01)
1212 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1213 MGMT_STATUS_INVALID_PARAMS);
1217 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1218 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Already in the requested power state: reply with current settings. */
1223 if (!!cp->val == hdev_is_powered(hdev)) {
1224 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1228 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1235 queue_work(hdev->req_workqueue, &hdev->power_on);
1238 /* Disconnect connections, stop scans, etc */
1239 err = clean_up_hci_state(hdev);
1241 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1242 HCI_POWER_OFF_TIMEOUT);
1244 /* ENODATA means there were no HCI commands queued */
1245 if (err == -ENODATA) {
1246 cancel_delayed_work(&hdev->power_off);
1247 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1253 hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_SETTINGS carrying the current settings bitmask to all
 * sockets that opted into setting events, except @skip.
 */
1257 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1259 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1261 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1262 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast a settings change to every listener. */
1265 int mgmt_new_settings(struct hci_dev *hdev)
1267 return new_settings(hdev, NULL);
1272 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer a pending command with the
 * current settings, unlink it, remember the first socket seen in the
 * cmd_lookup match (held so the caller can skip it when broadcasting),
 * and free the command.
 */
1276 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1278 struct cmd_lookup *match = data;
1280 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1282 list_del(&cmd->list);
1284 if (match->sk == NULL) {
1285 match->sk = cmd->sk;
1286 sock_hold(match->sk);
1289 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail a pending command with *status
 * (a u8 passed via @data — declaration elided in this excerpt) and
 * remove it.
 */
1292 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1296 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1297 mgmt_pending_remove(cmd);
/* Like cmd_status_rsp, but prefer the command's own cmd_complete
 * callback when one is set; otherwise fall back to a plain status
 * response.
 */
1300 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1302 if (cmd->cmd_complete) {
1305 cmd->cmd_complete(cmd, *status);
1306 mgmt_pending_remove(cmd);
1311 cmd_status_rsp(cmd, data);
/* Complete a pending command by echoing back its full parameter blob. */
1314 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1316 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1317 cmd->param, cmd->param_len);
/* Complete a pending command returning only the leading mgmt_addr_info
 * portion of its parameters (address + type).
 */
1320 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1322 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1323 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an mgmt status: NOT_SUPPORTED if the
 * controller lacks BR/EDR, REJECTED if BR/EDR is disabled, else SUCCESS.
 */
1326 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1328 if (!lmp_bredr_capable(hdev))
1329 return MGMT_STATUS_NOT_SUPPORTED;
1330 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1331 return MGMT_STATUS_REJECTED;
1333 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an mgmt status, mirroring mgmt_bredr_support. */
1336 static u8 mgmt_le_support(struct hci_dev *hdev)
1338 if (!lmp_le_capable(hdev))
1339 return MGMT_STATUS_NOT_SUPPORTED;
1340 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1341 return MGMT_STATUS_REJECTED;
1343 return MGMT_STATUS_SUCCESS;
/* HCI completion hook for SET_DISCOVERABLE. On failure, report the
 * mapped status and drop HCI_LIMITED_DISCOVERABLE; on success, arm the
 * discoverable timeout (if any), ack the pending command, and broadcast
 * the new settings. NOTE(review): excerpt elides braces/else structure.
 */
1346 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1348 struct mgmt_pending_cmd *cmd;
1350 bt_dev_dbg(hdev, "status 0x%02x", status);
1354 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1359 u8 mgmt_err = mgmt_status(status);
1360 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1361 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arming of discov_off happens here, after the HCI round-trip. */
1365 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1366 hdev->discov_timeout > 0) {
1367 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1368 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1371 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1372 new_settings(hdev, cmd->sk);
1375 mgmt_pending_remove(cmd);
1378 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable (limited requires a timeout, off forbids
 * one). Handles the powered-off case by flipping flags only, the
 * "same mode, new timeout" case without HCI traffic, and otherwise
 * queues discoverable_update work with a pending command.
 * NOTE(review): excerpt elides braces, goto labels and some locals
 * (e.g. timeout/err declarations).
 */
1381 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1384 struct mgmt_cp_set_discoverable *cp = data;
1385 struct mgmt_pending_cmd *cmd;
1389 bt_dev_dbg(hdev, "sock %p", sk);
/* Needs at least one of LE/BR-EDR enabled to make sense. */
1391 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1392 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1393 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1394 MGMT_STATUS_REJECTED);
1396 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1397 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1398 MGMT_STATUS_INVALID_PARAMS);
1400 timeout = __le16_to_cpu(cp->timeout);
1402 /* Disabling discoverable requires that no timeout is set,
1403 * and enabling limited discoverable requires a timeout.
1405 if ((cp->val == 0x00 && timeout > 0) ||
1406 (cp->val == 0x02 && timeout == 0))
1407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1408 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense while powered. */
1412 if (!hdev_is_powered(hdev) && timeout > 0) {
1413 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1414 MGMT_STATUS_NOT_POWERED);
1418 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1419 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1420 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1425 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1427 MGMT_STATUS_REJECTED);
1431 if (hdev->advertising_paused) {
1432 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1437 if (!hdev_is_powered(hdev)) {
1438 bool changed = false;
1440 /* Setting limited discoverable when powered off is
1441 * not a valid operation since it requires a timeout
1442 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1444 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1445 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1449 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1454 err = new_settings(hdev, sk);
1459 /* If the current mode is the same, then just update the timeout
1460 * value with the new value. And if only the timeout gets updated,
1461 * then no need for any HCI transactions.
1463 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1464 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1465 HCI_LIMITED_DISCOVERABLE)) {
1466 cancel_delayed_work(&hdev->discov_off);
1467 hdev->discov_timeout = timeout;
1469 if (cp->val && hdev->discov_timeout > 0) {
1470 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1471 queue_delayed_work(hdev->req_workqueue,
1472 &hdev->discov_off, to);
1475 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1479 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1485 /* Cancel any potential discoverable timeout that might be
1486 * still active and store new timeout value. The arming of
1487 * the timeout happens in the complete handler.
1489 cancel_delayed_work(&hdev->discov_off);
1490 hdev->discov_timeout = timeout;
1493 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1495 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1497 /* Limited discoverable mode */
1498 if (cp->val == 0x02)
1499 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1501 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1503 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1507 hci_dev_unlock(hdev);
/* HCI completion hook for SET_CONNECTABLE: fail or ack the pending
 * command and broadcast new settings on success.
 */
1511 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1513 struct mgmt_pending_cmd *cmd;
1515 bt_dev_dbg(hdev, "status 0x%02x", status);
1519 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1524 u8 mgmt_err = mgmt_status(status);
1525 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1529 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1530 new_settings(hdev, cmd->sk);
1533 mgmt_pending_remove(cmd);
1536 hci_dev_unlock(hdev);
/* Flag-only path for SET_CONNECTABLE (used when no HCI traffic is
 * required): toggle HCI_CONNECTABLE, clear HCI_DISCOVERABLE when
 * disabling, ack the command, and on a change refresh scan state and
 * broadcast new settings. NOTE(review): excerpt elides braces and the
 * `changed = true` assignment.
 */
1539 static int set_connectable_update_settings(struct hci_dev *hdev,
1540 struct sock *sk, u8 val)
1542 bool changed = false;
1545 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1549 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1551 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1552 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1555 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1560 hci_req_update_scan(hdev);
1561 hci_update_background_scan(hdev);
1562 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler. Powered-off devices take the
 * flag-only path; otherwise a pending command is added, flags are
 * updated (disabling also clears both discoverable flags and any
 * discov timeout), and connectable_update work is queued.
 * NOTE(review): excerpt elides braces/labels.
 */
1568 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1571 struct mgmt_mode *cp = data;
1572 struct mgmt_pending_cmd *cmd;
1575 bt_dev_dbg(hdev, "sock %p", sk);
1577 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1578 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1579 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1580 MGMT_STATUS_REJECTED);
1582 if (cp->val != 0x00 && cp->val != 0x01)
1583 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1584 MGMT_STATUS_INVALID_PARAMS);
1588 if (!hdev_is_powered(hdev)) {
1589 err = set_connectable_update_settings(hdev, sk, cp->val);
1593 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1594 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1595 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1600 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1607 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Disabling connectable also tears down discoverability. */
1609 if (hdev->discov_timeout > 0)
1610 cancel_delayed_work(&hdev->discov_off);
1612 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1613 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1614 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1617 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1621 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler. Pure host-flag toggle — no HCI command
 * is needed — except that under limited privacy a bondable change can
 * affect the advertised address, so discoverable_update is requeued.
 * NOTE(review): excerpt elides braces and the err/changed declarations.
 */
1625 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1628 struct mgmt_mode *cp = data;
1632 bt_dev_dbg(hdev, "sock %p", sk);
1634 if (cp->val != 0x00 && cp->val != 0x01)
1635 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1636 MGMT_STATUS_INVALID_PARAMS);
1641 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1643 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1645 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1650 /* In limited privacy mode the change of bondable mode
1651 * may affect the local advertising address.
1653 if (hdev_is_powered(hdev) &&
1654 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1655 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1656 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1657 queue_work(hdev->req_workqueue,
1658 &hdev->discoverable_update);
1660 err = new_settings(hdev, sk);
1664 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler. Requires BR/EDR. Powered-off:
 * flip HCI_LINK_SECURITY flag only. Powered: if HCI_AUTH already
 * matches, ack immediately; otherwise send Write Auth Enable with a
 * pending command. NOTE(review): excerpt elides braces/labels and the
 * `val` derivation from cp->val.
 */
1668 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1671 struct mgmt_mode *cp = data;
1672 struct mgmt_pending_cmd *cmd;
1676 bt_dev_dbg(hdev, "sock %p", sk);
1678 status = mgmt_bredr_support(hdev);
1680 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1683 if (cp->val != 0x00 && cp->val != 0x01)
1684 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1685 MGMT_STATUS_INVALID_PARAMS);
1689 if (!hdev_is_powered(hdev)) {
1690 bool changed = false;
1692 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1693 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1697 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1702 err = new_settings(hdev, sk);
1707 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1708 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1715 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1716 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1720 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1726 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1728 mgmt_pending_remove(cmd);
1733 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler. Requires BR/EDR and SSP-capable controller.
 * Powered-off: toggle HCI_SSP_ENABLED (disabling also clears
 * HCI_HS_ENABLED). Powered: send Write SSP Mode; when disabling while
 * debug keys are in use, also turn SSP debug mode off first.
 * NOTE(review): excerpt elides braces/labels and the flag names in the
 * test_and_set/clear calls at 1764-1770.
 */
1737 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1739 struct mgmt_mode *cp = data;
1740 struct mgmt_pending_cmd *cmd;
1744 bt_dev_dbg(hdev, "sock %p", sk);
1746 status = mgmt_bredr_support(hdev);
1748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1750 if (!lmp_ssp_capable(hdev))
1751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1752 MGMT_STATUS_NOT_SUPPORTED);
1754 if (cp->val != 0x00 && cp->val != 0x01)
1755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1756 MGMT_STATUS_INVALID_PARAMS);
1760 if (!hdev_is_powered(hdev)) {
1764 changed = !hci_dev_test_and_set_flag(hdev,
1767 changed = hci_dev_test_and_clear_flag(hdev,
1770 changed = hci_dev_test_and_clear_flag(hdev,
1773 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1776 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1781 err = new_settings(hdev, sk);
1786 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1787 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1792 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1793 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1797 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turning SSP off while debug keys are active: disable debug mode
 * (best effort, return value intentionally ignored) before the mode
 * write below.
 */
1803 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1804 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1805 sizeof(cp->val), &cp->val);
1807 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1809 mgmt_pending_remove(cmd);
1814 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed / AMP) handler. Only valid when
 * CONFIG_BT_HS is enabled, BR/EDR and SSP are available and SSP is on.
 * Pure host-flag toggle; disabling while powered is rejected.
 * NOTE(review): excerpt elides braces/labels and some locals.
 */
1818 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1820 struct mgmt_mode *cp = data;
1825 bt_dev_dbg(hdev, "sock %p", sk);
1827 if (!IS_ENABLED(CONFIG_BT_HS))
1828 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1829 MGMT_STATUS_NOT_SUPPORTED);
1831 status = mgmt_bredr_support(hdev);
1833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1835 if (!lmp_ssp_capable(hdev))
1836 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1837 MGMT_STATUS_NOT_SUPPORTED);
1839 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1840 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1841 MGMT_STATUS_REJECTED);
1843 if (cp->val != 0x00 && cp->val != 0x01)
1844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1845 MGMT_STATUS_INVALID_PARAMS);
1849 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1856 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1858 if (hdev_is_powered(hdev)) {
1859 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1860 MGMT_STATUS_REJECTED);
1864 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1867 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1872 err = new_settings(hdev, sk);
1875 hci_dev_unlock(hdev);
/* HCI completion hook for SET_LE. On error, fail all pending SET_LE
 * commands; on success, ack them via settings_rsp, broadcast new
 * settings, and — if LE ended up enabled — refresh default advertising
 * and scan response data and the background scan.
 * NOTE(review): excerpt elides braces and the err handling around
 * __hci_req_setup_ext_adv_instance.
 */
1879 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1881 struct cmd_lookup match = { NULL, hdev };
1886 u8 mgmt_err = mgmt_status(status);
1888 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1893 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1895 new_settings(hdev, match.sk);
1900 /* Make sure the controller has a good default for
1901 * advertising data. Restrict the update to when LE
1902 * has actually been enabled. During power on, the
1903 * update in powered_update_hci will take care of it.
1905 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1906 struct hci_request req;
1907 hci_req_init(&req, hdev);
1908 if (ext_adv_capable(hdev)) {
1911 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1913 __hci_req_update_scan_rsp_data(&req, 0x00);
1915 __hci_req_update_adv_data(&req, 0x00);
1916 __hci_req_update_scan_rsp_data(&req, 0x00);
1918 hci_req_run(&req, NULL);
1919 hci_update_background_scan(hdev);
1923 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler. LE-only controllers may not switch LE off
 * (enable is acked gracefully, disable is rejected). When powered off
 * or already in the requested host state, only flags change; otherwise
 * a Write LE Host Supported request is built (disabling advertising /
 * clearing ext adv sets first when turning LE off) and run with
 * le_enable_complete. NOTE(review): excerpt elides braces/labels and
 * several locals (val, enabled derivation, hci_cp.le assignment).
 */
1926 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1928 struct mgmt_mode *cp = data;
1929 struct hci_cp_write_le_host_supported hci_cp;
1930 struct mgmt_pending_cmd *cmd;
1931 struct hci_request req;
1935 bt_dev_dbg(hdev, "sock %p", sk);
1937 if (!lmp_le_capable(hdev))
1938 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1939 MGMT_STATUS_NOT_SUPPORTED);
1941 if (cp->val != 0x00 && cp->val != 0x01)
1942 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1943 MGMT_STATUS_INVALID_PARAMS);
1945 /* Bluetooth single mode LE only controllers or dual-mode
1946 * controllers configured as LE only devices, do not allow
1947 * switching LE off. These have either LE enabled explicitly
1948 * or BR/EDR has been previously switched off.
1950 * When trying to enable an already enabled LE, then gracefully
1951 * send a positive response. Trying to disable it however will
1952 * result into rejection.
1954 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1955 if (cp->val == 0x01)
1956 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1958 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1959 MGMT_STATUS_REJECTED);
1965 enabled = lmp_host_le_capable(hdev);
/* Turning LE off also removes all advertising instances. */
1968 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1970 if (!hdev_is_powered(hdev) || val == enabled) {
1971 bool changed = false;
1973 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1974 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1978 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1979 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1983 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1988 err = new_settings(hdev, sk);
1993 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1994 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1995 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2000 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2006 hci_req_init(&req, hdev);
2008 memset(&hci_cp, 0, sizeof(hci_cp));
2012 hci_cp.simul = 0x00;
2014 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2015 __hci_req_disable_advertising(&req);
2017 if (ext_adv_capable(hdev))
2018 __hci_req_clear_ext_adv_sets(&req);
2021 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2024 err = hci_req_run(&req, le_enable_complete);
2026 mgmt_pending_remove(cmd);
2029 hci_dev_unlock(hdev);
2033 /* This is a helper function to test for pending mgmt commands that can
2034 * cause CoD or EIR HCI commands. We can only allow one such pending
2035 * mgmt command at a time since otherwise we cannot easily track what
2036 * the current values are, will be, and based on that calculate if a new
2037 * HCI command needs to be sent and if yes with what value.
2039 static bool pending_eir_or_class(struct hci_dev *hdev)
2041 struct mgmt_pending_cmd *cmd;
/* Scan all pending commands for the opcodes that touch class/EIR.
 * NOTE(review): excerpt elides the `return true` / `return false`
 * lines of this function.
 */
2043 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2044 switch (cmd->opcode) {
2045 case MGMT_OP_ADD_UUID:
2046 case MGMT_OP_REMOVE_UUID:
2047 case MGMT_OP_SET_DEV_CLASS:
2048 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16-/32-bit UUIDs differ only in bytes 12+.
 */
2056 static const u8 bluetooth_base_uuid[] = {
2057 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2058 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if the first 12 bytes don't match the base
 * UUID it is a true 128-bit UUID; otherwise bytes 12-15 hold the short
 * form. NOTE(review): the return statements (128/32/16) are elided in
 * this excerpt.
 */
2061 static u8 get_uuid_size(const u8 *uuid)
2065 if (memcmp(uuid, bluetooth_base_uuid, 12))
2068 val = get_unaligned_le32(&uuid[12]);
/* Complete the pending @mgmt_op command with the (mapped) HCI status
 * and the current 3-byte Class of Device as response payload.
 */
2075 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2077 struct mgmt_pending_cmd *cmd;
2081 cmd = pending_find(mgmt_op, hdev);
2085 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2086 mgmt_status(status), hdev->dev_class, 3);
2088 mgmt_pending_remove(cmd);
2091 hci_dev_unlock(hdev);
/* HCI completion hook for ADD_UUID: forward to mgmt_class_complete. */
2094 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2096 bt_dev_dbg(hdev, "status 0x%02x", status);
2098 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: reject while another class/EIR-affecting
 * command is pending, append the UUID to hdev->uuids, then refresh
 * Class of Device and EIR. -ENODATA from hci_req_run means no HCI
 * update was needed, so complete immediately with the current class.
 * NOTE(review): excerpt elides braces/labels and the !uuid OOM check.
 */
2101 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2103 struct mgmt_cp_add_uuid *cp = data;
2104 struct mgmt_pending_cmd *cmd;
2105 struct hci_request req;
2106 struct bt_uuid *uuid;
2109 bt_dev_dbg(hdev, "sock %p", sk);
2113 if (pending_eir_or_class(hdev)) {
2114 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2119 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2125 memcpy(uuid->uuid, cp->uuid, 16);
2126 uuid->svc_hint = cp->svc_hint;
2127 uuid->size = get_uuid_size(cp->uuid);
2129 list_add_tail(&uuid->list, &hdev->uuids);
2131 hci_req_init(&req, hdev);
2133 __hci_req_update_class(&req);
2134 __hci_req_update_eir(&req);
2136 err = hci_req_run(&req, add_uuid_complete);
2138 if (err != -ENODATA)
2141 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2142 hdev->dev_class, 3);
2146 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2155 hci_dev_unlock(hdev);
/* Arm the service-cache window (only while powered). Returns whether
 * the cache was (re)armed — callers skip the EIR/class update then.
 * NOTE(review): the explicit return statements are elided here.
 */
2159 static bool enable_service_cache(struct hci_dev *hdev)
2161 if (!hdev_is_powered(hdev))
2164 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2165 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI completion hook for REMOVE_UUID: forward to mgmt_class_complete. */
2173 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2175 bt_dev_dbg(hdev, "status 0x%02x", status);
2177 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler. An all-zero UUID clears the whole list
 * (possibly deferring the EIR/class update via the service cache);
 * otherwise matching entries are removed. Refuses when class/EIR work
 * is already pending; INVALID_PARAMS if no entry matched.
 * NOTE(review): excerpt elides braces/labels, the `found` counter and
 * kfree of removed entries.
 */
2180 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2183 struct mgmt_cp_remove_uuid *cp = data;
2184 struct mgmt_pending_cmd *cmd;
2185 struct bt_uuid *match, *tmp;
2186 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2187 struct hci_request req;
2190 bt_dev_dbg(hdev, "sock %p", sk);
2194 if (pending_eir_or_class(hdev)) {
2195 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: drop every stored UUID. */
2200 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2201 hci_uuids_clear(hdev);
2203 if (enable_service_cache(hdev)) {
2204 err = mgmt_cmd_complete(sk, hdev->id,
2205 MGMT_OP_REMOVE_UUID,
2206 0, hdev->dev_class, 3);
2215 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2216 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2219 list_del(&match->list);
2225 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2226 MGMT_STATUS_INVALID_PARAMS);
2231 hci_req_init(&req, hdev);
2233 __hci_req_update_class(&req);
2234 __hci_req_update_eir(&req);
2236 err = hci_req_run(&req, remove_uuid_complete);
2238 if (err != -ENODATA)
2241 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2242 hdev->dev_class, 3);
2246 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2255 hci_dev_unlock(hdev);
/* HCI completion hook for SET_DEV_CLASS: forward to mgmt_class_complete. */
2259 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2261 bt_dev_dbg(hdev, "status 0x%02x", status);
2263 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler. Validates the reserved bits of
 * minor (low 2) and major (high 3), stores the classes, and — when
 * powered — flushes the service cache (EIR refresh) and writes the new
 * Class of Device. NOTE(review): excerpt elides braces/labels.
 */
2266 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2269 struct mgmt_cp_set_dev_class *cp = data;
2270 struct mgmt_pending_cmd *cmd;
2271 struct hci_request req;
2274 bt_dev_dbg(hdev, "sock %p", sk);
2276 if (!lmp_bredr_capable(hdev))
2277 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2278 MGMT_STATUS_NOT_SUPPORTED);
2282 if (pending_eir_or_class(hdev)) {
2283 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved CoD bits must be zero. */
2288 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2289 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2290 MGMT_STATUS_INVALID_PARAMS);
2294 hdev->major_class = cp->major;
2295 hdev->minor_class = cp->minor;
2297 if (!hdev_is_powered(hdev)) {
2298 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2299 hdev->dev_class, 3);
2303 hci_req_init(&req, hdev);
/* Flushing the service cache needs the dev lock dropped because
 * cancel_delayed_work_sync() may wait for the work item.
 */
2305 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2306 hci_dev_unlock(hdev);
2307 cancel_delayed_work_sync(&hdev->service_cache);
2309 __hci_req_update_eir(&req);
2312 __hci_req_update_class(&req);
2314 err = hci_req_run(&req, set_class_complete);
2316 if (err != -ENODATA)
2319 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2320 hdev->dev_class, 3);
2324 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2333 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler. Validates key_count against the
 * maximum that fits in a u16-sized message and against the actual
 * payload length, validates each key (BR/EDR address, type <= 0x08),
 * clears existing keys, updates HCI_KEEP_DEBUG_KEYS from
 * cp->debug_keys, and stores all keys except debug-combination ones.
 * NOTE(review): excerpt elides braces and some locals (i, changed).
 */
2337 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2340 struct mgmt_cp_load_link_keys *cp = data;
2341 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2342 sizeof(struct mgmt_link_key_info));
2343 u16 key_count, expected_len;
2347 bt_dev_dbg(hdev, "sock %p", sk);
2349 if (!lmp_bredr_capable(hdev))
2350 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2351 MGMT_STATUS_NOT_SUPPORTED);
2353 key_count = __le16_to_cpu(cp->key_count);
2354 if (key_count > max_key_count) {
2355 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2357 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2358 MGMT_STATUS_INVALID_PARAMS);
/* The message length must match key_count exactly. */
2361 expected_len = struct_size(cp, keys, key_count);
2362 if (expected_len != len) {
2363 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2365 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2366 MGMT_STATUS_INVALID_PARAMS);
2369 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2370 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2371 MGMT_STATUS_INVALID_PARAMS);
2373 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: validate every entry before touching state. */
2376 for (i = 0; i < key_count; i++) {
2377 struct mgmt_link_key_info *key = &cp->keys[i];
2379 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2380 return mgmt_cmd_status(sk, hdev->id,
2381 MGMT_OP_LOAD_LINK_KEYS,
2382 MGMT_STATUS_INVALID_PARAMS);
2387 hci_link_keys_clear(hdev);
2390 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2392 changed = hci_dev_test_and_clear_flag(hdev,
2393 HCI_KEEP_DEBUG_KEYS);
2396 new_settings(hdev, NULL);
/* Second pass: store the keys. */
2398 for (i = 0; i < key_count; i++) {
2399 struct mgmt_link_key_info *key = &cp->keys[i];
2401 if (hci_is_blocked_key(hdev,
2402 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2404 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2409 /* Always ignore debug keys and require a new pairing if
2410 * the user wants to use them.
2412 if (key->type == HCI_LK_DEBUG_COMBINATION)
2415 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2416 key->type, key->pin_len, NULL);
2419 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2421 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for @bdaddr/@addr_type, skipping
 * @skip_sk.
 */
2426 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2427 u8 addr_type, struct sock *skip_sk)
2429 struct mgmt_ev_device_unpaired ev;
2431 bacpy(&ev.addr.bdaddr, bdaddr);
2432 ev.addr.type = addr_type;
2434 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler. Removes pairing material for a
 * BR/EDR link key or (via SMP) LE LTK/IRK, optionally disconnecting
 * the existing link (cp->disconnect). When no disconnect is needed the
 * command completes immediately and DEVICE_UNPAIRED is broadcast;
 * otherwise a pending command with addr_cmd_complete drives the abort.
 * NOTE(review): excerpt elides braces/labels and several locals
 * (addr_type, err declarations, conn = NULL assignments).
 */
2438 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2441 struct mgmt_cp_unpair_device *cp = data;
2442 struct mgmt_rp_unpair_device rp;
2443 struct hci_conn_params *params;
2444 struct mgmt_pending_cmd *cmd;
2445 struct hci_conn *conn;
2449 memset(&rp, 0, sizeof(rp));
2450 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2451 rp.addr.type = cp->addr.type;
2453 if (!bdaddr_type_is_valid(cp->addr.type))
2454 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2455 MGMT_STATUS_INVALID_PARAMS,
2458 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2459 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2460 MGMT_STATUS_INVALID_PARAMS,
2465 if (!hdev_is_powered(hdev)) {
2466 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2467 MGMT_STATUS_NOT_POWERED, &rp,
2472 if (cp->addr.type == BDADDR_BREDR) {
2473 /* If disconnection is requested, then look up the
2474 * connection. If the remote device is connected, it
2475 * will be later used to terminate the link.
2477 * Setting it to NULL explicitly will cause no
2478 * termination of the link.
2481 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2486 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2488 err = mgmt_cmd_complete(sk, hdev->id,
2489 MGMT_OP_UNPAIR_DEVICE,
2490 MGMT_STATUS_NOT_PAIRED, &rp,
2498 /* LE address type */
2499 addr_type = le_addr_type(cp->addr.type);
2501 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2502 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2504 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2505 MGMT_STATUS_NOT_PAIRED, &rp,
2510 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2512 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2517 /* Defer clearing up the connection parameters until closing to
2518 * give a chance of keeping them if a repairing happens.
2520 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2522 /* Disable auto-connection parameters if present */
2523 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2525 if (params->explicit_connect)
2526 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2528 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2531 /* If disconnection is not requested, then clear the connection
2532 * variable so that the link is not terminated.
2534 if (!cp->disconnect)
2538 /* If the connection variable is set, then termination of the
2539 * link is requested.
2542 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2544 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2548 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2555 cmd->cmd_complete = addr_cmd_complete;
2557 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2559 mgmt_pending_remove(cmd);
2562 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler. Validates the address type, requires the
 * adapter up and no DISCONNECT already pending, looks up the ACL or LE
 * connection, and terminates it with Remote User Terminated reason via
 * a pending command. NOTE(review): excerpt elides braces/labels.
 */
2566 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2569 struct mgmt_cp_disconnect *cp = data;
2570 struct mgmt_rp_disconnect rp;
2571 struct mgmt_pending_cmd *cmd;
2572 struct hci_conn *conn;
2575 bt_dev_dbg(hdev, "sock %p", sk);
2577 memset(&rp, 0, sizeof(rp));
2578 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2579 rp.addr.type = cp->addr.type;
2581 if (!bdaddr_type_is_valid(cp->addr.type))
2582 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2583 MGMT_STATUS_INVALID_PARAMS,
2588 if (!test_bit(HCI_UP, &hdev->flags)) {
2589 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2590 MGMT_STATUS_NOT_POWERED, &rp,
2595 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2596 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2597 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2601 if (cp->addr.type == BDADDR_BREDR)
2602 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2605 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2606 le_addr_type(cp->addr.type));
2608 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2609 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2610 MGMT_STATUS_NOT_CONNECTED, &rp,
2615 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2621 cmd->cmd_complete = generic_cmd_complete;
2623 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2625 mgmt_pending_remove(cmd);
2628 hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into an mgmt BDADDR_*
 * address type. NOTE(review): the LE_LINK case label is elided in this
 * excerpt; LE public maps to BDADDR_LE_PUBLIC, other LE addresses to
 * BDADDR_LE_RANDOM, and everything else falls back to BDADDR_BREDR.
 */
2632 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2634 switch (link_type) {
2636 switch (addr_type) {
2637 case ADDR_LE_DEV_PUBLIC:
2638 return BDADDR_LE_PUBLIC;
2641 /* Fallback to LE Random address type */
2642 return BDADDR_LE_RANDOM;
2646 /* Fallback to BR/EDR type */
2647 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: count MGMT-visible connections,
 * allocate a response sized for that count, fill in addresses (SCO and
 * eSCO links are filtered out during the fill pass), and reply with
 * the possibly smaller actual count.
 * NOTE(review): excerpt elides braces, the loop counter reset and the
 * OOM/err paths.
 */
2651 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2654 struct mgmt_rp_get_connections *rp;
2659 bt_dev_dbg(hdev, "sock %p", sk);
2663 if (!hdev_is_powered(hdev)) {
2664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2665 MGMT_STATUS_NOT_POWERED);
2670 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2671 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2675 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2682 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2683 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2685 bacpy(&rp->addr[i].bdaddr, &c->dst);
2686 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2687 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2692 rp->conn_count = cpu_to_le16(i);
2694 /* Recalculate length in case of filtered SCO connections, etc */
2695 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2696 struct_size(rp, addr, i));
2701 hci_dev_unlock(hdev);
/* Queue an HCI PIN Code Negative Reply for @cp->addr with a pending
 * PIN_CODE_NEG_REPLY command tracking it; removes the command if the
 * send fails.
 */
2705 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2706 struct mgmt_cp_pin_code_neg_reply *cp)
2708 struct mgmt_pending_cmd *cmd;
2711 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2716 cmd->cmd_complete = addr_cmd_complete;
2718 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2719 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2721 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler. Requires power and an existing ACL
 * connection. If the pending security level demands a 16-digit PIN and
 * the supplied one is shorter, automatically sends a negative reply
 * instead and returns INVALID_PARAMS. Otherwise forwards the PIN via
 * HCI with a pending command. NOTE(review): excerpt elides braces and
 * error labels.
 */
2726 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2729 struct hci_conn *conn;
2730 struct mgmt_cp_pin_code_reply *cp = data;
2731 struct hci_cp_pin_code_reply reply;
2732 struct mgmt_pending_cmd *cmd;
2735 bt_dev_dbg(hdev, "sock %p", sk);
2739 if (!hdev_is_powered(hdev)) {
2740 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2741 MGMT_STATUS_NOT_POWERED);
2745 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2747 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2748 MGMT_STATUS_NOT_CONNECTED);
2752 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2753 struct mgmt_cp_pin_code_neg_reply ncp;
2755 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2757 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2759 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2761 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2762 MGMT_STATUS_INVALID_PARAMS);
2767 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2773 cmd->cmd_complete = addr_cmd_complete;
2775 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2776 reply.pin_len = cp->pin_len;
2777 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2779 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2781 mgmt_pending_remove(cmd);
2784 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validates the requested IO capability
 * (must not exceed SMP_IO_KEYBOARD_DISPLAY) and stores it in
 * hdev->io_capability for use during subsequent pairings.
 */
2788 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2791 struct mgmt_cp_set_io_capability *cp = data;
2793 bt_dev_dbg(hdev, "sock %p", sk);
2795 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2796 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2797 MGMT_STATUS_INVALID_PARAMS);
2801 hdev->io_capability = cp->io_capability;
2803 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2805 hci_dev_unlock(hdev);
2807 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Walk hdev->mgmt_pending for the MGMT_OP_PAIR_DEVICE command whose
 * user_data points at @conn; used to match pairing callbacks back to the
 * originating mgmt request.
 */
2811 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2813 struct hci_dev *hdev = conn->hdev;
2814 struct mgmt_pending_cmd *cmd;
2816 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2817 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2820 if (cmd->user_data != conn)
/* Finalize a MGMT_OP_PAIR_DEVICE command: send the Pair Device reply with
 * @status, detach all pairing callbacks from the connection so no further
 * events are delivered, and drop the reference taken for the command.
 */
2829 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2831 struct mgmt_rp_pair_device rp;
2832 struct hci_conn *conn = cmd->user_data;
2835 bacpy(&rp.addr.bdaddr, &conn->dst);
2836 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2838 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2839 status, &rp, sizeof(rp));
2841 /* So we don't get further callbacks for this connection */
2842 conn->connect_cfm_cb = NULL;
2843 conn->security_cfm_cb = NULL;
2844 conn->disconn_cfm_cb = NULL;
2846 hci_conn_drop(conn);
2848 /* The device is paired so there is no need to remove
2849 * its connection parameters anymore.
2851 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over @conn finishes; completes any
 * pending Pair Device command with SUCCESS or FAILED accordingly.
 */
2858 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2860 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2861 struct mgmt_pending_cmd *cmd;
2863 cmd = find_pairing(conn);
2865 cmd->cmd_complete(cmd, status);
2866 mgmt_pending_remove(cmd);
/* BR/EDR connect/security/disconnect confirm callback: completes the
 * matching Pair Device command with the translated HCI status.
 */
2870 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2872 struct mgmt_pending_cmd *cmd;
2874 BT_DBG("status %u", status);
2876 cmd = find_pairing(conn);
2878 BT_DBG("Unable to find a pending command");
2882 cmd->cmd_complete(cmd, mgmt_status(status));
2883 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb(). NOTE(review): the excerpt is
 * missing lines between the debug print and find_pairing() — upstream this
 * early-returns on success since a connected LE link does not by itself
 * prove the pairing finished; confirm against the full source.
 */
2886 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2888 struct mgmt_pending_cmd *cmd;
2890 BT_DBG("status %u", status);
2895 cmd = find_pairing(conn);
2897 BT_DBG("Unable to find a pending command");
2901 cmd->cmd_complete(cmd, mgmt_status(status));
2902 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a remote
 * device.
 *
 * Validates the address type and IO capability, then refuses when the
 * adapter is powered off or the device is already paired. For BR/EDR an ACL
 * connection is created directly; for LE the connection parameters are
 * pre-registered and a scan-then-connect is started. Pairing progress is
 * tracked through a pending command whose completion is driven by the
 * connection callbacks installed below.
 */
2905 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2908 struct mgmt_cp_pair_device *cp = data;
2909 struct mgmt_rp_pair_device rp;
2910 struct mgmt_pending_cmd *cmd;
2911 u8 sec_level, auth_type;
2912 struct hci_conn *conn;
2915 bt_dev_dbg(hdev, "sock %p", sk);
2917 memset(&rp, 0, sizeof(rp));
2918 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2919 rp.addr.type = cp->addr.type;
2921 if (!bdaddr_type_is_valid(cp->addr.type))
2922 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2923 MGMT_STATUS_INVALID_PARAMS,
2926 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2927 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2928 MGMT_STATUS_INVALID_PARAMS,
2933 if (!hdev_is_powered(hdev)) {
2934 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2935 MGMT_STATUS_NOT_POWERED, &rp,
2940 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2941 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2942 MGMT_STATUS_ALREADY_PAIRED, &rp,
2947 sec_level = BT_SECURITY_MEDIUM;
2948 auth_type = HCI_AT_DEDICATED_BONDING;
2950 if (cp->addr.type == BDADDR_BREDR) {
2951 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2952 auth_type, CONN_REASON_PAIR_DEVICE);
2954 u8 addr_type = le_addr_type(cp->addr.type);
2955 struct hci_conn_params *p;
2957 /* When pairing a new device, it is expected to remember
2958 * this device for future connections. Adding the connection
2959 * parameter information ahead of time allows tracking
2960 * of the slave preferred values and will speed up any
2961 * further connection establishment.
2963 * If connection parameters already exist, then they
2964 * will be kept and this function does nothing.
2966 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2968 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2969 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2971 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2972 sec_level, HCI_LE_CONN_TIMEOUT,
2973 CONN_REASON_PAIR_DEVICE);
/* Map connection-creation errors to mgmt status codes. */
2979 if (PTR_ERR(conn) == -EBUSY)
2980 status = MGMT_STATUS_BUSY;
2981 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2982 status = MGMT_STATUS_NOT_SUPPORTED;
2983 else if (PTR_ERR(conn) == -ECONNREFUSED)
2984 status = MGMT_STATUS_REJECTED;
2986 status = MGMT_STATUS_CONNECT_FAILED;
2988 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2989 status, &rp, sizeof(rp));
/* A non-NULL connect_cfm_cb means another pairing already owns
 * this connection.
 */
2993 if (conn->connect_cfm_cb) {
2994 hci_conn_drop(conn);
2995 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2996 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3000 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3003 hci_conn_drop(conn);
3007 cmd->cmd_complete = pairing_complete;
3009 /* For LE, just connecting isn't a proof that the pairing finished */
3010 if (cp->addr.type == BDADDR_BREDR) {
3011 conn->connect_cfm_cb = pairing_complete_cb;
3012 conn->security_cfm_cb = pairing_complete_cb;
3013 conn->disconn_cfm_cb = pairing_complete_cb;
3015 conn->connect_cfm_cb = le_pairing_complete_cb;
3016 conn->security_cfm_cb = le_pairing_complete_cb;
3017 conn->disconn_cfm_cb = le_pairing_complete_cb;
3020 conn->io_capability = cp->io_cap;
3021 cmd->user_data = hci_conn_get(conn);
/* Already connected: kick off authentication right away; if it is
 * already satisfied, complete the command immediately.
 */
3023 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3024 hci_conn_security(conn, sec_level, auth_type, true)) {
3025 cmd->cmd_complete(cmd, 0);
3026 mgmt_pending_remove(cmd);
3032 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command for @addr. The pending pairing is completed with CANCELLED, any
 * stored link key / SMP state for the peer is removed, and the link is torn
 * down if it was only created for the pairing attempt.
 */
3036 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3039 struct mgmt_addr_info *addr = data;
3040 struct mgmt_pending_cmd *cmd;
3041 struct hci_conn *conn;
3044 bt_dev_dbg(hdev, "sock %p", sk);
3048 if (!hdev_is_powered(hdev)) {
3049 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3050 MGMT_STATUS_NOT_POWERED);
3054 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3056 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3057 MGMT_STATUS_INVALID_PARAMS);
3061 conn = cmd->user_data;
/* The address must match the device the pending pairing targets. */
3063 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3064 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3065 MGMT_STATUS_INVALID_PARAMS);
3069 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3070 mgmt_pending_remove(cmd);
3072 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3073 addr, sizeof(*addr));
3075 /* Since user doesn't want to proceed with the connection, abort any
3076 * ongoing pairing and then terminate the link if it was created
3077 * because of the pair device action.
3079 if (addr->type == BDADDR_BREDR)
3080 hci_remove_link_key(hdev, &addr->bdaddr);
3082 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3083 le_addr_type(addr->type));
3085 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3086 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3089 hci_dev_unlock(hdev);
/* Common backend for all user pairing responses (PIN neg reply, user
 * confirm / passkey replies and their negatives).
 *
 * For LE addresses the response is routed to SMP via
 * smp_user_confirm_reply(); for BR/EDR a pending command is registered and
 * the reply is forwarded to the controller with @hci_op. @passkey is only
 * used when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
3093 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3094 struct mgmt_addr_info *addr, u16 mgmt_op,
3095 u16 hci_op, __le32 passkey)
3097 struct mgmt_pending_cmd *cmd;
3098 struct hci_conn *conn;
3103 if (!hdev_is_powered(hdev)) {
3104 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3105 MGMT_STATUS_NOT_POWERED, addr,
3110 if (addr->type == BDADDR_BREDR)
3111 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3113 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3114 le_addr_type(addr->type));
3117 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3118 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by the SMP layer. */
3123 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3124 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3126 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3127 MGMT_STATUS_SUCCESS, addr,
3130 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3131 MGMT_STATUS_FAILED, addr,
3137 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3143 cmd->cmd_complete = addr_cmd_complete;
3145 /* Continue with pairing via HCI */
3146 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3147 struct hci_cp_user_passkey_reply cp;
3149 bacpy(&cp.bdaddr, &addr->bdaddr);
3150 cp.passkey = passkey;
3151 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3153 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3157 mgmt_pending_remove(cmd);
3160 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp() with the PIN-code-negative HCI opcode.
 */
3164 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3165 void *data, u16 len)
3167 struct mgmt_cp_pin_code_neg_reply *cp = data;
3169 bt_dev_dbg(hdev, "sock %p", sk);
3171 return user_pairing_resp(sk, hdev, &cp->addr,
3172 MGMT_OP_PIN_CODE_NEG_REPLY,
3173 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler. Unlike the other wrappers this one
 * explicitly rejects any payload whose length differs from the command
 * structure before delegating to user_pairing_resp().
 */
3176 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3179 struct mgmt_cp_user_confirm_reply *cp = data;
3181 bt_dev_dbg(hdev, "sock %p", sk);
3183 if (len != sizeof(*cp))
3184 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3185 MGMT_STATUS_INVALID_PARAMS);
3187 return user_pairing_resp(sk, hdev, &cp->addr,
3188 MGMT_OP_USER_CONFIRM_REPLY,
3189 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: wrapper around
 * user_pairing_resp() with the confirm-negative HCI opcode.
 */
3192 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3193 void *data, u16 len)
3195 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3197 bt_dev_dbg(hdev, "sock %p", sk);
3199 return user_pairing_resp(sk, hdev, &cp->addr,
3200 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3201 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user-entered passkey via
 * user_pairing_resp().
 */
3204 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3207 struct mgmt_cp_user_passkey_reply *cp = data;
3209 bt_dev_dbg(hdev, "sock %p", sk);
3211 return user_pairing_resp(sk, hdev, &cp->addr,
3212 MGMT_OP_USER_PASSKEY_REPLY,
3213 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: wrapper around
 * user_pairing_resp() with the passkey-negative HCI opcode.
 */
3216 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3217 void *data, u16 len)
3219 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3221 bt_dev_dbg(hdev, "sock %p", sk);
3223 return user_pairing_resp(sk, hdev, &cp->addr,
3224 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3225 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Force re-scheduling of advertising when data referenced by @flags (e.g.
 * local name or appearance) changed: if the current advertising instance
 * uses any of @flags, cancel the running timeout and schedule the next
 * instance so the updated data is picked up.
 */
3228 static void adv_expire(struct hci_dev *hdev, u32 flags)
3230 struct adv_info *adv_instance;
3231 struct hci_request req;
3234 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3238 /* stop if current instance doesn't need to be changed */
3239 if (!(adv_instance->flags & flags))
3242 cancel_adv_timeout(hdev);
3244 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3248 hci_req_init(&req, hdev);
3249 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3254 hci_req_run(&req, NULL);
/* HCI request completion handler for Set Local Name: reports the outcome to
 * the waiting mgmt socket and, when advertising is active, expires the
 * current instance so it re-advertises the new name.
 */
3257 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3259 struct mgmt_cp_set_local_name *cp;
3260 struct mgmt_pending_cmd *cmd;
3262 bt_dev_dbg(hdev, "status 0x%02x", status);
3266 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3273 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3274 mgmt_status(status));
3276 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3279 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3280 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME)
3283 mgmt_pending_remove(cmd);
3286 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: store the new complete and short local
 * names. When nothing changed or the adapter is powered off the command
 * completes immediately (emitting Local Name Changed in the powered-off
 * case); otherwise an HCI request updating name/EIR and, when advertising,
 * the scan response data is issued and completed via set_name_complete().
 */
3289 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3292 struct mgmt_cp_set_local_name *cp = data;
3293 struct mgmt_pending_cmd *cmd;
3294 struct hci_request req;
3297 bt_dev_dbg(hdev, "sock %p", sk);
3301 /* If the old values are the same as the new ones just return a
3302 * direct command complete event.
3304 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3305 !memcmp(hdev->short_name, cp->short_name,
3306 sizeof(hdev->short_name))) {
3307 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3312 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3314 if (!hdev_is_powered(hdev)) {
3315 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3317 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3322 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3323 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3324 ext_info_changed(hdev, sk);
3329 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3335 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3337 hci_req_init(&req, hdev);
3339 if (lmp_bredr_capable(hdev)) {
3340 __hci_req_update_name(&req);
3341 __hci_req_update_eir(&req);
3344 /* The name is stored in the scan response data and so
3345 * no need to update the advertising data here.
3347 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3348 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3350 err = hci_req_run(&req, set_name_complete);
3352 mgmt_pending_remove(cmd);
3355 hci_dev_unlock(hdev);
/* MGMT_OP_SET_APPEARANCE handler (LE only): store the new appearance value
 * and, if advertising is active, expire the current instance so the new
 * appearance gets advertised.
 */
3359 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3362 struct mgmt_cp_set_appearance *cp = data;
3366 bt_dev_dbg(hdev, "sock %p", sk);
3368 if (!lmp_le_capable(hdev))
3369 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3370 MGMT_STATUS_NOT_SUPPORTED);
3372 appearance = le16_to_cpu(cp->appearance);
3376 if (hdev->appearance != appearance) {
3377 hdev->appearance = appearance;
3379 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3380 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3382 ext_info_changed(hdev, sk);
3385 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3388 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported, selected and
 * configurable PHY bitmasks for the adapter.
 */
3393 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3394 void *data, u16 len)
3396 struct mgmt_rp_get_phy_confguration rp;
3398 bt_dev_dbg(hdev, "sock %p", sk);
3402 memset(&rp, 0, sizeof(rp));
3404 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3405 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3406 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3408 hci_dev_unlock(hdev);
3410 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event with the currently selected
 * PHYs, skipping the socket @skip that triggered the change.
 */
3414 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3416 struct mgmt_ev_phy_configuration_changed ev;
3418 memset(&ev, 0, sizeof(ev));
3420 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3422 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion handler for the LE Set Default PHY request issued by
 * set_phy_configuration(): report status to the pending command and, on
 * success, broadcast the PHY Configuration Changed event.
 */
3426 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3427 u16 opcode, struct sk_buff *skb)
3429 struct mgmt_pending_cmd *cmd;
3431 bt_dev_dbg(hdev, "status 0x%02x", status);
3435 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3440 mgmt_cmd_status(cmd->sk, hdev->id,
3441 MGMT_OP_SET_PHY_CONFIGURATION,
3442 mgmt_status(status));
3444 mgmt_cmd_complete(cmd->sk, hdev->id,
3445 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3448 mgmt_phy_configuration_changed(hdev, cmd->sk);
3451 mgmt_pending_remove(cmd);
3454 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PHY_CONFIGURATION handler: apply a new PHY selection.
 *
 * Validates the requested bitmask against supported/configurable PHYs.
 * BR/EDR packet-type bits are translated directly into hdev->pkt_type
 * (note the inverted sense of the 2M/3M EDR bits: selecting an EDR PHY
 * clears the corresponding "shall not use" HCI bit). If the LE portion of
 * the selection changed, an HCI LE Set Default PHY request is issued and
 * completed asynchronously via set_default_phy_complete().
 */
3457 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3458 void *data, u16 len)
3460 struct mgmt_cp_set_phy_confguration *cp = data;
3461 struct hci_cp_le_set_default_phy cp_phy;
3462 struct mgmt_pending_cmd *cmd;
3463 struct hci_request req;
3464 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3465 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3466 bool changed = false;
3469 bt_dev_dbg(hdev, "sock %p", sk);
3471 configurable_phys = get_configurable_phys(hdev);
3472 supported_phys = get_supported_phys(hdev);
3473 selected_phys = __le32_to_cpu(cp->selected_phys);
3475 if (selected_phys & ~supported_phys)
3476 return mgmt_cmd_status(sk, hdev->id,
3477 MGMT_OP_SET_PHY_CONFIGURATION,
3478 MGMT_STATUS_INVALID_PARAMS);
3480 unconfigure_phys = supported_phys & ~configurable_phys;
/* All non-configurable PHYs must remain selected. */
3482 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3483 return mgmt_cmd_status(sk, hdev->id,
3484 MGMT_OP_SET_PHY_CONFIGURATION,
3485 MGMT_STATUS_INVALID_PARAMS);
3487 if (selected_phys == get_selected_phys(hdev))
3488 return mgmt_cmd_complete(sk, hdev->id,
3489 MGMT_OP_SET_PHY_CONFIGURATION,
3494 if (!hdev_is_powered(hdev)) {
3495 err = mgmt_cmd_status(sk, hdev->id,
3496 MGMT_OP_SET_PHY_CONFIGURATION,
3497 MGMT_STATUS_REJECTED);
3501 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3502 err = mgmt_cmd_status(sk, hdev->id,
3503 MGMT_OP_SET_PHY_CONFIGURATION,
/* Translate BR/EDR PHY selection into ACL packet types. */
3508 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3509 pkt_type |= (HCI_DH3 | HCI_DM3);
3511 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3513 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3514 pkt_type |= (HCI_DH5 | HCI_DM5);
3516 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* EDR bits in pkt_type mean "do not use"; clear to enable. */
3518 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3519 pkt_type &= ~HCI_2DH1;
3521 pkt_type |= HCI_2DH1;
3523 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3524 pkt_type &= ~HCI_2DH3;
3526 pkt_type |= HCI_2DH3;
3528 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3529 pkt_type &= ~HCI_2DH5;
3531 pkt_type |= HCI_2DH5;
3533 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3534 pkt_type &= ~HCI_3DH1;
3536 pkt_type |= HCI_3DH1;
3538 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3539 pkt_type &= ~HCI_3DH3;
3541 pkt_type |= HCI_3DH3;
3543 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3544 pkt_type &= ~HCI_3DH5;
3546 pkt_type |= HCI_3DH5;
3548 if (pkt_type != hdev->pkt_type) {
3549 hdev->pkt_type = pkt_type;
/* Only the BR/EDR part changed: complete synchronously. */
3553 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3554 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3556 mgmt_phy_configuration_changed(hdev, sk);
3558 err = mgmt_cmd_complete(sk, hdev->id,
3559 MGMT_OP_SET_PHY_CONFIGURATION,
3565 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3572 hci_req_init(&req, hdev);
3574 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction. */
3576 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3577 cp_phy.all_phys |= 0x01;
3579 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3580 cp_phy.all_phys |= 0x02;
3582 if (selected_phys & MGMT_PHY_LE_1M_TX)
3583 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3585 if (selected_phys & MGMT_PHY_LE_2M_TX)
3586 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3588 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3589 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3591 if (selected_phys & MGMT_PHY_LE_1M_RX)
3592 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3594 if (selected_phys & MGMT_PHY_LE_2M_RX)
3595 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3597 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3598 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3600 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3602 err = hci_req_run_skb(&req, set_default_phy_complete);
3604 mgmt_pending_remove(cmd);
3607 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the adapter's blocked-key list
 * with the keys supplied by userspace. The key count is bounded so the
 * expected length fits in u16, and the payload length must match exactly.
 * On allocation failure the command fails with NO_RESOURCES.
 */
3612 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3615 int err = MGMT_STATUS_SUCCESS;
3616 struct mgmt_cp_set_blocked_keys *keys = data;
3617 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3618 sizeof(struct mgmt_blocked_key_info));
3619 u16 key_count, expected_len;
3622 bt_dev_dbg(hdev, "sock %p", sk);
3624 key_count = __le16_to_cpu(keys->key_count);
3625 if (key_count > max_key_count) {
3626 bt_dev_err(hdev, "too big key_count value %u", key_count);
3627 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3628 MGMT_STATUS_INVALID_PARAMS);
3631 expected_len = struct_size(keys, keys, key_count);
3632 if (expected_len != len) {
3633 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3635 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3636 MGMT_STATUS_INVALID_PARAMS);
/* The list is replaced wholesale: clear, then re-add each entry. */
3641 hci_blocked_keys_clear(hdev);
3643 for (i = 0; i < keys->key_count; ++i) {
3644 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3647 err = MGMT_STATUS_NO_RESOURCES;
3651 b->type = keys->keys[i].type;
3652 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3653 list_add_rcu(&b->list, &hdev->blocked_keys);
3655 hci_dev_unlock(hdev);
3657 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband-speech flag.
 * Requires controller support (HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED) and a
 * boolean parameter. Changing the value while the adapter is powered is
 * rejected; a no-op change while powered is allowed. New Settings is
 * emitted only when the flag actually changed.
 */
3661 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3662 void *data, u16 len)
3664 struct mgmt_mode *cp = data;
3666 bool changed = false;
3668 bt_dev_dbg(hdev, "sock %p", sk);
3670 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3671 return mgmt_cmd_status(sk, hdev->id,
3672 MGMT_OP_SET_WIDEBAND_SPEECH,
3673 MGMT_STATUS_NOT_SUPPORTED);
3675 if (cp->val != 0x00 && cp->val != 0x01)
3676 return mgmt_cmd_status(sk, hdev->id,
3677 MGMT_OP_SET_WIDEBAND_SPEECH,
3678 MGMT_STATUS_INVALID_PARAMS);
3682 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3683 err = mgmt_cmd_status(sk, hdev->id,
3684 MGMT_OP_SET_WIDEBAND_SPEECH,
/* While powered, only requests that keep the current value pass. */
3689 if (hdev_is_powered(hdev) &&
3690 !!cp->val != hci_dev_test_flag(hdev,
3691 HCI_WIDEBAND_SPEECH_ENABLED)) {
3692 err = mgmt_cmd_status(sk, hdev->id,
3693 MGMT_OP_SET_WIDEBAND_SPEECH,
3694 MGMT_STATUS_REJECTED);
3699 changed = !hci_dev_test_and_set_flag(hdev,
3700 HCI_WIDEBAND_SPEECH_ENABLED);
3702 changed = hci_dev_test_and_clear_flag(hdev,
3703 HCI_WIDEBAND_SPEECH_ENABLED);
3705 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3710 err = new_settings(hdev, sk);
3713 hci_dev_unlock(hdev);
/* MGMT_OP_READ_SECURITY_INFO handler: build an EIR-style TLV blob
 * describing the adapter's security capabilities. Capability bits are
 * derived from the supported-commands bitmask: Read Simple Pairing Options
 * (commands[41] bit 3) implies remote public key validation, Read
 * Encryption Key Size (commands[20] bit 4) implies key-size enforcement.
 */
3717 static int read_security_info(struct sock *sk, struct hci_dev *hdev,
3718 void *data, u16 data_len)
3721 struct mgmt_rp_read_security_info *rp = (void *)buf;
3725 bt_dev_dbg(hdev, "sock %p", sk);
3727 memset(&buf, 0, sizeof(buf));
3731 /* When the Read Simple Pairing Options command is supported, then
3732 * the remote public key validation is supported.
3734 if (hdev->commands[41] & 0x08)
3735 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3737 flags |= 0x02; /* Remote public key validation (LE) */
3739 /* When the Read Encryption Key Size command is supported, then the
3740 * encryption key size is enforced.
3742 if (hdev->commands[20] & 0x10)
3743 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3745 flags |= 0x08; /* Encryption key size enforcement (LE) */
3747 sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);
3749 /* When the Read Simple Pairing Options command is supported, then
3750 * also max encryption key size information is provided.
3752 if (hdev->commands[41] & 0x08)
3753 sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
3754 hdev->max_enc_key_size);
3756 sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);
3758 rp->sec_len = cpu_to_le16(sec_len);
3760 hci_dev_unlock(hdev);
3762 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
3763 rp, sizeof(*rp) + sec_len);
/* Experimental-feature UUIDs. The byte arrays store the UUIDs in reversed
 * (little-endian) byte order relative to the string form quoted above each
 * one, matching the on-the-wire mgmt representation.
 */
3766 #ifdef CONFIG_BT_FEATURE_DEBUG
3767 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3768 static const u8 debug_uuid[16] = {
3769 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3770 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3774 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3775 static const u8 simult_central_periph_uuid[16] = {
3776 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3777 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3780 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3781 static const u8 rpa_resolution_uuid[16] = {
3782 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3783 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: list the experimental features
 * this kernel exposes (debug logging when CONFIG_BT_FEATURE_DEBUG,
 * simultaneous central/peripheral when the LE states allow it, and
 * LL privacy / RPA resolution when usable) with their current flag bits.
 * Also subscribes the requesting socket to future feature-changed events.
 * May be called without a controller (hdev == NULL).
 */
3786 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3787 void *data, u16 data_len)
3789 char buf[62]; /* Enough space for 3 features */
3790 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3794 bt_dev_dbg(hdev, "sock %p", sk);
3796 memset(&buf, 0, sizeof(buf));
3798 #ifdef CONFIG_BT_FEATURE_DEBUG
3800 flags = bt_dbg_get() ? BIT(0) : 0;
3802 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3803 rp->features[idx].flags = cpu_to_le32(flags);
/* Advertise simultaneous central+peripheral support only when the
 * controller reports valid LE states covering all three roles.
 */
3809 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3810 (hdev->le_states[4] & 0x08) && /* Central */
3811 (hdev->le_states[4] & 0x40) && /* Peripheral */
3812 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3817 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3818 rp->features[idx].flags = cpu_to_le32(flags);
3822 if (hdev && use_ll_privacy(hdev)) {
3823 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3824 flags = BIT(0) | BIT(1);
3828 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3829 rp->features[idx].flags = cpu_to_le32(flags);
3833 rp->feature_count = cpu_to_le16(idx);
3835 /* After reading the experimental features information, enable
3836 * the events to update client on any future change.
3838 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3840 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3841 MGMT_OP_READ_EXP_FEATURES_INFO,
3842 0, rp, sizeof(*rp) + (20 * idx));
/* Emit an Experimental Feature Changed event for the LL privacy / RPA
 * resolution feature. BIT(1) (settings changed) is always set; BIT(0)
 * reflects @enabled.
 */
3845 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3848 struct mgmt_ev_exp_feature_changed ev;
3850 memset(&ev, 0, sizeof(ev));
3851 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3852 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3854 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3856 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Emit an Experimental Feature Changed event for the debug feature to all
 * subscribed sockets except @skip. Sent on the non-controller index
 * (hdev == NULL) since debug logging is global.
 */
3860 #ifdef CONFIG_BT_FEATURE_DEBUG
3861 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3863 struct mgmt_ev_exp_feature_changed ev;
3865 memset(&ev, 0, sizeof(ev));
3866 memcpy(ev.uuid, debug_uuid, 16);
3867 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3869 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3871 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* MGMT_OP_SET_EXP_FEATURE handler: enable/disable an experimental feature
 * identified by UUID.
 *
 * A zero UUID disables everything (debug logging globally, LL privacy for
 * a powered-down controller). The debug UUID must be used without a
 * controller index; the RPA-resolution UUID requires one and a powered-down
 * adapter. Both take a single boolean parameter octet. Unknown UUIDs fail
 * with NOT_SUPPORTED.
 */
3875 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3876 void *data, u16 data_len)
3878 struct mgmt_cp_set_exp_feature *cp = data;
3879 struct mgmt_rp_set_exp_feature rp;
3881 bt_dev_dbg(hdev, "sock %p", sk);
/* Zero UUID: blanket "disable all experimental features". */
3883 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3884 memset(rp.uuid, 0, 16);
3885 rp.flags = cpu_to_le32(0);
3887 #ifdef CONFIG_BT_FEATURE_DEBUG
3889 bool changed = bt_dbg_get();
3894 exp_debug_feature_changed(false, sk);
3898 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3899 bool changed = hci_dev_test_flag(hdev,
3900 HCI_ENABLE_LL_PRIVACY);
3902 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3905 exp_ll_privacy_feature_changed(false, hdev, sk);
3908 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3910 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3911 MGMT_OP_SET_EXP_FEATURE, 0,
3915 #ifdef CONFIG_BT_FEATURE_DEBUG
3916 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3920 /* Command requires to use the non-controller index */
3922 return mgmt_cmd_status(sk, hdev->id,
3923 MGMT_OP_SET_EXP_FEATURE,
3924 MGMT_STATUS_INVALID_INDEX);
3926 /* Parameters are limited to a single octet */
3927 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3928 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3929 MGMT_OP_SET_EXP_FEATURE,
3930 MGMT_STATUS_INVALID_PARAMS);
3932 /* Only boolean on/off is supported */
3933 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3934 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3935 MGMT_OP_SET_EXP_FEATURE,
3936 MGMT_STATUS_INVALID_PARAMS);
3938 val = !!cp->param[0];
3939 changed = val ? !bt_dbg_get() : bt_dbg_get();
3942 memcpy(rp.uuid, debug_uuid, 16);
3943 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3945 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3947 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3948 MGMT_OP_SET_EXP_FEATURE, 0,
3952 exp_debug_feature_changed(val, sk);
3958 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3963 /* Command requires to use the controller index */
3965 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3966 MGMT_OP_SET_EXP_FEATURE,
3967 MGMT_STATUS_INVALID_INDEX);
3969 /* Changes can only be made when controller is powered down */
3970 if (hdev_is_powered(hdev))
3971 return mgmt_cmd_status(sk, hdev->id,
3972 MGMT_OP_SET_EXP_FEATURE,
3973 MGMT_STATUS_NOT_POWERED);
3975 /* Parameters are limited to a single octet */
3976 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3977 return mgmt_cmd_status(sk, hdev->id,
3978 MGMT_OP_SET_EXP_FEATURE,
3979 MGMT_STATUS_INVALID_PARAMS);
3981 /* Only boolean on/off is supported */
3982 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3983 return mgmt_cmd_status(sk, hdev->id,
3984 MGMT_OP_SET_EXP_FEATURE,
3985 MGMT_STATUS_INVALID_PARAMS);
3987 val = !!cp->param[0];
3990 changed = !hci_dev_test_flag(hdev,
3991 HCI_ENABLE_LL_PRIVACY);
3992 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3993 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
3995 /* Enable LL privacy + supported settings changed */
3996 flags = BIT(0) | BIT(1);
3998 changed = hci_dev_test_flag(hdev,
3999 HCI_ENABLE_LL_PRIVACY);
4000 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4002 /* Disable LL privacy + supported settings changed */
4006 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4007 rp.flags = cpu_to_le32(flags);
4009 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4011 err = mgmt_cmd_complete(sk, hdev->id,
4012 MGMT_OP_SET_EXP_FEATURE, 0,
4016 exp_ll_privacy_feature_changed(val, hdev, sk);
4021 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4022 MGMT_OP_SET_EXP_FEATURE,
4023 MGMT_STATUS_NOT_SUPPORTED);
/* Bitmask with one bit set for every per-device flag the kernel knows. */
4026 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* MGMT_OP_GET_DEVICE_FLAGS handler: look up the per-device flags for a
 * BR/EDR whitelist entry or an LE connection-parameters entry and return
 * them together with the supported-flags mask. Unknown devices fail with
 * INVALID_PARAMS (the default status).
 */
4028 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4031 struct mgmt_cp_get_device_flags *cp = data;
4032 struct mgmt_rp_get_device_flags rp;
4033 struct bdaddr_list_with_flags *br_params;
4034 struct hci_conn_params *params;
4035 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4036 u32 current_flags = 0;
4037 u8 status = MGMT_STATUS_INVALID_PARAMS;
4039 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4040 &cp->addr.bdaddr, cp->addr.type);
4044 memset(&rp, 0, sizeof(rp));
4046 if (cp->addr.type == BDADDR_BREDR) {
4047 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4053 current_flags = br_params->current_flags;
4055 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4056 le_addr_type(cp->addr.type));
4061 current_flags = params->current_flags;
4064 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4065 rp.addr.type = cp->addr.type;
4066 rp.supported_flags = cpu_to_le32(supported_flags);
4067 rp.current_flags = cpu_to_le32(current_flags);
4069 status = MGMT_STATUS_SUCCESS;
4072 hci_dev_unlock(hdev);
4074 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast a Device Flags Changed event for @bdaddr/@bdaddr_type, skipping
 * the socket @sk that made the change.
 */
4078 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4079 bdaddr_t *bdaddr, u8 bdaddr_type,
4080 u32 supported_flags, u32 current_flags)
4082 struct mgmt_ev_device_flags_changed ev;
4084 bacpy(&ev.addr.bdaddr, bdaddr);
4085 ev.addr.type = bdaddr_type;
4086 ev.supported_flags = cpu_to_le32(supported_flags);
4087 ev.current_flags = cpu_to_le32(current_flags);
4089 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: store new per-device flags for a
 * BR/EDR whitelist entry or an LE connection-parameters entry, rejecting
 * any bit outside the supported mask. On success a Device Flags Changed
 * event is broadcast to other sockets.
 */
4092 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4095 struct mgmt_cp_set_device_flags *cp = data;
4096 struct bdaddr_list_with_flags *br_params;
4097 struct hci_conn_params *params;
4098 u8 status = MGMT_STATUS_INVALID_PARAMS;
4099 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4100 u32 current_flags = __le32_to_cpu(cp->current_flags);
4102 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4103 &cp->addr.bdaddr, cp->addr.type,
4104 __le32_to_cpu(current_flags));
/* Reject requests setting any bit outside the supported mask. */
4106 if ((supported_flags | current_flags) != supported_flags) {
4107 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4108 current_flags, supported_flags);
4114 if (cp->addr.type == BDADDR_BREDR) {
4115 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4120 br_params->current_flags = current_flags;
4121 status = MGMT_STATUS_SUCCESS;
4123 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4124 &cp->addr.bdaddr, cp->addr.type);
4127 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4128 le_addr_type(cp->addr.type));
4130 params->current_flags = current_flags;
4131 status = MGMT_STATUS_SUCCESS;
4133 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4135 le_addr_type(cp->addr.type));
4140 hci_dev_unlock(hdev);
4142 if (status == MGMT_STATUS_SUCCESS)
4143 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4144 supported_flags, current_flags);
4146 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4147 &cp->addr, sizeof(cp->addr));
4150 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4153 struct mgmt_ev_adv_monitor_added ev;
4155 ev.monitor_handle = cpu_to_le16(handle);
4157 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4160 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4163 struct mgmt_ev_adv_monitor_added ev;
4165 ev.monitor_handle = cpu_to_le16(handle);
4167 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported monitor
 * features plus the handles of all currently registered monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
struct adv_monitor *monitor = NULL;
struct mgmt_rp_read_adv_monitor_features *rp = NULL;
__u32 supported = 0;
__u16 num_handles = 0;
__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
BT_DBG("request for %s", hdev->name);
/* Advertise OR-pattern support only when the controller's MSFT
 * extension provides LE advertisement monitoring.
 */
if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* NOTE(review): the handles[] bound appears to rely on registration
 * being capped at HCI_MAX_ADV_MONITOR_NUM_HANDLES elsewhere — verify,
 * since num_handles++ is not range-checked here.
 */
idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
handles[num_handles++] = monitor->handle;
hci_dev_unlock(hdev);
/* Reply is a fixed header followed by num_handles 16-bit handles. */
rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
rp = kmalloc(rp_size, GFP_KERNEL);
/* Once controller-based monitoring is in place, the enabled_features
 * should reflect the use.
rp->supported_features = cpu_to_le32(supported);
rp->enabled_features = 0;
rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
rp->num_handles = cpu_to_le16(num_handles);
memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_READ_ADV_MONITOR_FEATURES,
MGMT_STATUS_SUCCESS, rp, rp_size);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: validate the supplied pattern
 * list, build an adv_monitor with a copy of each pattern, and register it
 * with the core via hci_add_adv_monitor().
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
struct mgmt_cp_add_adv_patterns_monitor *cp = data;
struct mgmt_rp_add_adv_patterns_monitor rp;
struct adv_monitor *m = NULL;
struct adv_pattern *p = NULL;
unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
__u8 cp_ofst = 0, cp_len = 0;
BT_DBG("request for %s", hdev->name);
/* A monitor with no patterns (or a truncated command) is invalid. */
if (len <= sizeof(*cp) || cp->pattern_count == 0) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_INVALID_PARAMS);
m = kmalloc(sizeof(*m), GFP_KERNEL);
INIT_LIST_HEAD(&m->patterns);
for (i = 0; i < cp->pattern_count; i++) {
/* Cap the number of patterns per monitor. */
if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_INVALID_PARAMS);
/* Each pattern's offset+length window must stay inside a
 * maximum-size advertising report.
 */
cp_ofst = cp->patterns[i].offset;
cp_len = cp->patterns[i].length;
if (cp_ofst >= HCI_MAX_AD_LENGTH ||
cp_len > HCI_MAX_AD_LENGTH ||
(cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_INVALID_PARAMS);
p = kmalloc(sizeof(*p), GFP_KERNEL);
p->ad_type = cp->patterns[i].ad_type;
p->offset = cp->patterns[i].offset;
p->length = cp->patterns[i].length;
memcpy(p->value, cp->patterns[i].value, p->length);
INIT_LIST_HEAD(&p->list);
list_add(&p->list, &m->patterns);
/* The loop must have consumed exactly pattern_count entries. */
if (mp_cnt != cp->pattern_count) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_INVALID_PARAMS);
/* Snapshot the count so we can tell whether registration really
 * added a monitor before emitting the Added event.
 */
prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
err = hci_add_adv_monitor(hdev, m);
if (err == -ENOSPC) {
mgmt_cmd_status(sk, hdev->id,
MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_NO_RESOURCES);
if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
mgmt_adv_monitor_added(sk, hdev, m->handle);
hci_dev_unlock(hdev);
rp.monitor_handle = cpu_to_le16(m->handle);
return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Error path: drop the lock and free the partially built monitor
 * (hci_free_adv_monitor also frees the attached patterns).
 */
hci_dev_unlock(hdev);
hci_free_adv_monitor(m);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: unregister the monitor identified
 * by the caller-supplied handle and emit the Removed event on success.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
struct mgmt_cp_remove_adv_monitor *cp = data;
struct mgmt_rp_remove_adv_monitor rp;
unsigned int prev_adv_monitors_cnt;
BT_DBG("request for %s", hdev->name);
handle = __le16_to_cpu(cp->monitor_handle);
/* Snapshot the count to detect whether removal actually happened. */
prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
err = hci_remove_adv_monitor(hdev, handle);
if (err == -ENOENT) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
MGMT_STATUS_INVALID_INDEX);
if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
mgmt_adv_monitor_removed(sk, hdev, handle);
hci_dev_unlock(hdev);
/* Echo the handle back in wire byte order, exactly as received. */
rp.monitor_handle = cp->monitor_handle;
return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
hci_dev_unlock(hdev);
/* HCI completion callback for Read Local OOB Data: translate the HCI
 * reply (plain or extended, depending on @opcode) into the mgmt reply for
 * the pending MGMT_OP_READ_LOCAL_OOB_DATA command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb)
struct mgmt_rp_read_local_oob_data mgmt_rp;
size_t rp_size = sizeof(mgmt_rp);
struct mgmt_pending_cmd *cmd;
bt_dev_dbg(hdev, "status %u", status);
cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
/* Report failure if the HCI command failed or produced no reply. */
if (status || !skb) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
status ? mgmt_status(status) : MGMT_STATUS_FAILED);
memset(&mgmt_rp, 0, sizeof(mgmt_rp));
if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
/* Legacy reply: only P-192 hash/randomizer are available. */
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
if (skb->len < sizeof(*rp)) {
mgmt_cmd_status(cmd->sk, hdev->id,
MGMT_OP_READ_LOCAL_OOB_DATA,
MGMT_STATUS_FAILED);
memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Trim the reply so the absent P-256 fields are not sent. */
rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
/* Extended reply: both P-192 and P-256 values are present. */
struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
if (skb->len < sizeof(*rp)) {
mgmt_cmd_status(cmd->sk, hdev->id,
MGMT_OP_READ_LOCAL_OOB_DATA,
MGMT_STATUS_FAILED);
memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queue the appropriate HCI Read
 * Local OOB (Extended) Data command; the reply is delivered later via
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
struct mgmt_pending_cmd *cmd;
struct hci_request req;
bt_dev_dbg(hdev, "sock %p", sk);
if (!hdev_is_powered(hdev)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
MGMT_STATUS_NOT_POWERED);
/* OOB data requires Secure Simple Pairing support. */
if (!lmp_ssp_capable(hdev)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
MGMT_STATUS_NOT_SUPPORTED);
/* Only one Read Local OOB Data request may be in flight. */
if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
hci_req_init(&req, hdev);
/* Use the extended variant when BR/EDR Secure Connections is on,
 * so P-256 values are returned as well.
 */
if (bredr_sc_enabled(hdev))
hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
err = hci_req_run_skb(&req, read_local_oob_data_complete);
mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote OOB pairing data.
 * Two command sizes are accepted — the legacy P-192-only form and the
 * extended form carrying both P-192 and P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
struct mgmt_addr_info *addr = data;
bt_dev_dbg(hdev, "sock %p", sk);
if (!bdaddr_type_is_valid(addr->type))
return mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_ADD_REMOTE_OOB_DATA,
MGMT_STATUS_INVALID_PARAMS,
addr, sizeof(*addr));
/* Legacy form: P-192 hash/randomizer only, BR/EDR addresses only. */
if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
struct mgmt_cp_add_remote_oob_data *cp = data;
if (cp->addr.type != BDADDR_BREDR) {
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_ADD_REMOTE_OOB_DATA,
MGMT_STATUS_INVALID_PARAMS,
&cp->addr, sizeof(cp->addr));
err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
cp->addr.type, cp->hash,
cp->rand, NULL, NULL);
status = MGMT_STATUS_FAILED;
status = MGMT_STATUS_SUCCESS;
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_ADD_REMOTE_OOB_DATA, status,
&cp->addr, sizeof(cp->addr));
} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
/* Extended form: carries both P-192 and P-256 values. */
struct mgmt_cp_add_remote_oob_ext_data *cp = data;
u8 *rand192, *hash192, *rand256, *hash256;
if (bdaddr_type_is_le(cp->addr.type)) {
/* Enforce zero-valued 192-bit parameters as
 * long as legacy SMP OOB isn't implemented.
if (memcmp(cp->rand192, ZERO_KEY, 16) ||
memcmp(cp->hash192, ZERO_KEY, 16)) {
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_ADD_REMOTE_OOB_DATA,
MGMT_STATUS_INVALID_PARAMS,
addr, sizeof(*addr));
/* In case one of the P-192 values is set to zero,
 * then just disable OOB data for P-192.
if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
!memcmp(cp->hash192, ZERO_KEY, 16)) {
rand192 = cp->rand192;
hash192 = cp->hash192;
/* In case one of the P-256 values is set to zero, then just
 * disable OOB data for P-256.
if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
!memcmp(cp->hash256, ZERO_KEY, 16)) {
rand256 = cp->rand256;
hash256 = cp->hash256;
err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
cp->addr.type, hash192, rand192,
status = MGMT_STATUS_FAILED;
status = MGMT_STATUS_SUCCESS;
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_ADD_REMOTE_OOB_DATA,
status, &cp->addr, sizeof(cp->addr));
/* Any other length is a malformed command. */
bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
MGMT_STATUS_INVALID_PARAMS);
hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored OOB data for one
 * BR/EDR device, or for all devices when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
struct mgmt_cp_remove_remote_oob_data *cp = data;
bt_dev_dbg(hdev, "sock %p", sk);
if (cp->addr.type != BDADDR_BREDR)
return mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_REMOVE_REMOTE_OOB_DATA,
MGMT_STATUS_INVALID_PARAMS,
&cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard clearing every entry. */
if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
hci_remote_oob_data_clear(hdev);
status = MGMT_STATUS_SUCCESS;
err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
status = MGMT_STATUS_INVALID_PARAMS;
status = MGMT_STATUS_SUCCESS;
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
status, &cp->addr, sizeof(cp->addr));
hci_dev_unlock(hdev);
/* Called by the discovery state machine once starting discovery has
 * finished (successfully or not): complete whichever start-discovery
 * variant is pending and wake the suspend path if it was waiting.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
struct mgmt_pending_cmd *cmd;
bt_dev_dbg(hdev, "status %d", status);
/* Any of the three start-discovery opcodes may be pending. */
cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
cmd->cmd_complete(cmd, mgmt_status(status));
mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
/* Handle suspend notifier */
if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
hdev->suspend_tasks)) {
bt_dev_dbg(hdev, "Unpaused discovery");
wake_up(&hdev->suspend_wait_q);
/* Validate a requested discovery type against the controller's
 * capabilities. On rejection, *mgmt_status receives the error to return
 * to user space; unknown types yield MGMT_STATUS_INVALID_PARAMS.
 */
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
uint8_t *mgmt_status)
case DISCOV_TYPE_LE:
*mgmt_status = mgmt_le_support(hdev);
/* Interleaved discovery needs both LE and BR/EDR support, so it
 * checks LE first and then falls through to the BR/EDR check.
 */
case DISCOV_TYPE_INTERLEAVED:
*mgmt_status = mgmt_le_support(hdev);
case DISCOV_TYPE_BREDR:
*mgmt_status = mgmt_bredr_support(hdev);
*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which): validate state,
 * record the discovery parameters and kick the discov_update work item.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
u16 op, void *data, u16 len)
struct mgmt_cp_start_discovery *cp = data;
struct mgmt_pending_cmd *cmd;
bt_dev_dbg(hdev, "sock %p", sk);
if (!hdev_is_powered(hdev)) {
err = mgmt_cmd_complete(sk, hdev->id, op,
MGMT_STATUS_NOT_POWERED,
&cp->type, sizeof(cp->type));
/* Busy if discovery is already running or periodic inquiry is on. */
if (hdev->discovery.state != DISCOVERY_STOPPED ||
hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
&cp->type, sizeof(cp->type));
if (!discovery_type_is_valid(hdev, cp->type, &status)) {
err = mgmt_cmd_complete(sk, hdev->id, op, status,
&cp->type, sizeof(cp->type));
/* Can't start discovery when it is paused */
if (hdev->discovery_paused) {
err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
&cp->type, sizeof(cp->type));
/* Clear the discovery filter first to free any previously
 * allocated memory for the UUID list.
hci_discovery_filter_clear(hdev);
hdev->discovery.type = cp->type;
hdev->discovery.report_invalid_rssi = false;
/* Limited discovery restricts results to limited-discoverable peers. */
if (op == MGMT_OP_START_LIMITED_DISCOVERY)
hdev->discovery.limited = true;
hdev->discovery.limited = false;
cmd = mgmt_pending_add(sk, op, hdev, data, len);
cmd->cmd_complete = generic_cmd_complete;
/* The actual scanning is started asynchronously by discov_update. */
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
queue_work(hdev->req_workqueue, &hdev->discov_update);
hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY: thin wrapper around the shared helper. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY: thin wrapper around the shared helper. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
return start_discovery_internal(sk, hdev,
MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete hook for Start Service Discovery: echo part of the
 * original command parameters back in the reply.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like Start Discovery but with
 * result filtering by RSSI threshold and an optional list of 128-bit
 * service UUIDs appended to the command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
struct mgmt_cp_start_service_discovery *cp = data;
struct mgmt_pending_cmd *cmd;
/* Upper bound keeping sizeof(*cp) + uuid_count * 16 within u16. */
const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
u16 uuid_count, expected_len;
bt_dev_dbg(hdev, "sock %p", sk);
if (!hdev_is_powered(hdev)) {
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_START_SERVICE_DISCOVERY,
MGMT_STATUS_NOT_POWERED,
&cp->type, sizeof(cp->type));
if (hdev->discovery.state != DISCOVERY_STOPPED ||
hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_START_SERVICE_DISCOVERY,
MGMT_STATUS_BUSY, &cp->type,
uuid_count = __le16_to_cpu(cp->uuid_count);
if (uuid_count > max_uuid_count) {
bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_START_SERVICE_DISCOVERY,
MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The command length must match the declared UUID count exactly. */
expected_len = sizeof(*cp) + uuid_count * 16;
if (expected_len != len) {
bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_START_SERVICE_DISCOVERY,
MGMT_STATUS_INVALID_PARAMS, &cp->type,
if (!discovery_type_is_valid(hdev, cp->type, &status)) {
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_START_SERVICE_DISCOVERY,
status, &cp->type, sizeof(cp->type));
cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
cmd->cmd_complete = service_discovery_cmd_complete;
/* Clear the discovery filter first to free any previously
 * allocated memory for the UUID list.
hci_discovery_filter_clear(hdev);
hdev->discovery.result_filtering = true;
hdev->discovery.type = cp->type;
hdev->discovery.rssi = cp->rssi;
hdev->discovery.uuid_count = uuid_count;
if (uuid_count > 0) {
/* Keep a private copy of the UUID filter list. */
hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
if (!hdev->discovery.uuids) {
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_START_SERVICE_DISCOVERY,
&cp->type, sizeof(cp->type));
mgmt_pending_remove(cmd);
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
queue_work(hdev->req_workqueue, &hdev->discov_update);
hci_dev_unlock(hdev);
/* Called by the discovery state machine once stopping discovery has
 * finished: complete the pending Stop Discovery command and wake the
 * suspend path if it was waiting for discovery to pause.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
struct mgmt_pending_cmd *cmd;
bt_dev_dbg(hdev, "status %d", status);
cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
cmd->cmd_complete(cmd, mgmt_status(status));
mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
/* Handle suspend notifier */
if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
bt_dev_dbg(hdev, "Paused discovery");
wake_up(&hdev->suspend_wait_q);
/* MGMT_OP_STOP_DISCOVERY handler: request the asynchronous shutdown of an
 * active discovery session; the type given must match the running one.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_cp_stop_discovery *mgmt_cp = data;
struct mgmt_pending_cmd *cmd;
bt_dev_dbg(hdev, "sock %p", sk);
if (!hci_discovery_active(hdev)) {
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
MGMT_STATUS_REJECTED, &mgmt_cp->type,
sizeof(mgmt_cp->type));
/* The caller must name the discovery type that is running. */
if (hdev->discovery.type != mgmt_cp->type) {
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
MGMT_STATUS_INVALID_PARAMS,
&mgmt_cp->type, sizeof(mgmt_cp->type));
cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
cmd->cmd_complete = generic_cmd_complete;
/* Actual stop happens asynchronously via discov_update. */
hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
queue_work(hdev->req_workqueue, &hdev->discov_update);
hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: user space tells us whether it already
 * knows the remote name for an inquiry result, which decides whether a
 * name-resolution request is still needed for that entry.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_cp_confirm_name *cp = data;
struct inquiry_entry *e;
bt_dev_dbg(hdev, "sock %p", sk);
/* Only meaningful while discovery is running. */
if (!hci_discovery_active(hdev)) {
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
MGMT_STATUS_FAILED, &cp->addr,
e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
MGMT_STATUS_INVALID_PARAMS, &cp->addr,
if (cp->name_known) {
e->name_state = NAME_KNOWN;
/* Unknown name: flag the entry so name resolution is scheduled. */
e->name_state = NAME_NEEDED;
hci_inquiry_cache_update_resolve(hdev, e);
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
&cp->addr, sizeof(cp->addr));
hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the reject list and
 * broadcast MGMT_EV_DEVICE_BLOCKED on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_cp_block_device *cp = data;
bt_dev_dbg(hdev, "sock %p", sk);
if (!bdaddr_type_is_valid(cp->addr.type))
return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
MGMT_STATUS_INVALID_PARAMS,
&cp->addr, sizeof(cp->addr));
err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
status = MGMT_STATUS_FAILED;
/* Event goes to all mgmt sockets except the requester. */
mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
status = MGMT_STATUS_SUCCESS;
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
&cp->addr, sizeof(cp->addr));
hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the reject
 * list and broadcast MGMT_EV_DEVICE_UNBLOCKED on success.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_cp_unblock_device *cp = data;
bt_dev_dbg(hdev, "sock %p", sk);
if (!bdaddr_type_is_valid(cp->addr.type))
return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
MGMT_STATUS_INVALID_PARAMS,
&cp->addr, sizeof(cp->addr));
err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
/* A failed delete means the address was never blocked. */
status = MGMT_STATUS_INVALID_PARAMS;
mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
status = MGMT_STATUS_SUCCESS;
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
&cp->addr, sizeof(cp->addr));
hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: record the Device ID (source, vendor,
 * product, version) and refresh the EIR data so the DID record is
 * advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_cp_set_device_id *cp = data;
struct hci_request req;
bt_dev_dbg(hdev, "sock %p", sk);
source = __le16_to_cpu(cp->source);
/* Valid sources: 0x0000 disabled, 0x0001 Bluetooth SIG, 0x0002 USB IF. */
if (source > 0x0002)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
MGMT_STATUS_INVALID_PARAMS);
hdev->devid_source = source;
hdev->devid_vendor = __le16_to_cpu(cp->vendor);
hdev->devid_product = __le16_to_cpu(cp->product);
hdev->devid_version = __le16_to_cpu(cp->version);
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Push the new Device ID into the controller's EIR response. */
hci_req_init(&req, hdev);
__hci_req_update_eir(&req);
hci_req_run(&req, NULL);
hci_dev_unlock(hdev);
5076 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5079 bt_dev_dbg(hdev, "status %d", status);
/* HCI completion callback for Set Advertising: finish pending commands,
 * sync the HCI_ADVERTISING flag with the controller state, service the
 * suspend notifier and, if needed, restore multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
struct cmd_lookup match = { NULL, hdev };
struct hci_request req;
struct adv_info *adv_instance;
u8 mgmt_err = mgmt_status(status);
/* Failure: reject every pending Set Advertising command. */
mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
cmd_status_rsp, &mgmt_err);
/* Mirror the controller's LE advertising state into the flag. */
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
hci_dev_set_flag(hdev, HCI_ADVERTISING);
hci_dev_clear_flag(hdev, HCI_ADVERTISING);
mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
new_settings(hdev, match.sk);
/* Handle suspend notifier */
if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
hdev->suspend_tasks)) {
bt_dev_dbg(hdev, "Paused advertising");
wake_up(&hdev->suspend_wait_q);
} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
hdev->suspend_tasks)) {
bt_dev_dbg(hdev, "Unpaused advertising");
wake_up(&hdev->suspend_wait_q);
/* If "Set Advertising" was just disabled and instance advertising was
 * set up earlier, then re-enable multi-instance advertising.
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
list_empty(&hdev->adv_instances))
instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one. */
adv_instance = list_first_entry_or_null(&hdev->adv_instances,
struct adv_info, list);
instance = adv_instance->instance;
hci_req_init(&req, hdev);
err = __hci_req_schedule_adv_instance(&req, instance, true);
err = hci_req_run(&req, enable_advertising_instance);
bt_dev_err(hdev, "failed to re-configure advertising");
hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler. val semantics: 0x00 off, 0x01 on,
 * 0x02 on and connectable. Either answers directly from cached state or
 * builds an HCI request completed by set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
struct hci_request req;
bt_dev_dbg(hdev, "sock %p", sk);
status = mgmt_le_support(hdev);
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
/* Enabling the experimental LL Privay support disables support for
if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
MGMT_STATUS_NOT_SUPPORTED);
if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
/* Advertising is paused during system suspend; refuse changes. */
if (hdev->advertising_paused)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
/* The following conditions are ones which mean that we should
 * not do any HCI communication but directly send a mgmt
 * response to user space (after toggling the flag if
if (!hdev_is_powered(hdev) ||
(val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
(cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
hci_conn_num(hdev, LE_LINK) > 0 ||
(hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
hdev->le_scan_type == LE_SCAN_ACTIVE)) {
hdev->cur_adv_instance = 0x00;
changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
if (cp->val == 0x02)
hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
err = new_settings(hdev, sk);
/* Serialize with in-flight Set Advertising / Set LE commands. */
if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
pending_find(MGMT_OP_SET_LE, hdev)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
hci_req_init(&req, hdev);
if (cp->val == 0x02)
hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
cancel_adv_timeout(hdev);
/* Switch to instance "0" for the Set Advertising setting.
 * We cannot use update_[adv|scan_rsp]_data() here as the
 * HCI_ADVERTISING flag is not yet set.
hdev->cur_adv_instance = 0x00;
if (ext_adv_capable(hdev)) {
__hci_req_start_ext_adv(&req, 0x00);
__hci_req_update_adv_data(&req, 0x00);
__hci_req_update_scan_rsp_data(&req, 0x00);
__hci_req_enable_advertising(&req);
__hci_req_disable_advertising(&req);
err = hci_req_run(&req, set_advertising_complete);
mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address. Only allowed while powered off; a non-ANY address must be a
 * valid static random address (two MSBs set).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
struct mgmt_cp_set_static_address *cp = data;
bt_dev_dbg(hdev, "sock %p", sk);
if (!lmp_le_capable(hdev))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
MGMT_STATUS_NOT_SUPPORTED);
if (hdev_is_powered(hdev))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
MGMT_STATUS_REJECTED);
if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
/* BDADDR_NONE is never a legal static address. */
if (!bacmp(&cp->bdaddr, BDADDR_NONE))
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_STATIC_ADDRESS,
MGMT_STATUS_INVALID_PARAMS);
/* Two most significant bits shall be set */
if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_STATIC_ADDRESS,
MGMT_STATUS_INVALID_PARAMS);
bacpy(&hdev->static_addr, &cp->bdaddr);
err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
err = new_settings(hdev, sk);
hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set the LE scan interval/window
 * (0x0004-0x4000 each, window <= interval, per the LE Set Scan Parameters
 * ranges) and restart background scanning so they take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
struct mgmt_cp_set_scan_params *cp = data;
__u16 interval, window;
bt_dev_dbg(hdev, "sock %p", sk);
if (!lmp_le_capable(hdev))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
MGMT_STATUS_NOT_SUPPORTED);
interval = __le16_to_cpu(cp->interval);
if (interval < 0x0004 || interval > 0x4000)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
MGMT_STATUS_INVALID_PARAMS);
window = __le16_to_cpu(cp->window);
if (window < 0x0004 || window > 0x4000)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
MGMT_STATUS_INVALID_PARAMS);
/* The scan window can never exceed the scan interval. */
if (window > interval)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
MGMT_STATUS_INVALID_PARAMS);
hdev->le_scan_interval = interval;
hdev->le_scan_window = window;
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
/* If background scan is running, restart it so new parameters are
if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
hdev->discovery.state == DISCOVERY_STOPPED) {
struct hci_request req;
hci_req_init(&req, hdev);
hci_req_add_le_scan_disable(&req, false);
hci_req_add_le_passive_scan(&req);
hci_req_run(&req, NULL);
hci_dev_unlock(hdev);
/* HCI completion callback for Set Fast Connectable: update the
 * HCI_FAST_CONNECTABLE flag per the requested mode and answer the
 * pending command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
struct mgmt_pending_cmd *cmd;
bt_dev_dbg(hdev, "status 0x%02x", status);
cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
mgmt_status(status));
/* Success: mirror the requested mode into the device flag. */
struct mgmt_mode *cp = cmd->param;
hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
new_settings(hdev, cmd->sk);
mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle faster page-scan
 * parameters. Requires BR/EDR enabled and controller version >= 1.2.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
struct hci_request req;
bt_dev_dbg(hdev, "sock %p", sk);
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
hdev->hci_ver < BLUETOOTH_VER_1_2)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
MGMT_STATUS_NOT_SUPPORTED);
if (cp->val != 0x00 && cp->val != 0x01)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
MGMT_STATUS_INVALID_PARAMS);
if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested mode is already active. */
if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
/* While powered off, only toggle the flag; the controller is
 * programmed on the next power-on.
 */
if (!hdev_is_powered(hdev)) {
hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
new_settings(hdev, sk);
cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
hci_req_init(&req, hdev);
__hci_req_write_fast_connectable(&req, cp->val);
err = hci_req_run(&req, fast_connectable_complete);
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
MGMT_STATUS_FAILED);
mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
/* HCI completion callback for Set BR/EDR: on failure roll back the
 * eagerly-set HCI_BREDR_ENABLED flag, otherwise confirm the new settings
 * to the requester and broadcast them.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
struct mgmt_pending_cmd *cmd;
bt_dev_dbg(hdev, "status 0x%02x", status);
cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
u8 mgmt_err = mgmt_status(status);
/* We need to restore the flag if related HCI commands
hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
new_settings(hdev, cmd->sk);
mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller. LE must stay enabled; several combinations (disable while
 * powered, re-enable with static address or SC enabled) are rejected.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
struct hci_request req;
bt_dev_dbg(hdev, "sock %p", sk);
if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR can only be toggled while LE stays enabled. */
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
MGMT_STATUS_REJECTED);
if (cp->val != 0x00 && cp->val != 0x01)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
MGMT_STATUS_INVALID_PARAMS);
if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears every BR/EDR-only setting too. */
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
err = new_settings(hdev, sk);
/* Reject disabling when powered on */
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
MGMT_STATUS_REJECTED);
/* When configuring a dual-mode controller to operate
 * with LE only and using a static address, then switching
 * BR/EDR back on is not allowed.
 * Dual-mode controllers shall operate with the public
 * address as its identity address for BR/EDR and LE. So
 * reject the attempt to create an invalid configuration.
 * The same restrictions applies when secure connections
 * has been enabled. For BR/EDR this is a controller feature
 * while for LE it is a host stack feature. This means that
 * switching BR/EDR back on when secure connections has been
 * enabled is not a supported transaction.
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
(bacmp(&hdev->static_addr, BDADDR_ANY) ||
hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
MGMT_STATUS_REJECTED);
if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
/* We need to flip the bit already here so that
 * hci_req_update_adv_data generates the correct flags.
hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
hci_req_init(&req, hdev);
__hci_req_write_fast_connectable(&req, false);
__hci_req_update_scan(&req);
/* Since only the advertising data flags will change, there
 * is no need to update the scan response data.
__hci_req_update_adv_data(&req, hdev->cur_adv_instance);
err = hci_req_run(&req, set_bredr_complete);
mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
/* HCI request callback for the Secure Connections toggle issued by
 * set_secure_conn().  Translates the requested mode (stored in the
 * pending command's mgmt_mode param) into the HCI_SC_ENABLED /
 * HCI_SC_ONLY flag pair and reports the new settings to the socket.
 */
5619 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5621 struct mgmt_pending_cmd *cmd;
5622 struct mgmt_mode *cp;
5624 bt_dev_dbg(hdev, "status %u", status);
5628 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
/* Controller rejected the command: propagate the HCI error. */
5633 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5634 mgmt_status(status));
/* Success paths: val 0x00 clears both flags, 0x01 enables SC,
 * 0x02 enables SC-only mode (both flags set).
 */
5642 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5643 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5646 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5647 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5650 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5651 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5655 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5656 new_settings(hdev, cmd->sk);
5659 mgmt_pending_remove(cmd);
5661 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: val 0x00 disables Secure
 * Connections, 0x01 enables it, 0x02 enables SC-only mode.  When the
 * controller-side part is not applicable (powered off, not SC capable,
 * or BR/EDR disabled) only the host flags are updated; otherwise a
 * HCI Write Secure Connections Support command is issued.
 */
5664 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5665 void *data, u16 len)
5667 struct mgmt_mode *cp = data;
5668 struct mgmt_pending_cmd *cmd;
5669 struct hci_request req;
5673 bt_dev_dbg(hdev, "sock %p", sk);
/* SC needs either controller support (BR/EDR) or LE enabled (host). */
5675 if (!lmp_sc_capable(hdev) &&
5676 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5677 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5678 MGMT_STATUS_NOT_SUPPORTED);
/* For BR/EDR, SC builds on SSP; reject if SSP is not enabled. */
5680 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5681 lmp_sc_capable(hdev) &&
5682 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5683 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5684 MGMT_STATUS_REJECTED);
5686 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5687 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5688 MGMT_STATUS_INVALID_PARAMS);
/* Host-flag-only path: no HCI traffic possible or needed. */
5692 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5693 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5697 changed = !hci_dev_test_and_set_flag(hdev,
5699 if (cp->val == 0x02)
5700 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5702 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5704 changed = hci_dev_test_and_clear_flag(hdev,
5706 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5709 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5714 err = new_settings(hdev, sk);
5719 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5720 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state: answer without touching HW. */
5727 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5728 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5729 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5733 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
/* sc_enable_complete() applies the flags once the controller ACKs. */
5739 hci_req_init(&req, hdev);
5740 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5741 err = hci_req_run(&req, sc_enable_complete);
5743 mgmt_pending_remove(cmd);
5748 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: val 0x00 disables keeping debug
 * keys, 0x01 keeps them, 0x02 additionally makes the controller use
 * SSP debug mode.  Purely host-flag based except for the optional
 * HCI Write SSP Debug Mode command when powered and SSP is on.
 */
5752 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5753 void *data, u16 len)
5755 struct mgmt_mode *cp = data;
5756 bool changed, use_changed;
5759 bt_dev_dbg(hdev, "sock %p", sk);
5761 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5763 MGMT_STATUS_INVALID_PARAMS);
/* Track whether the "keep debug keys" flag actually flipped. */
5768 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5770 changed = hci_dev_test_and_clear_flag(hdev,
5771 HCI_KEEP_DEBUG_KEYS);
/* 0x02 also switches on active use of debug keys. */
5773 if (cp->val == 0x02)
5774 use_changed = !hci_dev_test_and_set_flag(hdev,
5775 HCI_USE_DEBUG_KEYS);
5777 use_changed = hci_dev_test_and_clear_flag(hdev,
5778 HCI_USE_DEBUG_KEYS);
/* Only tell the controller when the usage state really changed and
 * the command can take effect (powered + SSP enabled).
 */
5780 if (hdev_is_powered(hdev) && use_changed &&
5781 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5782 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5783 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5784 sizeof(mode), &mode);
5787 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5792 err = new_settings(hdev, sk);
5795 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (RPA usage)
 * and install the local IRK.  Only allowed while powered off so the
 * address generation state cannot change under a live controller.
 * privacy 0x02 selects "limited privacy" mode.
 */
5799 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5802 struct mgmt_cp_set_privacy *cp = cp_data;
5806 bt_dev_dbg(hdev, "sock %p", sk);
5808 if (!lmp_le_capable(hdev))
5809 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5810 MGMT_STATUS_NOT_SUPPORTED);
5812 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
5813 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5814 MGMT_STATUS_INVALID_PARAMS);
/* Changing privacy on a powered controller is not supported. */
5816 if (hdev_is_powered(hdev))
5817 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5818 MGMT_STATUS_REJECTED);
5822 /* If user space supports this command it is also expected to
5823 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5825 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable: store the IRK and force a fresh RPA to be generated. */
5828 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5829 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5830 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5831 hci_adv_instances_set_rpa_expired(hdev, true);
5832 if (cp->privacy == 0x02)
5833 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
5835 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable: wipe the IRK and all privacy-related state. */
5837 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5838 memset(hdev->irk, 0, sizeof(hdev->irk));
5839 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5840 hci_adv_instances_set_rpa_expired(hdev, false);
5841 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
5844 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5849 err = new_settings(hdev, sk);
5852 hci_dev_unlock(hdev);
/* Validate a single mgmt_irk_info entry: public LE addresses are
 * always acceptable; random addresses must be static random, i.e.
 * the two most significant bits of the address must both be set.
 */
5856 static bool irk_is_valid(struct mgmt_irk_info *irk)
5858 switch (irk->addr.type) {
5859 case BDADDR_LE_PUBLIC:
5862 case BDADDR_LE_RANDOM:
5863 /* Two most significant bits shall be set */
5864 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the whole set of stored Identity
 * Resolving Keys with the list supplied by user space.  Validates the
 * count against the payload length, checks every entry, skips keys on
 * the blocked-key list, and finally enables RPA resolving.
 */
5872 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
/* Upper bound on irk_count that still fits in a u16-sized payload. */
5875 struct mgmt_cp_load_irks *cp = cp_data;
5876 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5877 sizeof(struct mgmt_irk_info));
5878 u16 irk_count, expected_len;
5881 bt_dev_dbg(hdev, "sock %p", sk);
5883 if (!lmp_le_capable(hdev))
5884 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5885 MGMT_STATUS_NOT_SUPPORTED);
5887 irk_count = __le16_to_cpu(cp->irk_count);
5888 if (irk_count > max_irk_count) {
5889 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5891 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5892 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload size exactly. */
5895 expected_len = struct_size(cp, irks, irk_count);
5896 if (expected_len != len) {
5897 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5899 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5900 MGMT_STATUS_INVALID_PARAMS);
5903 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate every entry before mutating any state. */
5905 for (i = 0; i < irk_count; i++) {
5906 struct mgmt_irk_info *key = &cp->irks[i];
5908 if (!irk_is_valid(key))
5909 return mgmt_cmd_status(sk, hdev->id,
5911 MGMT_STATUS_INVALID_PARAMS);
/* Atomically replace the IRK store: clear, then re-add each key. */
5916 hci_smp_irks_clear(hdev);
5918 for (i = 0; i < irk_count; i++) {
5919 struct mgmt_irk_info *irk = &cp->irks[i];
5921 if (hci_is_blocked_key(hdev,
5922 HCI_BLOCKED_KEY_TYPE_IRK,
5924 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
5929 hci_add_irk(hdev, &irk->addr.bdaddr,
5930 le_addr_type(irk->addr.type), irk->val,
5934 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5936 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5938 hci_dev_unlock(hdev);
/* Tizen vendor MGMT_OP_SET_ADVERTISING_PARAMS handler: store LE
 * advertising interval range, filter policy and advertising type in
 * hdev for use by the next advertising enable.  Rejected while
 * advertising is already active.
 */
5944 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
5945 void *data, u16 len)
5947 struct mgmt_cp_set_advertising_params *cp = data;
5952 BT_DBG("%s", hdev->name);
5954 if (!lmp_le_capable(hdev))
5955 return mgmt_cmd_status(sk, hdev->id,
5956 MGMT_OP_SET_ADVERTISING_PARAMS,
5957 MGMT_STATUS_NOT_SUPPORTED);
/* Parameters cannot be changed while advertising is running. */
5959 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
5960 return mgmt_cmd_status(sk, hdev->id,
5961 MGMT_OP_SET_ADVERTISING_PARAMS,
5964 min_interval = __le16_to_cpu(cp->interval_min);
5965 max_interval = __le16_to_cpu(cp->interval_max);
/* Enforce spec range 0x0020..0x4000 and min <= max. */
5967 if (min_interval > max_interval ||
5968 min_interval < 0x0020 || max_interval > 0x4000)
5969 return mgmt_cmd_status(sk, hdev->id,
5970 MGMT_OP_SET_ADVERTISING_PARAMS,
5971 MGMT_STATUS_INVALID_PARAMS);
5975 hdev->le_adv_min_interval = min_interval;
5976 hdev->le_adv_max_interval = max_interval;
5977 hdev->adv_filter_policy = cp->filter_policy;
5978 hdev->adv_type = cp->type;
5980 err = mgmt_cmd_complete(sk, hdev->id,
5981 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
5983 hci_dev_unlock(hdev);
/* HCI request callback for set_advertising_data(): relay the HCI
 * status of LE Set Advertising Data back to the pending mgmt command
 * and drop the pending entry.
 */
5988 static void set_advertising_data_complete(struct hci_dev *hdev,
5989 u8 status, u16 opcode)
5991 struct mgmt_cp_set_advertising_data *cp;
5992 struct mgmt_pending_cmd *cmd;
5994 BT_DBG("status 0x%02x", status);
5998 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
/* Failure: forward the translated HCI status. */
6005 mgmt_cmd_status(cmd->sk, hdev->id,
6006 MGMT_OP_SET_ADVERTISING_DATA,
6007 mgmt_status(status));
/* Success: complete with the original parameters echoed back. */
6009 mgmt_cmd_complete(cmd->sk, hdev->id,
6010 MGMT_OP_SET_ADVERTISING_DATA, 0,
6013 mgmt_pending_remove(cmd);
6016 hci_dev_unlock(hdev);
/* Tizen vendor MGMT_OP_SET_ADVERTISING_DATA handler: forward raw
 * advertising data (up to HCI_MAX_AD_LENGTH bytes) to the controller
 * via HCI LE Set Advertising Data.
 */
6019 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6020 void *data, u16 len)
6022 struct mgmt_pending_cmd *cmd;
6023 struct hci_request req;
6024 struct mgmt_cp_set_advertising_data *cp = data;
6025 struct hci_cp_le_set_adv_data adv;
6028 BT_DBG("%s", hdev->name);
6030 if (!lmp_le_capable(hdev)) {
6031 return mgmt_cmd_status(sk, hdev->id,
6032 MGMT_OP_SET_ADVERTISING_DATA,
6033 MGMT_STATUS_NOT_SUPPORTED);
/* Serialize: only one SET_ADVERTISING_DATA in flight at a time. */
6038 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6039 err = mgmt_cmd_status(sk, hdev->id,
6040 MGMT_OP_SET_ADVERTISING_DATA,
/* NOTE(review): `len` here is the full mgmt payload length — assumes
 * it equals the AD data length; confirm against the mgmt_tizen ABI.
 */
6045 if (len > HCI_MAX_AD_LENGTH) {
6046 err = mgmt_cmd_status(sk, hdev->id,
6047 MGMT_OP_SET_ADVERTISING_DATA,
6048 MGMT_STATUS_INVALID_PARAMS);
6052 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6059 hci_req_init(&req, hdev);
/* Zero-fill so unused trailing AD bytes are sent as zeros. */
6061 memset(&adv, 0, sizeof(adv));
6062 memcpy(adv.data, cp->data, len);
6065 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6067 err = hci_req_run(&req, set_advertising_data_complete);
6069 mgmt_pending_remove(cmd);
6072 hci_dev_unlock(hdev);
/* HCI request callback for set_scan_rsp_data(): report the HCI status
 * of LE Set Scan Response Data to the pending mgmt command.
 */
6077 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6080 struct mgmt_cp_set_scan_rsp_data *cp;
6081 struct mgmt_pending_cmd *cmd;
6083 BT_DBG("status 0x%02x", status);
6087 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6094 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6095 mgmt_status(status));
6097 mgmt_cmd_complete(cmd->sk, hdev->id,
6098 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6101 mgmt_pending_remove(cmd);
6104 hci_dev_unlock(hdev);
/* Tizen vendor MGMT_OP_SET_SCAN_RSP_DATA handler: forward raw scan
 * response data (up to HCI_MAX_AD_LENGTH bytes) to the controller via
 * HCI LE Set Scan Response Data.  Mirrors set_advertising_data().
 */
6107 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6110 struct mgmt_pending_cmd *cmd;
6111 struct hci_request req;
6112 struct mgmt_cp_set_scan_rsp_data *cp = data;
6113 struct hci_cp_le_set_scan_rsp_data rsp;
6116 BT_DBG("%s", hdev->name);
6118 if (!lmp_le_capable(hdev))
6119 return mgmt_cmd_status(sk, hdev->id,
6120 MGMT_OP_SET_SCAN_RSP_DATA,
6121 MGMT_STATUS_NOT_SUPPORTED);
6125 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6126 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6131 if (len > HCI_MAX_AD_LENGTH) {
6132 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6133 MGMT_STATUS_INVALID_PARAMS);
6137 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6143 hci_req_init(&req, hdev);
/* Zero-fill so unused trailing bytes are sent as zeros. */
6145 memset(&rsp, 0, sizeof(rsp));
6146 memcpy(rsp.data, cp->data, len);
6149 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6151 err = hci_req_run(&req, set_scan_rsp_data_complete);
6153 mgmt_pending_remove(cmd);
6156 hci_dev_unlock(hdev);
6161 /* Adv White List feature */
/* HCI request callback for add_white_list(): report the result of
 * LE Add Device To White List, echoing the original parameters back
 * on success.
 */
6162 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6164 struct mgmt_cp_add_dev_white_list *cp;
6165 struct mgmt_pending_cmd *cmd;
6167 BT_DBG("status 0x%02x", status);
6171 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
6178 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6179 mgmt_status(status));
6181 mgmt_cmd_complete(cmd->sk, hdev->id,
6182 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
6184 mgmt_pending_remove(cmd);
6187 hci_dev_unlock(hdev);
/* Tizen vendor MGMT_OP_ADD_DEV_WHITE_LIST handler: add a device to
 * the controller's LE white list (filter accept list).  Requires LE
 * support and a powered controller.
 */
6190 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
6191 void *data, u16 len)
6193 struct mgmt_pending_cmd *cmd;
6194 struct mgmt_cp_add_dev_white_list *cp = data;
6195 struct hci_request req;
6198 BT_DBG("%s", hdev->name);
6200 if (!lmp_le_capable(hdev))
6201 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6202 MGMT_STATUS_NOT_SUPPORTED);
6204 if (!hdev_is_powered(hdev))
6205 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6206 MGMT_STATUS_REJECTED);
6210 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
6211 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6216 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
/* The mgmt parameters are passed through to the HCI command as-is. */
6222 hci_req_init(&req, hdev);
6224 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
6226 err = hci_req_run(&req, add_white_list_complete);
6228 mgmt_pending_remove(cmd);
6233 hci_dev_unlock(hdev);
/* HCI request callback for remove_from_white_list(): report the result
 * of LE Remove Device From White List to the pending mgmt command.
 */
6238 static void remove_from_white_list_complete(struct hci_dev *hdev,
6239 u8 status, u16 opcode)
6241 struct mgmt_cp_remove_dev_from_white_list *cp;
6242 struct mgmt_pending_cmd *cmd;
6244 BT_DBG("status 0x%02x", status);
6248 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
6255 mgmt_cmd_status(cmd->sk, hdev->id,
6256 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6257 mgmt_status(status));
6259 mgmt_cmd_complete(cmd->sk, hdev->id,
6260 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
6263 mgmt_pending_remove(cmd);
6266 hci_dev_unlock(hdev);
/* Tizen vendor MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST handler: remove a
 * device from the controller's LE white list.  Mirrors
 * add_white_list() in structure and preconditions.
 */
6269 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
6270 void *data, u16 len)
6272 struct mgmt_pending_cmd *cmd;
6273 struct mgmt_cp_remove_dev_from_white_list *cp = data;
6274 struct hci_request req;
6277 BT_DBG("%s", hdev->name);
6279 if (!lmp_le_capable(hdev))
6280 return mgmt_cmd_status(sk, hdev->id,
6281 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6282 MGMT_STATUS_NOT_SUPPORTED);
6284 if (!hdev_is_powered(hdev))
6285 return mgmt_cmd_status(sk, hdev->id,
6286 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6287 MGMT_STATUS_REJECTED);
6291 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
6292 err = mgmt_cmd_status(sk, hdev->id,
6293 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6298 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6305 hci_req_init(&req, hdev);
6307 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
6309 err = hci_req_run(&req, remove_from_white_list_complete);
6311 mgmt_pending_remove(cmd);
6316 hci_dev_unlock(hdev);
/* HCI request callback for clear_white_list(): report the result of
 * LE Clear White List to the pending mgmt command.
 */
6321 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
6324 struct mgmt_pending_cmd *cmd;
6326 BT_DBG("status 0x%02x", status);
6330 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
6335 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6336 mgmt_status(status));
6338 mgmt_cmd_complete(cmd->sk, hdev->id,
6339 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6342 mgmt_pending_remove(cmd);
6345 hci_dev_unlock(hdev);
/* Tizen vendor MGMT_OP_CLEAR_DEV_WHITE_LIST handler: wipe the
 * controller's entire LE white list via HCI LE Clear White List.
 */
6348 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
6349 void *data, u16 len)
6351 struct mgmt_pending_cmd *cmd;
6352 struct hci_request req;
6355 BT_DBG("%s", hdev->name);
6357 if (!lmp_le_capable(hdev))
6358 return mgmt_cmd_status(sk, hdev->id,
6359 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6360 MGMT_STATUS_NOT_SUPPORTED);
6362 if (!hdev_is_powered(hdev))
6363 return mgmt_cmd_status(sk, hdev->id,
6364 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6365 MGMT_STATUS_REJECTED);
6369 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
6370 err = mgmt_cmd_status(sk, hdev->id,
6371 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6376 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6383 hci_req_init(&req, hdev);
/* The HCI command carries no parameters. */
6385 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
6387 err = hci_req_run(&req, clear_white_list_complete);
6389 mgmt_pending_remove(cmd);
6394 hci_dev_unlock(hdev);
/* HCI request callback for mgmt_set_rssi_threshold(): finish the
 * pending MGMT_OP_SET_RSSI_ENABLE command with the HCI status.
 */
6399 static void set_rssi_threshold_complete(struct hci_dev *hdev,
6400 u8 status, u16 opcode)
6402 struct mgmt_pending_cmd *cmd;
6404 BT_DBG("status 0x%02x", status);
6408 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6413 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6414 mgmt_status(status));
6416 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6419 mgmt_pending_remove(cmd);
6422 hci_dev_unlock(hdev);
/* HCI request callback for mgmt_set_disable_rssi(): finish the
 * pending MGMT_OP_SET_RSSI_DISABLE command with the HCI status.
 */
6425 static void set_rssi_disable_complete(struct hci_dev *hdev,
6426 u8 status, u16 opcode)
6428 struct mgmt_pending_cmd *cmd;
6430 BT_DBG("status 0x%02x", status);
6434 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6439 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6440 mgmt_status(status));
6442 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6445 mgmt_pending_remove(cmd);
6448 hci_dev_unlock(hdev);
/* Program vendor-specific RSSI alert thresholds for an existing
 * LE/ACL connection (Tizen RSSI monitoring).  Expects a pending
 * MGMT_OP_SET_RSSI_ENABLE command to already exist; sends the
 * vendor HCI_OP_ENABLE_RSSI sub-command (ext opcode 0x0B) carrying
 * low / in-range / high thresholds for the connection handle.
 */
6451 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6452 void *data, u16 len)
6455 struct hci_cp_set_rssi_threshold th = { 0, };
6456 struct mgmt_cp_set_enable_rssi *cp = data;
6457 struct hci_conn *conn;
6458 struct mgmt_pending_cmd *cmd;
6459 struct hci_request req;
/* A pending SET_RSSI_ENABLE entry must exist (added by the enable
 * path); without it the threshold write has no command to answer.
 */
6464 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6466 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6467 MGMT_STATUS_FAILED);
6471 if (!lmp_le_capable(hdev)) {
6472 mgmt_pending_remove(cmd);
6473 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6474 MGMT_STATUS_NOT_SUPPORTED);
6478 if (!hdev_is_powered(hdev)) {
6479 BT_DBG("%s", hdev->name);
6480 mgmt_pending_remove(cmd);
6481 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6482 MGMT_STATUS_NOT_POWERED);
/* link_type 0x01 = LE, otherwise BR/EDR ACL. */
6486 if (cp->link_type == 0x01)
6487 dest_type = LE_LINK;
6489 dest_type = ACL_LINK;
6491 /* Get LE/ACL link handle info */
6492 conn = hci_conn_hash_lookup_ba(hdev,
6493 dest_type, &cp->bdaddr);
6496 err = mgmt_cmd_complete(sk, hdev->id,
6497 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
6498 mgmt_pending_remove(cmd);
6502 hci_req_init(&req, hdev);
/* Vendor extended opcode 0x0B = set RSSI threshold; alert_mask 0x07
 * enables all three (low / in-range / high) alert conditions.
 */
6504 th.hci_le_ext_opcode = 0x0B;
6506 th.conn_handle = conn->handle;
6507 th.alert_mask = 0x07;
6508 th.low_th = cp->low_th;
6509 th.in_range_th = cp->in_range_th;
6510 th.high_th = cp->high_th;
6512 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6513 err = hci_req_run(&req, set_rssi_threshold_complete);
6516 mgmt_pending_remove(cmd);
6517 BT_ERR("Error in requesting hci_req_run");
6522 hci_dev_unlock(hdev);
/* Report successful RSSI monitoring enable to user space: complete
 * the mgmt command, emit MGMT_EV_RSSI_ENABLED, and mark the
 * connection's RSSI-monitoring state in the connection hash.
 */
6526 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6527 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6529 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6530 struct mgmt_cp_set_enable_rssi *cp = data;
6531 struct mgmt_pending_cmd *cmd;
6536 mgmt_rp.status = rp->status;
6537 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6538 mgmt_rp.bt_address = cp->bdaddr;
6539 mgmt_rp.link_type = cp->link_type;
6541 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6542 MGMT_STATUS_SUCCESS, &mgmt_rp,
6543 sizeof(struct mgmt_cc_rsp_enable_rssi));
6545 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6546 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
/* Reset any stale per-link monitoring state for this link type,
 * then mark just this address as monitored.
 */
6548 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6549 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6550 &mgmt_rp.bt_address, true);
6554 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6556 mgmt_pending_remove(cmd);
6558 hci_dev_unlock(hdev);
/* Report successful RSSI monitoring disable to user space: complete
 * the mgmt command, emit MGMT_EV_RSSI_DISABLED, and clear the
 * connection's RSSI-monitoring state.
 */
6561 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6562 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6564 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6565 struct mgmt_cp_disable_rssi *cp = data;
6566 struct mgmt_pending_cmd *cmd;
6571 mgmt_rp.status = rp->status;
6572 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6573 mgmt_rp.bt_address = cp->bdaddr;
6574 mgmt_rp.link_type = cp->link_type;
/* NOTE(review): mgmt_rp is a struct mgmt_cc_rp_disable_rssi but both
 * sizes below use sizeof(struct mgmt_cc_rsp_enable_rssi) — verify the
 * two structs have identical layout, otherwise this is a size bug.
 */
6576 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6577 MGMT_STATUS_SUCCESS, &mgmt_rp,
6578 sizeof(struct mgmt_cc_rsp_enable_rssi));
6580 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6581 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6583 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6584 &mgmt_rp.bt_address, false);
6588 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6590 mgmt_pending_remove(cmd);
6592 hci_dev_unlock(hdev);
/* Turn off vendor RSSI monitoring entirely: sends HCI_OP_ENABLE_RSSI
 * with ext opcode 0x01 and an all-zero feature/data payload.  Expects
 * a pending MGMT_OP_SET_RSSI_DISABLE command to already exist.
 */
6595 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6596 void *data, u16 len)
6598 struct mgmt_pending_cmd *cmd;
6599 struct hci_request req;
6600 struct hci_cp_set_enable_rssi cp_en = { 0, };
6603 BT_DBG("Set Disable RSSI.");
/* ext opcode 0x01 = enable/disable; zeroed feature byte disables. */
6605 cp_en.hci_le_ext_opcode = 0x01;
6606 cp_en.le_enable_cs_Features = 0x00;
6607 cp_en.data[0] = 0x00;
6608 cp_en.data[1] = 0x00;
6609 cp_en.data[2] = 0x00;
6613 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6615 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6616 MGMT_STATUS_FAILED);
6620 if (!lmp_le_capable(hdev)) {
6621 mgmt_pending_remove(cmd);
6622 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6623 MGMT_STATUS_NOT_SUPPORTED);
6627 if (!hdev_is_powered(hdev)) {
6628 BT_DBG("%s", hdev->name);
6629 mgmt_pending_remove(cmd);
6630 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6631 MGMT_STATUS_NOT_POWERED);
6635 hci_req_init(&req, hdev);
6637 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6638 sizeof(struct hci_cp_set_enable_rssi),
6639 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6640 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6642 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6643 err = hci_req_run(&req, set_rssi_disable_complete);
6646 mgmt_pending_remove(cmd);
6647 BT_ERR("Error in requesting hci_req_run");
6652 hci_dev_unlock(hdev);
/* Command-complete dispatcher for the vendor HCI_OP_ENABLE_RSSI
 * command.  Looks up which mgmt operation (enable or disable) is
 * pending and, based on the returned le_ext_opcode, either continues
 * the enable sequence (set thresholds after the enable step), sends
 * the final success response, or — for disable — decides whether the
 * whole vendor feature can be switched off (no other monitored links).
 */
6656 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6658 struct hci_cc_rsp_enable_rssi *rp = response;
6659 struct mgmt_pending_cmd *cmd_enable = NULL;
6660 struct mgmt_pending_cmd *cmd_disable = NULL;
6661 struct mgmt_cp_set_enable_rssi *cp_en;
6662 struct mgmt_cp_disable_rssi *cp_dis;
6665 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6666 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6667 hci_dev_unlock(hdev);
6670 BT_DBG("Enable Request");
6673 BT_DBG("Disable Request");
6676 cp_en = cmd_enable->param;
/* Enable path: which vendor sub-step just completed? */
6681 switch (rp->le_ext_opcode) {
6683 BT_DBG("RSSI enabled.. Setting Threshold...");
6684 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6685 cp_en, sizeof(*cp_en));
6689 BT_DBG("Sending RSSI enable success");
6690 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6691 cp_en, rp, rp->status);
6695 } else if (cmd_disable) {
6696 cp_dis = cmd_disable->param;
6701 switch (rp->le_ext_opcode) {
6703 BT_DBG("Sending RSSI disable success");
6704 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6705 cp_dis, rp, rp->status);
6710 * Only unset RSSI Threshold values for the Link if
6711 * RSSI is monitored for other BREDR or LE Links
6713 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6714 BT_DBG("Unset Threshold. Other links being monitored");
6715 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6716 cp_dis, rp, rp->status);
6718 BT_DBG("Unset Threshold. Disabling...");
6719 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6720 cp_dis, sizeof(*cp_dis));
/* HCI request callback for set_enable_rssi(): finish the pending
 * MGMT_OP_SET_RSSI_ENABLE command with the HCI status.
 */
6727 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
6730 struct mgmt_pending_cmd *cmd;
6732 BT_DBG("status 0x%02x", status);
6736 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6741 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6742 mgmt_status(status));
6744 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6747 mgmt_pending_remove(cmd);
6750 hci_dev_unlock(hdev);
/* MGMT_OP_SET_RSSI_ENABLE handler: start vendor RSSI monitoring.
 * Sends HCI_OP_ENABLE_RSSI (ext opcode 0x01, feature 0x04); if the
 * vendor feature is already active on some link, skips straight to
 * programming the thresholds via mgmt_set_rssi_threshold().
 */
6753 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
6754 void *data, u16 len)
6756 struct mgmt_pending_cmd *cmd;
6757 struct hci_request req;
6758 struct mgmt_cp_set_enable_rssi *cp = data;
6759 struct hci_cp_set_enable_rssi cp_en = { 0, };
6762 BT_DBG("Set Enable RSSI.");
/* ext opcode 0x01 = enable/disable; feature byte 0x04 enables RSSI. */
6764 cp_en.hci_le_ext_opcode = 0x01;
6765 cp_en.le_enable_cs_Features = 0x04;
6766 cp_en.data[0] = 0x00;
6767 cp_en.data[1] = 0x00;
6768 cp_en.data[2] = 0x00;
6772 if (!lmp_le_capable(hdev)) {
6773 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6774 MGMT_STATUS_NOT_SUPPORTED);
6778 if (!hdev_is_powered(hdev)) {
6779 BT_DBG("%s", hdev->name);
6780 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6781 MGMT_STATUS_NOT_POWERED);
6785 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
6786 BT_DBG("%s", hdev->name);
6787 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6792 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
6795 BT_DBG("%s", hdev->name);
6800 /* If RSSI is already enabled directly set Threshold values */
6801 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
6802 hci_dev_unlock(hdev);
6803 BT_DBG("RSSI Enabled. Directly set Threshold");
6804 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
6808 hci_req_init(&req, hdev);
6810 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6811 sizeof(struct hci_cp_set_enable_rssi),
6812 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6813 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6815 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6816 err = hci_req_run(&req, set_rssi_enable_complete);
6819 mgmt_pending_remove(cmd);
6820 BT_ERR("Error in requesting hci_req_run");
6825 hci_dev_unlock(hdev);
/* HCI request callback for get_raw_rssi(): complete the pending mgmt
 * command; the actual RSSI value is delivered separately via
 * mgmt_raw_rssi_response().
 */
6830 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6832 struct mgmt_pending_cmd *cmd;
6834 BT_DBG("status 0x%02x", status);
6838 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
6842 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6843 MGMT_STATUS_SUCCESS, &status, 1);
6845 mgmt_pending_remove(cmd);
6848 hci_dev_unlock(hdev);
/* MGMT_OP_GET_RAW_RSSI handler: query the raw RSSI (in dBm) of an
 * existing LE/BR-EDR connection via the vendor HCI_OP_GET_RAW_RSSI
 * command, identified by the peer address and link type.
 */
6851 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
6854 struct mgmt_pending_cmd *cmd;
6855 struct hci_request req;
6856 struct mgmt_cp_get_raw_rssi *cp = data;
6857 struct hci_cp_get_raw_rssi hci_cp;
6859 struct hci_conn *conn;
6863 BT_DBG("Get Raw RSSI.");
6867 if (!lmp_le_capable(hdev)) {
6868 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6869 MGMT_STATUS_NOT_SUPPORTED);
/* link_type 0x01 = LE, otherwise BR/EDR ACL. */
6873 if (cp->link_type == 0x01)
6874 dest_type = LE_LINK;
6876 dest_type = ACL_LINK;
6878 /* Get LE/BREDR link handle info */
6879 conn = hci_conn_hash_lookup_ba(hdev,
6880 dest_type, &cp->bt_address)
6882 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6883 MGMT_STATUS_NOT_CONNECTED);
6886 hci_cp.conn_handle = conn->handle;
6888 if (!hdev_is_powered(hdev)) {
6889 BT_DBG("%s", hdev->name);
6890 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6891 MGMT_STATUS_NOT_POWERED);
6895 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
6896 BT_DBG("%s", hdev->name);
6897 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6902 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
6904 BT_DBG("%s", hdev->name);
6909 hci_req_init(&req, hdev);
6911 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
6912 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
6913 err = hci_req_run(&req, get_raw_rssi_complete);
6916 mgmt_pending_remove(cmd);
6917 BT_ERR("Error in requesting hci_req_run");
6921 hci_dev_unlock(hdev);
/* Deliver a raw-RSSI command-complete result to user space as an
 * MGMT_EV_RAW_RSSI event, resolving the connection handle back to
 * the peer address and link type.
 */
6926 void mgmt_raw_rssi_response(struct hci_dev *hdev,
6927 struct hci_cc_rp_get_raw_rssi *rp, int success)
6929 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
6930 struct hci_conn *conn;
6932 mgmt_rp.status = rp->status;
6933 mgmt_rp.rssi_dbm = rp->rssi_dbm;
6935 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
6939 bacpy(&mgmt_rp.bt_address, &conn->dst);
/* Event link_type convention: 0x01 = LE, 0x00 = BR/EDR. */
6940 if (conn->type == LE_LINK)
6941 mgmt_rp.link_type = 0x01;
6943 mgmt_rp.link_type = 0x00;
6945 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
6946 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* HCI request callback for set_disable_threshold(): complete the
 * pending MGMT_OP_SET_RSSI_DISABLE command, passing the raw HCI
 * status byte as the response payload.
 */
6949 static void set_disable_threshold_complete(struct hci_dev *hdev,
6950 u8 status, u16 opcode)
6952 struct mgmt_pending_cmd *cmd;
6954 BT_DBG("status 0x%02x", status);
6958 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6962 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6963 MGMT_STATUS_SUCCESS, &status, 1);
6965 mgmt_pending_remove(cmd);
6968 hci_dev_unlock(hdev);
6971 /** Removes monitoring for a link*/
/* Removes RSSI monitoring for a single link: writes an all-clear
 * threshold set (ext opcode 0x0B, alert_mask 0x00) for the given
 * connection via the vendor HCI_OP_ENABLE_RSSI command.
 */
6972 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
6973 void *data, u16 len)
6976 struct hci_cp_set_rssi_threshold th = { 0, };
6977 struct mgmt_cp_disable_rssi *cp = data;
6978 struct hci_conn *conn;
6979 struct mgmt_pending_cmd *cmd;
6980 struct hci_request req;
6983 BT_DBG("Set Disable RSSI.");
6987 if (!lmp_le_capable(hdev)) {
6988 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6989 MGMT_STATUS_NOT_SUPPORTED);
6993 /* Get LE/ACL link handle info*/
6994 if (cp->link_type == 0x01)
6995 dest_type = LE_LINK;
6997 dest_type = ACL_LINK;
6999 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
7001 err = mgmt_cmd_complete(sk, hdev->id,
7002 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
/* alert_mask 0x00 switches off all alerts for this handle. */
7006 th.hci_le_ext_opcode = 0x0B;
7008 th.conn_handle = conn->handle;
7009 th.alert_mask = 0x00;
7011 th.in_range_th = 0x00;
7014 if (!hdev_is_powered(hdev)) {
7015 BT_DBG("%s", hdev->name);
7016 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7021 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
7022 BT_DBG("%s", hdev->name);
7023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7028 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
7031 BT_DBG("%s", hdev->name);
7036 hci_req_init(&req, hdev);
7038 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7039 err = hci_req_run(&req, set_disable_threshold_complete);
7041 mgmt_pending_remove(cmd);
7042 BT_ERR("Error in requesting hci_req_run");
7047 hci_dev_unlock(hdev);
/* Translate a vendor-specific RSSI alert HCI event into an
 * MGMT_EV_RSSI_ALERT event for user space, resolving the connection
 * handle to the peer address and link type.
 */
7052 void mgmt_rssi_alert_evt(struct hci_dev *hdev, struct sk_buff *skb)
7054 struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
7055 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
7056 struct hci_conn *conn;
7058 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
7059 ev->conn_handle, ev->alert_type, ev->rssi_dbm);
7061 conn = hci_conn_hash_lookup_handle(hdev, ev->conn_handle);
/* Connection may already be gone by the time the event arrives. */
7064 BT_ERR("RSSI alert Error: Device not found for handle");
7067 bacpy(&mgmt_ev.bdaddr, &conn->dst);
7069 if (conn->type == LE_LINK)
7070 mgmt_ev.link_type = 0x01;
7072 mgmt_ev.link_type = 0x00;
7074 mgmt_ev.alert_type = ev->alert_type;
7075 mgmt_ev.rssi_dbm = ev->rssi_dbm;
7077 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
7078 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
/* Abort an LE discovery start: reset the LE discovery state machine
 * to STOPPED and complete the pending MGMT_OP_START_LE_DISCOVERY
 * command with the (translated) failure status and the discovery type.
 */
7082 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
7084 struct mgmt_pending_cmd *cmd;
7088 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7090 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7094 type = hdev->le_discovery.type;
7096 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7097 mgmt_status(status), &type, sizeof(type));
7098 mgmt_pending_remove(cmd);
/* HCI request callback for start_le_discovery(): on failure unwind
 * via mgmt_start_le_discovery_failed(); on success move the LE
 * discovery state machine to FINDING and schedule the delayed
 * le_scan_disable work to end the scan after the timeout.
 */
7103 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
7106 unsigned long timeout = 0;
7108 BT_DBG("status %d", status);
7112 mgmt_start_le_discovery_failed(hdev, status);
7113 hci_dev_unlock(hdev);
7118 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
7119 hci_dev_unlock(hdev);
7121 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
7122 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
7127 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/*
 * start_le_discovery() - MGMT_OP_START_LE_DISCOVERY handler (Tizen).
 * Validates power/state/type, temporarily disables background scanning,
 * programs LE scan parameters and enables active LE scanning.
 */
7130 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
7131 void *data, u16 len)
7133 struct mgmt_cp_start_le_discovery *cp = data;
7134 struct mgmt_pending_cmd *cmd;
7135 struct hci_cp_le_set_scan_param param_cp;
7136 struct hci_cp_le_set_scan_enable enable_cp;
7137 struct hci_request req;
7138 u8 status, own_addr_type;
7141 BT_DBG("%s", hdev->name);
/* Reject when the controller is powered off. */
7145 if (!hdev_is_powered(hdev)) {
7146 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7147 MGMT_STATUS_NOT_POWERED);
/* Only one LE discovery may run at a time. */
7151 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
7152 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
/* This separate LE discovery path supports DISCOV_TYPE_LE only. */
7157 if (cp->type != DISCOV_TYPE_LE) {
7158 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7159 MGMT_STATUS_INVALID_PARAMS);
7163 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
7169 hdev->le_discovery.type = cp->type;
7171 hci_req_init(&req, hdev);
/* Fail early if the controller lacks LE support. */
7173 status = mgmt_le_support(hdev);
7175 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7177 mgmt_pending_remove(cmd);
7181 /* If controller is scanning, it means the background scanning
7182 * is running. Thus, we should temporarily stop it in order to
7183 * set the discovery scanning parameters.
7185 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
7186 hci_req_add_le_scan_disable(&req, false);
/* NOTE(review): "¶m_cp" below is mojibake for "&param_cp" --
 * HTML-entity corruption in this extract; confirm against upstream. */
7188 memset(¶m_cp, 0, sizeof(param_cp));
7190 /* All active scans will be done with either a resolvable
7191 * private address (when privacy feature has been enabled)
7192 * or unresolvable private address.
7194 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
7196 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7197 MGMT_STATUS_FAILED);
7198 mgmt_pending_remove(cmd);
/* Scan parameters come from hdev defaults (see le_set_scan_params). */
7202 param_cp.type = hdev->le_scan_type;
7203 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
7204 param_cp.window = cpu_to_le16(hdev->le_scan_window);
7205 param_cp.own_address_type = own_addr_type;
7206 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
7209 memset(&enable_cp, 0, sizeof(enable_cp));
7210 enable_cp.enable = LE_SCAN_ENABLE;
7211 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
7213 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
7216 err = hci_req_run(&req, start_le_discovery_complete);
7218 mgmt_pending_remove(cmd);
/* Request queued; completion callback moves state to FINDING. */
7220 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
7223 hci_dev_unlock(hdev);
/*
 * mgmt_stop_le_discovery_failed() - complete a pending STOP_LE_DISCOVERY
 * command with a failure status.
 */
7227 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
7229 struct mgmt_pending_cmd *cmd;
7232 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7236 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7237 mgmt_status(status), &hdev->le_discovery.type,
7238 sizeof(hdev->le_discovery.type));
7239 mgmt_pending_remove(cmd);
/*
 * stop_le_discovery_complete() - hci_request completion callback for the
 * LE scan-stop request; on success sets discovery state back to STOPPED.
 */
7244 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
7247 BT_DBG("status %d", status);
7252 mgmt_stop_le_discovery_failed(hdev, status);
7256 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7259 hci_dev_unlock(hdev);
/*
 * stop_le_discovery() - MGMT_OP_STOP_LE_DISCOVERY handler (Tizen).
 * Cancels the scheduled scan-disable work and issues an LE scan disable,
 * moving the LE discovery state machine to STOPPING.
 */
7262 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
7263 void *data, u16 len)
7265 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
7266 struct mgmt_pending_cmd *cmd;
7267 struct hci_request req;
7270 BT_DBG("%s", hdev->name);
/* Reject when no LE discovery is currently active. */
7274 if (!hci_le_discovery_active(hdev)) {
7275 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7276 MGMT_STATUS_REJECTED, &mgmt_cp->type,
7277 sizeof(mgmt_cp->type));
/* The requested type must match the discovery that is running. */
7281 if (hdev->le_discovery.type != mgmt_cp->type) {
7282 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7283 MGMT_STATUS_INVALID_PARAMS,
7284 &mgmt_cp->type, sizeof(mgmt_cp->type));
7288 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
7294 hci_req_init(&req, hdev);
/* Only the FINDING state can be stopped here. */
7296 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
7297 BT_DBG("unknown le discovery state %u",
7298 hdev->le_discovery.state);
7300 mgmt_pending_remove(cmd);
7301 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7302 MGMT_STATUS_FAILED, &mgmt_cp->type,
7303 sizeof(mgmt_cp->type));
/* Stop the auto-disable timer before turning scanning off ourselves. */
7307 cancel_delayed_work(&hdev->le_scan_disable);
7308 hci_req_add_le_scan_disable(&req, false);
7310 err = hci_req_run(&req, stop_le_discovery_complete);
7312 mgmt_pending_remove(cmd);
7314 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
7317 hci_dev_unlock(hdev);
7321 /* Separate LE discovery */
/*
 * mgmt_le_discovering() - notify userspace of an LE discovery state
 * change: completes any pending start/stop command and emits
 * MGMT_EV_DISCOVERING.
 */
7322 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
7324 struct mgmt_ev_discovering ev;
7325 struct mgmt_pending_cmd *cmd;
7327 BT_DBG("%s le discovering %u", hdev->name, discovering);
/* Find whichever command (start or stop) is pending, if any. */
7330 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7332 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7335 u8 type = hdev->le_discovery.type;
7337 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
7339 mgmt_pending_remove(cmd);
7342 memset(&ev, 0, sizeof(ev));
7343 ev.type = hdev->le_discovery.type;
7344 ev.discovering = discovering;
7346 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/*
 * disable_le_auto_connect() - cancel an in-progress LE auto connection by
 * sending HCI LE Create Connection Cancel (Tizen extension).
 */
7349 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
7350 void *data, u16 len)
7354 BT_DBG("%s", hdev->name);
7358 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
7360 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
7362 hci_dev_unlock(hdev);
/*
 * check_le_conn_update_param() - validate LE connection parameters against
 * the ranges in the Bluetooth Core spec (interval 6..3200, timeout
 * 10..3200, latency <= 499 and below the supervision-derived maximum).
 */
7367 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
7372 if (min > max || min < 6 || max > 3200)
7375 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the maximum connection interval. */
7378 if (max >= to_multiplier * 8)
7381 max_latency = (to_multiplier * 8 / max) - 1;
7383 if (latency > 499 || latency > max_latency)
/*
 * le_conn_update() - MGMT_OP_LE_CONN_UPDATE handler (Tizen): validate the
 * requested parameters and ask the controller to update the LE connection.
 */
7389 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
7392 struct mgmt_cp_le_conn_update *cp = data;
7394 struct hci_conn *conn;
7395 u16 min, max, latency, supervision_timeout;
7398 if (!hdev_is_powered(hdev))
7399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7400 MGMT_STATUS_NOT_POWERED);
/* Convert little-endian wire values to host order. */
7402 min = __le16_to_cpu(cp->conn_interval_min);
7403 max = __le16_to_cpu(cp->conn_interval_max);
7404 latency = __le16_to_cpu(cp->conn_latency);
7405 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
7407 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
7408 min, max, latency, supervision_timeout);
7410 err = check_le_conn_update_param(min, max, latency,
7411 supervision_timeout);
7414 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7415 MGMT_STATUS_INVALID_PARAMS);
/* The target must have an established LE connection. */
7419 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
7421 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7422 MGMT_STATUS_NOT_CONNECTED);
7423 hci_dev_unlock(hdev);
7427 hci_dev_unlock(hdev);
7429 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
7431 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
/*
 * set_manufacturer_data_complete() - hci_request completion callback for
 * MGMT_OP_SET_MANUFACTURER_DATA: reply to the pending command with the
 * mapped status.
 */
7435 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
7438 struct mgmt_cp_set_manufacturer_data *cp;
7439 struct mgmt_pending_cmd *cmd;
7441 BT_DBG("status 0x%02x", status);
7445 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
/* Error path: report failure status to the requester. */
7452 mgmt_cmd_status(cmd->sk, hdev->id,
7453 MGMT_OP_SET_MANUFACTURER_DATA,
7454 mgmt_status(status));
/* Success path: complete with status 0. */
7456 mgmt_cmd_complete(cmd->sk, hdev->id,
7457 MGMT_OP_SET_MANUFACTURER_DATA, 0,
7460 mgmt_pending_remove(cmd);
7463 hci_dev_unlock(hdev);
/*
 * set_manufacturer_data() - MGMT_OP_SET_MANUFACTURER_DATA handler (Tizen).
 * Validates the EIR-style payload (data[0] = length, data[1] must be 0xFF
 * i.e. the Manufacturer Specific Data EIR type), stores it on hdev and
 * refreshes the EIR. Saves the previous data so it can be restored if the
 * hci request fails.
 */
7466 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
7467 void *data, u16 len)
7469 struct mgmt_pending_cmd *cmd;
7470 struct hci_request req;
7471 struct mgmt_cp_set_manufacturer_data *cp = data;
7472 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
7476 BT_DBG("%s", hdev->name);
/* Manufacturer data lives in EIR, which requires BR/EDR support. */
7478 if (!lmp_bredr_capable(hdev))
7479 return mgmt_cmd_status(sk, hdev->id,
7480 MGMT_OP_SET_MANUFACTURER_DATA,
7481 MGMT_STATUS_NOT_SUPPORTED);
/* data[0] is the length field; it must be non-zero and fit the buffer. */
7483 if (cp->data[0] == 0 ||
7484 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
7485 return mgmt_cmd_status(sk, hdev->id,
7486 MGMT_OP_SET_MANUFACTURER_DATA,
7487 MGMT_STATUS_INVALID_PARAMS);
/* data[1] is the EIR type; only 0xFF (manufacturer specific) allowed. */
7489 if (cp->data[1] != 0xFF)
7490 return mgmt_cmd_status(sk, hdev->id,
7491 MGMT_OP_SET_MANUFACTURER_DATA,
7492 MGMT_STATUS_NOT_SUPPORTED);
/* Only one update may be in flight at a time. */
7496 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
7497 err = mgmt_cmd_status(sk, hdev->id,
7498 MGMT_OP_SET_MANUFACTURER_DATA,
7503 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
7510 hci_req_init(&req, hdev);
7512 /* if new data is same as previous data then return command
7515 if (hdev->manufacturer_len == cp->data[0] - 1 &&
7516 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
7517 mgmt_pending_remove(cmd);
7518 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
7519 0, cp, sizeof(*cp));
/* Snapshot the current data for rollback on request failure. */
7524 old_len = hdev->manufacturer_len;
7526 memcpy(old_data, hdev->manufacturer_data, old_len);
7528 hdev->manufacturer_len = cp->data[0] - 1;
7529 if (hdev->manufacturer_len > 0)
7530 memcpy(hdev->manufacturer_data, cp->data + 2,
7531 hdev->manufacturer_len);
7533 __hci_req_update_eir(&req);
7535 err = hci_req_run(&req, set_manufacturer_data_complete);
7537 mgmt_pending_remove(cmd);
7542 hci_dev_unlock(hdev);
/* Rollback path: restore the previous manufacturer data. */
7547 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
7548 hdev->manufacturer_len = old_len;
7549 if (hdev->manufacturer_len > 0)
7550 memcpy(hdev->manufacturer_data, old_data,
7551 hdev->manufacturer_len);
7552 hci_dev_unlock(hdev);
/*
 * le_set_scan_params() - MGMT_OP_LE_SET_SCAN_PARAMS handler (Tizen).
 * Validates interval/window against the HCI-spec range 0x0004..0x4000,
 * stores them as the hdev scan defaults, and restarts background passive
 * scanning so the new values take effect.
 */
7556 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
7557 void *data, u16 len)
7559 struct mgmt_cp_le_set_scan_params *cp = data;
7560 __u16 interval, window;
7563 BT_DBG("%s", hdev->name);
7565 if (!lmp_le_capable(hdev))
7566 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7567 MGMT_STATUS_NOT_SUPPORTED);
7569 interval = __le16_to_cpu(cp->interval);
/* Valid LE scan interval range per HCI spec: 0x0004..0x4000. */
7571 if (interval < 0x0004 || interval > 0x4000)
7572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7573 MGMT_STATUS_INVALID_PARAMS);
7575 window = __le16_to_cpu(cp->window);
7577 if (window < 0x0004 || window > 0x4000)
7578 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7579 MGMT_STATUS_INVALID_PARAMS);
/* The scan window may not exceed the scan interval. */
7581 if (window > interval)
7582 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7583 MGMT_STATUS_INVALID_PARAMS);
7587 hdev->le_scan_type = cp->type;
7588 hdev->le_scan_interval = interval;
7589 hdev->le_scan_window = window;
7591 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
7594 /* If background scan is running, restart it so new parameters are
7597 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
7598 hdev->discovery.state == DISCOVERY_STOPPED) {
7599 struct hci_request req;
7601 hci_req_init(&req, hdev);
7603 hci_req_add_le_scan_disable(&req, false);
7604 hci_req_add_le_passive_scan(&req);
7606 hci_req_run(&req, NULL);
7609 hci_dev_unlock(hdev);
/*
 * mgmt_hardware_error() - emit MGMT_EV_HARDWARE_ERROR carrying the HCI
 * hardware error code to userspace (Tizen extension).
 */
7614 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
7616 struct mgmt_ev_hardware_error ev;
7618 ev.error_code = err_code;
7619 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
7621 #endif /* TIZEN_BT */
/*
 * ltk_is_valid() - sanity-check a long term key from userspace: the master
 * flag must be 0/1 and a random address must be static (two MSBs set).
 */
7623 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7625 if (key->master != 0x00 && key->master != 0x01)
7628 switch (key->addr.type) {
7629 case BDADDR_LE_PUBLIC:
7632 case BDADDR_LE_RANDOM:
7633 /* Two most significant bits shall be set */
7634 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/*
 * load_long_term_keys() - MGMT_OP_LOAD_LONG_TERM_KEYS handler. Validates
 * the key array length, clears the existing LTK store and repopulates it,
 * skipping blocked keys and mapping mgmt key types to SMP LTK types.
 */
7642 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7643 void *cp_data, u16 len)
7645 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound that keeps expected_len below U16_MAX. */
7646 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7647 sizeof(struct mgmt_ltk_info));
7648 u16 key_count, expected_len;
7651 bt_dev_dbg(hdev, "sock %p", sk);
7653 if (!lmp_le_capable(hdev))
7654 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7655 MGMT_STATUS_NOT_SUPPORTED);
7657 key_count = __le16_to_cpu(cp->key_count);
7658 if (key_count > max_key_count) {
7659 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7661 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7662 MGMT_STATUS_INVALID_PARAMS);
/* The message length must exactly match the declared key count. */
7665 expected_len = struct_size(cp, keys, key_count);
7666 if (expected_len != len) {
7667 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7669 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7670 MGMT_STATUS_INVALID_PARAMS);
7673 bt_dev_dbg(hdev, "key_count %u", key_count);
/* First pass: validate every key before mutating any state. */
7675 for (i = 0; i < key_count; i++) {
7676 struct mgmt_ltk_info *key = &cp->keys[i];
7678 if (!ltk_is_valid(key))
7679 return mgmt_cmd_status(sk, hdev->id,
7680 MGMT_OP_LOAD_LONG_TERM_KEYS,
7681 MGMT_STATUS_INVALID_PARAMS);
/* Replace the whole LTK store with the supplied list. */
7686 hci_smp_ltks_clear(hdev);
7688 for (i = 0; i < key_count; i++) {
7689 struct mgmt_ltk_info *key = &cp->keys[i];
7690 u8 type, authenticated;
7692 if (hci_is_blocked_key(hdev,
7693 HCI_BLOCKED_KEY_TYPE_LTK,
7695 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map mgmt key type to SMP type + authentication level. */
7700 switch (key->type) {
7701 case MGMT_LTK_UNAUTHENTICATED:
7702 authenticated = 0x00;
7703 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
7705 case MGMT_LTK_AUTHENTICATED:
7706 authenticated = 0x01;
7707 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
7709 case MGMT_LTK_P256_UNAUTH:
7710 authenticated = 0x00;
7711 type = SMP_LTK_P256;
7713 case MGMT_LTK_P256_AUTH:
7714 authenticated = 0x01;
7715 type = SMP_LTK_P256;
7717 case MGMT_LTK_P256_DEBUG:
7718 authenticated = 0x00;
7719 type = SMP_LTK_P256_DEBUG;
7725 hci_add_ltk(hdev, &key->addr.bdaddr,
7726 le_addr_type(key->addr.type), type, authenticated,
7727 key->val, key->enc_size, key->ediv, key->rand);
7730 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7733 hci_dev_unlock(hdev);
/*
 * conn_info_cmd_complete() - finish a GET_CONN_INFO pending command: reply
 * with cached RSSI/TX-power values on success or invalid markers on
 * failure, then drop the connection reference taken when it was queued.
 */
7738 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
7740 struct hci_conn *conn = cmd->user_data;
7741 struct mgmt_rp_get_conn_info rp;
/* cmd->param holds the mgmt_addr_info the requester passed in. */
7744 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
7746 if (status == MGMT_STATUS_SUCCESS) {
7747 rp.rssi = conn->rssi;
7748 rp.tx_power = conn->tx_power;
7749 rp.max_tx_power = conn->max_tx_power;
7751 rp.rssi = HCI_RSSI_INVALID;
7752 rp.tx_power = HCI_TX_POWER_INVALID;
7753 rp.max_tx_power = HCI_TX_POWER_INVALID;
7756 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
7757 status, &rp, sizeof(rp));
7759 hci_conn_drop(conn);
/*
 * conn_info_refresh_complete() - completion callback for the Read RSSI /
 * Read TX Power request issued by get_conn_info(); resolves the connection
 * from the last-sent command and completes the pending mgmt command.
 */
7765 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
7768 struct hci_cp_read_rssi *cp;
7769 struct mgmt_pending_cmd *cmd;
7770 struct hci_conn *conn;
7774 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
7778 /* Commands sent in request are either Read RSSI or Read Transmit Power
7779 * Level so we check which one was last sent to retrieve connection
7780 * handle. Both commands have handle as first parameter so it's safe to
7781 * cast data on the same command struct.
7783 * First command sent is always Read RSSI and we fail only if it fails.
7784 * In other case we simply override error to indicate success as we
7785 * already remembered if TX power value is actually valid.
7787 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
7789 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
7790 status = MGMT_STATUS_SUCCESS;
7792 status = mgmt_status(hci_status);
7796 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
7800 handle = __le16_to_cpu(cp->handle);
7801 conn = hci_conn_hash_lookup_handle(hdev, handle);
7803 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
/* Match the pending GET_CONN_INFO command keyed by this connection. */
7808 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
7812 cmd->cmd_complete(cmd, status);
7813 mgmt_pending_remove(cmd);
7816 hci_dev_unlock(hdev);
/*
 * get_conn_info() - MGMT_OP_GET_CONN_INFO handler. Returns cached
 * RSSI/TX-power for a connection, refreshing the cache from the controller
 * when it is older than a randomized conn_info age window.
 */
7819 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7822 struct mgmt_cp_get_conn_info *cp = data;
7823 struct mgmt_rp_get_conn_info rp;
7824 struct hci_conn *conn;
7825 unsigned long conn_info_age;
7828 bt_dev_dbg(hdev, "sock %p", sk);
7830 memset(&rp, 0, sizeof(rp));
7831 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7832 rp.addr.type = cp->addr.type;
7834 if (!bdaddr_type_is_valid(cp->addr.type))
7835 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7836 MGMT_STATUS_INVALID_PARAMS,
7841 if (!hdev_is_powered(hdev)) {
7842 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7843 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the ACL or LE connection depending on the address type. */
7848 if (cp->addr.type == BDADDR_BREDR)
7849 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7852 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7854 if (!conn || conn->state != BT_CONNECTED) {
7855 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7856 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one refresh per connection may be in flight. */
7861 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
7862 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7863 MGMT_STATUS_BUSY, &rp, sizeof(rp));
7867 /* To avoid client trying to guess when to poll again for information we
7868 * calculate conn info age as random value between min/max set in hdev.
7870 conn_info_age = hdev->conn_info_min_age +
7871 prandom_u32_max(hdev->conn_info_max_age -
7872 hdev->conn_info_min_age);
7874 /* Query controller to refresh cached values if they are too old or were
7877 if (time_after(jiffies, conn->conn_info_timestamp +
7878 msecs_to_jiffies(conn_info_age)) ||
7879 !conn->conn_info_timestamp) {
7880 struct hci_request req;
7881 struct hci_cp_read_tx_power req_txp_cp;
7882 struct hci_cp_read_rssi req_rssi_cp;
7883 struct mgmt_pending_cmd *cmd;
7885 hci_req_init(&req, hdev);
7886 req_rssi_cp.handle = cpu_to_le16(conn->handle);
7887 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
7890 /* For LE links TX power does not change thus we don't need to
7891 * query for it once value is known.
7893 if (!bdaddr_type_is_le(cp->addr.type) ||
7894 conn->tx_power == HCI_TX_POWER_INVALID) {
7895 req_txp_cp.handle = cpu_to_le16(conn->handle);
7896 req_txp_cp.type = 0x00;
7897 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7898 sizeof(req_txp_cp), &req_txp_cp);
7901 /* Max TX power needs to be read only once per connection */
7902 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
7903 req_txp_cp.handle = cpu_to_le16(conn->handle);
7904 req_txp_cp.type = 0x01;
7905 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7906 sizeof(req_txp_cp), &req_txp_cp);
7909 err = hci_req_run(&req, conn_info_refresh_complete);
7913 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until the refresh callback completes. */
7920 hci_conn_hold(conn);
7921 cmd->user_data = hci_conn_get(conn);
7922 cmd->cmd_complete = conn_info_cmd_complete;
7924 conn->conn_info_timestamp = jiffies;
7926 /* Cache is valid, just reply with values cached in hci_conn */
7927 rp.rssi = conn->rssi;
7928 rp.tx_power = conn->tx_power;
7929 rp.max_tx_power = conn->max_tx_power;
7931 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7932 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7936 hci_dev_unlock(hdev);
/*
 * clock_info_cmd_complete() - finish a GET_CLOCK_INFO pending command with
 * the local clock and, when a connection was involved, its piconet clock
 * and accuracy.
 */
7940 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
7942 struct hci_conn *conn = cmd->user_data;
7943 struct mgmt_rp_get_clock_info rp;
7944 struct hci_dev *hdev;
7947 memset(&rp, 0, sizeof(rp));
7948 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
7953 hdev = hci_dev_get(cmd->index);
7955 rp.local_clock = cpu_to_le32(hdev->clock);
/* Connection-specific values only when a conn reference was stored. */
7960 rp.piconet_clock = cpu_to_le32(conn->clock);
7961 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7965 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7969 hci_conn_drop(conn);
/*
 * get_clock_info_complete() - completion callback for the HCI Read Clock
 * request; resolves the connection (when "which" selects the piconet
 * clock) and completes the pending GET_CLOCK_INFO command.
 */
7976 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7978 struct hci_cp_read_clock *hci_cp;
7979 struct mgmt_pending_cmd *cmd;
7980 struct hci_conn *conn;
7982 bt_dev_dbg(hdev, "status %u", status);
7986 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
7990 if (hci_cp->which) {
7991 u16 handle = __le16_to_cpu(hci_cp->handle);
7992 conn = hci_conn_hash_lookup_handle(hdev, handle);
7997 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
8001 cmd->cmd_complete(cmd, mgmt_status(status));
8002 mgmt_pending_remove(cmd);
8005 hci_dev_unlock(hdev);
/*
 * get_clock_info() - MGMT_OP_GET_CLOCK_INFO handler. Reads the local
 * clock and, for a specific BR/EDR connection, its piconet clock via HCI
 * Read Clock.
 */
8008 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
8011 struct mgmt_cp_get_clock_info *cp = data;
8012 struct mgmt_rp_get_clock_info rp;
8013 struct hci_cp_read_clock hci_cp;
8014 struct mgmt_pending_cmd *cmd;
8015 struct hci_request req;
8016 struct hci_conn *conn;
8019 bt_dev_dbg(hdev, "sock %p", sk);
8021 memset(&rp, 0, sizeof(rp));
8022 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8023 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
8025 if (cp->addr.type != BDADDR_BREDR)
8026 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8027 MGMT_STATUS_INVALID_PARAMS,
8032 if (!hdev_is_powered(hdev)) {
8033 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8034 MGMT_STATUS_NOT_POWERED, &rp,
/* A non-ANY address selects a specific connected peer. */
8039 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8040 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8042 if (!conn || conn->state != BT_CONNECTED) {
8043 err = mgmt_cmd_complete(sk, hdev->id,
8044 MGMT_OP_GET_CLOCK_INFO,
8045 MGMT_STATUS_NOT_CONNECTED,
8053 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
8059 cmd->cmd_complete = clock_info_cmd_complete;
8061 hci_req_init(&req, hdev);
/* which = 0x00 (zeroed struct): local clock. */
8063 memset(&hci_cp, 0, sizeof(hci_cp));
8064 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection across the asynchronous request. */
8067 hci_conn_hold(conn);
8068 cmd->user_data = hci_conn_get(conn);
8070 hci_cp.handle = cpu_to_le16(conn->handle);
8071 hci_cp.which = 0x01; /* Piconet clock */
8072 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8075 err = hci_req_run(&req, get_clock_info_complete);
8077 mgmt_pending_remove(cmd);
8080 hci_dev_unlock(hdev);
/*
 * is_connected() - return whether an LE connection to the given address
 * and address type exists and is in BT_CONNECTED state.
 */
8084 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8086 struct hci_conn *conn;
8088 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8092 if (conn->dst_type != type)
8095 if (conn->state != BT_CONNECTED)
8101 /* This function requires the caller holds hdev->lock */
/*
 * hci_conn_params_set() - create/update connection parameters for an LE
 * device and move it onto the pend_le_conns / pend_le_reports action list
 * matching the requested auto_connect policy.
 *
 * NOTE(review): "¶ms" in several lines below is mojibake for "&params"
 * (HTML-entity corruption in this extract) -- confirm against upstream.
 */
8102 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8103 u8 addr_type, u8 auto_connect)
8105 struct hci_conn_params *params;
8107 params = hci_conn_params_add(hdev, addr, addr_type);
/* No-op when the policy is unchanged. */
8111 if (params->auto_connect == auto_connect)
8114 list_del_init(¶ms->action);
8116 switch (auto_connect) {
8117 case HCI_AUTO_CONN_DISABLED:
8118 case HCI_AUTO_CONN_LINK_LOSS:
8119 /* If auto connect is being disabled when we're trying to
8120 * connect to device, keep connecting.
8122 if (params->explicit_connect)
8123 list_add(¶ms->action, &hdev->pend_le_conns);
8125 case HCI_AUTO_CONN_REPORT:
8126 if (params->explicit_connect)
8127 list_add(¶ms->action, &hdev->pend_le_conns);
8129 list_add(¶ms->action, &hdev->pend_le_reports);
8131 case HCI_AUTO_CONN_DIRECT:
8132 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if we're not already connected. */
8133 if (!is_connected(hdev, addr, addr_type))
8134 list_add(¶ms->action, &hdev->pend_le_conns);
8138 params->auto_connect = auto_connect;
8140 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
8141 addr, addr_type, auto_connect);
/*
 * device_added() - emit MGMT_EV_DEVICE_ADDED to all sockets except the
 * originator.
 */
8146 static void device_added(struct sock *sk, struct hci_dev *hdev,
8147 bdaddr_t *bdaddr, u8 type, u8 action)
8149 struct mgmt_ev_device_added ev;
8151 bacpy(&ev.addr.bdaddr, bdaddr);
8152 ev.addr.type = type;
8155 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/*
 * add_device() - MGMT_OP_ADD_DEVICE handler. For BR/EDR adds the address
 * to the whitelist; for LE configures auto-connect policy via
 * hci_conn_params_set() and refreshes background scanning.
 */
8158 static int add_device(struct sock *sk, struct hci_dev *hdev,
8159 void *data, u16 len)
8161 struct mgmt_cp_add_device *cp = data;
8162 u8 auto_conn, addr_type;
8163 struct hci_conn_params *params;
8165 u32 current_flags = 0;
8167 bt_dev_dbg(hdev, "sock %p", sk);
8169 if (!bdaddr_type_is_valid(cp->addr.type) ||
8170 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
8171 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8172 MGMT_STATUS_INVALID_PARAMS,
8173 &cp->addr, sizeof(cp->addr));
/* Actions: 0x00 background scan, 0x01 allow incoming, 0x02 auto-connect. */
8175 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
8176 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8177 MGMT_STATUS_INVALID_PARAMS,
8178 &cp->addr, sizeof(cp->addr));
8182 if (cp->addr.type == BDADDR_BREDR) {
8183 /* Only incoming connections action is supported for now */
8184 if (cp->action != 0x01) {
8185 err = mgmt_cmd_complete(sk, hdev->id,
8187 MGMT_STATUS_INVALID_PARAMS,
8188 &cp->addr, sizeof(cp->addr));
8192 err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
8198 hci_req_update_scan(hdev);
8203 addr_type = le_addr_type(cp->addr.type);
/* Map mgmt action codes to the kernel auto-connect policies. */
8205 if (cp->action == 0x02)
8206 auto_conn = HCI_AUTO_CONN_ALWAYS;
8207 else if (cp->action == 0x01)
8208 auto_conn = HCI_AUTO_CONN_DIRECT;
8210 auto_conn = HCI_AUTO_CONN_REPORT;
8212 /* Kernel internally uses conn_params with resolvable private
8213 * address, but Add Device allows only identity addresses.
8214 * Make sure it is enforced before calling
8215 * hci_conn_params_lookup.
8217 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8218 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8219 MGMT_STATUS_INVALID_PARAMS,
8220 &cp->addr, sizeof(cp->addr));
8224 /* If the connection parameters don't exist for this device,
8225 * they will be created and configured with defaults.
8227 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
8229 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8230 MGMT_STATUS_FAILED, &cp->addr,
8234 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8237 current_flags = params->current_flags;
8240 hci_update_background_scan(hdev);
8243 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
8244 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
8245 SUPPORTED_DEVICE_FLAGS(), current_flags);
8247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8248 MGMT_STATUS_SUCCESS, &cp->addr,
8252 hci_dev_unlock(hdev);
/*
 * device_removed() - emit MGMT_EV_DEVICE_REMOVED to all sockets except the
 * originator.
 */
8256 static void device_removed(struct sock *sk, struct hci_dev *hdev,
8257 bdaddr_t *bdaddr, u8 type)
8259 struct mgmt_ev_device_removed ev;
8261 bacpy(&ev.addr.bdaddr, bdaddr);
8262 ev.addr.type = type;
8264 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/*
 * remove_device() - MGMT_OP_REMOVE_DEVICE handler. A specific address
 * removes that entry (whitelist for BR/EDR, conn_params for LE); the
 * BDADDR_ANY wildcard clears the whitelist and all non-disabled LE
 * connection parameters.
 *
 * NOTE(review): "¶ms" below is mojibake for "&params" -- HTML-entity
 * corruption in this extract; confirm against upstream.
 */
8267 static int remove_device(struct sock *sk, struct hci_dev *hdev,
8268 void *data, u16 len)
8270 struct mgmt_cp_remove_device *cp = data;
8273 bt_dev_dbg(hdev, "sock %p", sk);
/* Branch 1: remove one specific device. */
8277 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8278 struct hci_conn_params *params;
8281 if (!bdaddr_type_is_valid(cp->addr.type)) {
8282 err = mgmt_cmd_complete(sk, hdev->id,
8283 MGMT_OP_REMOVE_DEVICE,
8284 MGMT_STATUS_INVALID_PARAMS,
8285 &cp->addr, sizeof(cp->addr));
8289 if (cp->addr.type == BDADDR_BREDR) {
8290 err = hci_bdaddr_list_del(&hdev->whitelist,
8294 err = mgmt_cmd_complete(sk, hdev->id,
8295 MGMT_OP_REMOVE_DEVICE,
8296 MGMT_STATUS_INVALID_PARAMS,
8302 hci_req_update_scan(hdev);
8304 device_removed(sk, hdev, &cp->addr.bdaddr,
8309 addr_type = le_addr_type(cp->addr.type);
8311 /* Kernel internally uses conn_params with resolvable private
8312 * address, but Remove Device allows only identity addresses.
8313 * Make sure it is enforced before calling
8314 * hci_conn_params_lookup.
8316 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8317 err = mgmt_cmd_complete(sk, hdev->id,
8318 MGMT_OP_REMOVE_DEVICE,
8319 MGMT_STATUS_INVALID_PARAMS,
8320 &cp->addr, sizeof(cp->addr));
8324 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8327 err = mgmt_cmd_complete(sk, hdev->id,
8328 MGMT_OP_REMOVE_DEVICE,
8329 MGMT_STATUS_INVALID_PARAMS,
8330 &cp->addr, sizeof(cp->addr));
/* Disabled/explicit entries were not added via Add Device. */
8334 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
8335 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
8336 err = mgmt_cmd_complete(sk, hdev->id,
8337 MGMT_OP_REMOVE_DEVICE,
8338 MGMT_STATUS_INVALID_PARAMS,
8339 &cp->addr, sizeof(cp->addr));
8343 list_del(¶ms->action);
8344 list_del(¶ms->list);
8346 hci_update_background_scan(hdev);
8348 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* Branch 2: BDADDR_ANY wildcard -- remove everything. */
8350 struct hci_conn_params *p, *tmp;
8351 struct bdaddr_list *b, *btmp;
/* Wildcard removal requires addr.type == 0 (BR/EDR). */
8353 if (cp->addr.type) {
8354 err = mgmt_cmd_complete(sk, hdev->id,
8355 MGMT_OP_REMOVE_DEVICE,
8356 MGMT_STATUS_INVALID_PARAMS,
8357 &cp->addr, sizeof(cp->addr));
8361 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
8362 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
8367 hci_req_update_scan(hdev);
8369 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
8370 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
8372 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep explicit-connect entries alive but demote their policy. */
8373 if (p->explicit_connect) {
8374 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
8377 list_del(&p->action);
8382 bt_dev_dbg(hdev, "All LE connection parameters were removed");
8384 hci_update_background_scan(hdev);
8388 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
8389 MGMT_STATUS_SUCCESS, &cp->addr,
8392 hci_dev_unlock(hdev);
/*
 * load_conn_param() - MGMT_OP_LOAD_CONN_PARAM handler. Validates the
 * parameter array length, clears disabled conn_params, then stores each
 * valid entry's interval/latency/timeout on the device's hci_conn_params.
 *
 * NOTE(review): "¶m->addr.bdaddr" below is mojibake for
 * "&param->addr.bdaddr" -- HTML-entity corruption in this extract.
 */
8396 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
8399 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeping expected_len below U16_MAX. */
8400 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
8401 sizeof(struct mgmt_conn_param));
8402 u16 param_count, expected_len;
8405 if (!lmp_le_capable(hdev))
8406 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8407 MGMT_STATUS_NOT_SUPPORTED);
8409 param_count = __le16_to_cpu(cp->param_count);
8410 if (param_count > max_param_count) {
8411 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
8413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8414 MGMT_STATUS_INVALID_PARAMS);
/* The message length must exactly match the declared count. */
8417 expected_len = struct_size(cp, params, param_count);
8418 if (expected_len != len) {
8419 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
8421 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8422 MGMT_STATUS_INVALID_PARAMS);
8425 bt_dev_dbg(hdev, "param_count %u", param_count);
8429 hci_conn_params_clear_disabled(hdev);
8431 for (i = 0; i < param_count; i++) {
8432 struct mgmt_conn_param *param = &cp->params[i];
8433 struct hci_conn_params *hci_param;
8434 u16 min, max, latency, timeout;
8437 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
/* Only LE public/random identity addresses are acceptable. */
8440 if (param->addr.type == BDADDR_LE_PUBLIC) {
8441 addr_type = ADDR_LE_DEV_PUBLIC;
8442 } else if (param->addr.type == BDADDR_LE_RANDOM) {
8443 addr_type = ADDR_LE_DEV_RANDOM;
8445 bt_dev_err(hdev, "ignoring invalid connection parameters");
8449 min = le16_to_cpu(param->min_interval);
8450 max = le16_to_cpu(param->max_interval);
8451 latency = le16_to_cpu(param->latency);
8452 timeout = le16_to_cpu(param->timeout);
8454 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
8455 min, max, latency, timeout);
/* Invalid entries are skipped, not fatal -- best-effort load. */
8457 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
8458 bt_dev_err(hdev, "ignoring invalid connection parameters");
8462 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
8465 bt_dev_err(hdev, "failed to add connection parameters");
8469 hci_param->conn_min_interval = min;
8470 hci_param->conn_max_interval = max;
8471 hci_param->conn_latency = latency;
8472 hci_param->supervision_timeout = timeout;
8475 hci_dev_unlock(hdev);
8477 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/*
 * set_external_config() - MGMT_OP_SET_EXTERNAL_CONFIG handler. Toggles
 * the HCI_EXT_CONFIGURED flag on controllers with the EXTERNAL_CONFIG
 * quirk and, when the configured/unconfigured status flips, re-registers
 * the management index accordingly.
 */
8481 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8482 void *data, u16 len)
8484 struct mgmt_cp_set_external_config *cp = data;
8488 bt_dev_dbg(hdev, "sock %p", sk)
8490 if (hdev_is_powered(hdev))
8491 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8492 MGMT_STATUS_REJECTED);
8494 if (cp->config != 0x00 && cp->config != 0x01)
8495 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8496 MGMT_STATUS_INVALID_PARAMS);
8498 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8499 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8500 MGMT_STATUS_NOT_SUPPORTED);
/* changed is true only when the flag value actually flips. */
8505 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8507 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8509 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8516 err = new_options(hdev, sk);
/* Re-register the index when configured status changed. */
8518 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8519 mgmt_index_removed(hdev);
8521 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8522 hci_dev_set_flag(hdev, HCI_CONFIG);
8523 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8525 queue_work(hdev->req_workqueue, &hdev->power_on);
8527 set_bit(HCI_RAW, &hdev->flags);
8528 mgmt_index_added(hdev);
8533 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: program a public BD_ADDR on
 * controllers whose driver provides a set_bdaddr callback.  Only allowed
 * while powered off and with a non-zero address.
 *
 * NOTE(review): extraction is elided (missing braces, declarations,
 * hci_dev_lock(), final return).
 */
8537 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8538 void *data, u16 len)
8540 struct mgmt_cp_set_public_address *cp = data;
8544 bt_dev_dbg(hdev, "sock %p", sk);
8546 if (hdev_is_powered(hdev))
8547 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8548 MGMT_STATUS_REJECTED);
/* BDADDR_ANY (all zeroes) is not a valid public address. */
8550 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8551 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8552 MGMT_STATUS_INVALID_PARAMS);
/* Without a driver hook to write the address there is nothing to do. */
8554 if (!hdev->set_bdaddr)
8555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8556 MGMT_STATUS_NOT_SUPPORTED);
8560 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8561 bacpy(&hdev->public_addr, &cp->bdaddr);
8563 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8570 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8571 err = new_options(hdev, sk);
/* If setting the address completed configuration, re-register the
 * index as configured and power the controller on.
 */
8573 if (is_configured(hdev)) {
8574 mgmt_index_removed(hdev);
8576 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8578 hci_dev_set_flag(hdev, HCI_CONFIG);
8579 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8581 queue_work(hdev->req_workqueue, &hdev->power_on);
8585 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_NAME_UPDATE carrying the remote device's complete
 * name as an EIR_NAME_COMPLETE field.  Address type is hard-coded to
 * BDADDR_BREDR (Tizen extension event).
 *
 * NOTE(review): buf declaration and part of the eir_append_data argument
 * list are elided in this extraction.
 */
8590 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
8594 struct mgmt_ev_device_name_update *ev = (void *)buf;
8600 bacpy(&ev->addr.bdaddr, bdaddr);
8601 ev->addr.type = BDADDR_BREDR;
8603 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8606 ev->eir_len = cpu_to_le16(eir_len);
/* Event length is the fixed header plus the variable EIR payload. */
8608 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
8609 sizeof(*ev) + eir_len, NULL);
/* Emit MGMT_EV_CONN_UPDATE_FAILED (Tizen extension) when an LE connection
 * parameter update fails; the status assignment is elided here but the
 * event carries the peer address and mapped address type.
 */
8612 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8613 u8 link_type, u8 addr_type, u8 status)
8615 struct mgmt_ev_conn_update_failed ev;
8617 bacpy(&ev.addr.bdaddr, bdaddr);
/* Translate HCI link/addr type into the mgmt BDADDR_* address type. */
8618 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8621 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
8622 &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_CONN_UPDATED (Tizen extension) with the negotiated LE
 * connection interval, latency and supervision timeout, all converted
 * to little-endian wire format.
 */
8625 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
8626 u8 link_type, u8 addr_type, u16 conn_interval,
8627 u16 conn_latency, u16 supervision_timeout)
8629 struct mgmt_ev_conn_updated ev;
8631 bacpy(&ev.addr.bdaddr, bdaddr);
8632 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8633 ev.conn_interval = cpu_to_le16(conn_interval);
8634 ev.conn_latency = cpu_to_le16(conn_latency);
8635 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
8637 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
8638 &ev, sizeof(ev), NULL);
/* HCI request completion callback for Read Local OOB (Extended) Data.
 * Builds the MGMT_OP_READ_LOCAL_OOB_EXT_DATA reply from the controller's
 * response and, on success, broadcasts MGMT_EV_LOCAL_OOB_DATA_UPDATED to
 * other sockets that opted into OOB data events.
 *
 * NOTE(review): many lines (braces, h192/r192/h256/r256 assignments,
 * error paths, kfree/kfree_skb cleanup) are elided in this extraction.
 */
8642 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
8643 u16 opcode, struct sk_buff *skb)
8645 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8646 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8647 u8 *h192, *r192, *h256, *r256;
8648 struct mgmt_pending_cmd *cmd;
8652 bt_dev_dbg(hdev, "status %u", status);
8654 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
8658 mgmt_cp = cmd->param;
8661 status = mgmt_status(status);
/* Legacy (non-SC) response: single C192/R192 hash+randomizer pair. */
8668 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
8669 struct hci_rp_read_local_oob_data *rp;
/* Guard against a short/garbled controller response. */
8671 if (skb->len != sizeof(*rp)) {
8672 status = MGMT_STATUS_FAILED;
8675 status = MGMT_STATUS_SUCCESS;
8676 rp = (void *)skb->data;
/* 5 bytes class-of-device EIR + 18-byte hash + 18-byte randomizer. */
8678 eir_len = 5 + 18 + 18;
8685 struct hci_rp_read_local_oob_ext_data *rp;
8687 if (skb->len != sizeof(*rp)) {
8688 status = MGMT_STATUS_FAILED;
8691 status = MGMT_STATUS_SUCCESS;
8692 rp = (void *)skb->data;
/* SC-only: report just the P-256 pair; otherwise both P-192 and
 * P-256 pairs are included.
 */
8694 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8695 eir_len = 5 + 18 + 18;
8699 eir_len = 5 + 18 + 18 + 18 + 18;
8709 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8716 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8717 hdev->dev_class, 3);
8720 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8721 EIR_SSP_HASH_C192, h192, 16);
8722 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8723 EIR_SSP_RAND_R192, r192, 16);
8727 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8728 EIR_SSP_HASH_C256, h256, 16);
8729 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8730 EIR_SSP_RAND_R256, r256, 16);
8734 mgmt_rp->type = mgmt_cp->type;
8735 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8737 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8738 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8739 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8740 if (err < 0 || status)
/* Requester should not receive its own broadcast update. */
8743 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8745 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8746 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8747 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8750 mgmt_pending_remove(cmd);
/* Queue the HCI request that reads local SSP OOB data, choosing the
 * extended (C192+C256) variant when BR/EDR Secure Connections is enabled.
 * Completion is handled by read_local_oob_ext_data_complete().
 *
 * NOTE(review): NULL-check of mgmt_pending_add() and the final return are
 * elided in this extraction.
 */
8753 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8754 struct mgmt_cp_read_local_oob_ext_data *cp)
8756 struct mgmt_pending_cmd *cmd;
8757 struct hci_request req;
8760 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8765 hci_req_init(&req, hdev);
8767 if (bredr_sc_enabled(hdev))
8768 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
8770 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
8772 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
/* On failure to run the request, drop the pending command again. */
8774 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: return local out-of-band
 * pairing data for either BR/EDR or LE (public|random) address types.
 * BR/EDR with SSP defers to an async HCI request; LE data (address, role,
 * optional SC confirm/random values, flags) is assembled inline.
 *
 * NOTE(review): extraction is elided — switch statements, braces,
 * hci_dev_lock(), several error-path gotos and the final kfree/return are
 * missing from view.
 */
8781 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8782 void *data, u16 data_len)
8784 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8785 struct mgmt_rp_read_local_oob_ext_data *rp;
8788 u8 status, flags, role, addr[7], hash[16], rand[16];
8791 bt_dev_dbg(hdev, "sock %p", sk);
/* While powered, validate the requested transport and size the EIR. */
8793 if (hdev_is_powered(hdev)) {
8795 case BIT(BDADDR_BREDR):
8796 status = mgmt_bredr_support(hdev);
8802 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8803 status = mgmt_le_support(hdev);
/* addr(9) + role(3) + confirm(18) + random(18) + flags(3). */
8807 eir_len = 9 + 3 + 18 + 18 + 3;
8810 status = MGMT_STATUS_INVALID_PARAMS;
8815 status = MGMT_STATUS_NOT_POWERED;
8819 rp_len = sizeof(*rp) + eir_len;
8820 rp = kmalloc(rp_len, GFP_ATOMIC);
8831 case BIT(BDADDR_BREDR):
/* SSP-capable BR/EDR: answer asynchronously via HCI request. */
8832 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8833 err = read_local_ssp_oob_req(hdev, sk, cp);
8834 hci_dev_unlock(hdev);
8838 status = MGMT_STATUS_FAILED;
8841 eir_len = eir_append_data(rp->eir, eir_len,
8843 hdev->dev_class, 3);
8846 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8847 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8848 smp_generate_oob(hdev, hash, rand) < 0) {
8849 hci_dev_unlock(hdev);
8850 status = MGMT_STATUS_FAILED;
8854 /* This should return the active RPA, but since the RPA
8855 * is only programmed on demand, it is really hard to fill
8856 * this in at the moment. For now disallow retrieving
8857 * local out-of-band data when privacy is in use.
8859 * Returning the identity address will not help here since
8860 * pairing happens before the identity resolving key is
8861 * known and thus the connection establishment happens
8862 * based on the RPA and not the identity address.
8864 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8865 hci_dev_unlock(hdev);
8866 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address the same way advertising does:
 * forced static address, no public address, or LE-only with a
 * configured static address all select the static address.
 */
8870 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8871 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8872 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8873 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8874 memcpy(addr, &hdev->static_addr, 6);
8877 memcpy(addr, &hdev->bdaddr, 6);
/* addr[] is 7 bytes: 6-byte BD_ADDR plus the type byte set above
 * (assignment elided in this extraction).
 */
8881 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8882 addr, sizeof(addr));
8884 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8889 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8890 &role, sizeof(role));
8892 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8893 eir_len = eir_append_data(rp->eir, eir_len,
8895 hash, sizeof(hash));
8897 eir_len = eir_append_data(rp->eir, eir_len,
8899 rand, sizeof(rand));
8902 flags = mgmt_get_adv_discov_flags(hdev);
8904 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8905 flags |= LE_AD_NO_BREDR;
8907 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8908 &flags, sizeof(flags));
8912 hci_dev_unlock(hdev);
8914 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8916 status = MGMT_STATUS_SUCCESS;
8919 rp->type = cp->type;
8920 rp->eir_len = cpu_to_le16(eir_len);
8922 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8923 status, rp, sizeof(*rp) + eir_len);
8924 if (err < 0 || status)
/* Broadcast the fresh OOB data to other interested sockets. */
8927 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8928 rp, sizeof(*rp) + eir_len,
8929 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Return the bitmask of MGMT_ADV_FLAG_* capabilities this controller
 * supports for the Add Advertising interface.
 *
 * NOTE(review): the flags declaration/initializer and final return are
 * elided in this extraction.
 */
8937 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8941 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8942 flags |= MGMT_ADV_FLAG_DISCOV;
8943 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8944 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8945 flags |= MGMT_ADV_FLAG_APPEARANCE;
8946 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8948 /* In extended adv TX_POWER returned from Set Adv Param
8949 * will be always valid.
8951 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
8952 ext_adv_capable(hdev))
8953 flags |= MGMT_ADV_FLAG_TX_POWER;
/* Secondary-PHY and offload flags only exist with extended advertising;
 * 2M/Coded additionally require the corresponding LE PHY feature bits.
 */
8955 if (ext_adv_capable(hdev)) {
8956 flags |= MGMT_ADV_FLAG_SEC_1M;
8957 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8958 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8960 if (hdev->le_features[1] & HCI_LE_PHY_2M)
8961 flags |= MGMT_ADV_FLAG_SEC_2M;
8963 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8964 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, data-length limits, max instances and the list of currently
 * configured instance IDs.
 *
 * NOTE(review): extraction is elided (hci_dev_lock(), instance pointer
 * increment, kfree, final return are missing from view).
 */
8970 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8971 void *data, u16 data_len)
8973 struct mgmt_rp_read_adv_features *rp;
8976 struct adv_info *adv_instance;
8977 u32 supported_flags;
8980 bt_dev_dbg(hdev, "sock %p", sk);
8982 if (!lmp_le_capable(hdev))
8983 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8984 MGMT_STATUS_REJECTED);
8986 /* Enabling the experimental LL Privay support disables support for
8989 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
/* NOTE(review): opcode here is MGMT_OP_SET_ADVERTISING although this is
 * the Read Adv Features handler — looks like a copy/paste inconsistency
 * in the original; verify against upstream before changing.
 */
8990 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
8991 MGMT_STATUS_NOT_SUPPORTED);
/* One byte of instance ID per configured instance follows the header. */
8995 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8996 rp = kmalloc(rp_len, GFP_ATOMIC);
8998 hci_dev_unlock(hdev);
9002 supported_flags = get_supported_adv_flags(hdev);
9004 rp->supported_flags = cpu_to_le32(supported_flags);
9005 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
9006 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
9007 rp->max_instances = hdev->le_num_of_adv_sets;
9008 rp->num_instances = hdev->adv_instance_cnt;
9010 instance = rp->instance;
9011 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
9012 *instance = adv_instance->instance;
9016 hci_dev_unlock(hdev);
9018 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9019 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Length the local name would occupy as an EIR field, computed by
 * rendering it into a scratch buffer (short name + type/len overhead).
 */
9026 static u8 calculate_name_len(struct hci_dev *hdev)
9028 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
9030 return append_local_name(hdev, buf, 0);
/* Maximum TLV payload the caller may supply for adv or scan-response
 * data, after subtracting space consumed by kernel-managed fields
 * (flags, TX power, local name, appearance).
 *
 * NOTE(review): the per-field subtraction amounts and the final return
 * are elided in this extraction.
 */
9033 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
9036 u8 max_len = HCI_MAX_AD_LENGTH;
9039 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
9040 MGMT_ADV_FLAG_LIMITED_DISCOV |
9041 MGMT_ADV_FLAG_MANAGED_FLAGS))
9044 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
9047 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
9048 max_len -= calculate_name_len(hdev);
9050 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True when the kernel manages the EIR Flags field for this instance,
 * i.e. userspace must not supply its own EIR_FLAGS entry.
 */
9057 static bool flags_managed(u32 adv_flags)
9059 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
9060 MGMT_ADV_FLAG_LIMITED_DISCOV |
9061 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True when the kernel appends the TX power EIR field itself. */
9064 static bool tx_power_managed(u32 adv_flags)
9066 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True when the kernel appends the local name EIR field itself. */
9069 static bool name_managed(u32 adv_flags)
9071 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True when the kernel appends the appearance EIR field itself. */
9074 static bool appearance_managed(u32 adv_flags)
9076 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising / scan-response TLV data: total
 * length within limit, well-formed length-type-value structure, and no
 * fields that collide with kernel-managed EIR entries.
 *
 * NOTE(review): the length-vs-max_len check, cur_len assignment and the
 * return statements inside the loop are elided in this extraction.
 */
9079 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
9080 u8 len, bool is_adv_data)
9085 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
9090 /* Make sure that the data is correctly formatted. */
9091 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* Reject fields the kernel owns: flags (in adv data or when
 * managed), TX power, complete/short name, appearance.
 */
9097 if (data[i + 1] == EIR_FLAGS &&
9098 (!is_adv_data || flags_managed(adv_flags)))
9101 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
9104 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
9107 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
9110 if (data[i + 1] == EIR_APPEARANCE &&
9111 appearance_managed(adv_flags))
9114 /* If the current field length would exceed the total data
9115 * length, then it's invalid.
9117 if (i + cur_len >= len)
/* HCI request completion callback for Add Advertising.  On failure,
 * removes any instances still marked pending and notifies userspace;
 * then completes or fails the pending mgmt command.
 *
 * NOTE(review): braces, the status check guarding the removal loop, and
 * the cp/rp assignments are partially elided in this extraction.
 */
9124 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
9127 struct mgmt_pending_cmd *cmd;
9128 struct mgmt_cp_add_advertising *cp;
9129 struct mgmt_rp_add_advertising rp;
9130 struct adv_info *adv_instance, *n;
9133 bt_dev_dbg(hdev, "status %d", status);
9137 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
/* Sweep instances that never finished activation. */
9139 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
9140 if (!adv_instance->pending)
9144 adv_instance->pending = false;
9148 instance = adv_instance->instance;
/* Stop the rotation timer before removing the active instance. */
9150 if (hdev->cur_adv_instance == instance)
9151 cancel_adv_timeout(hdev);
9153 hci_remove_adv_instance(hdev, instance);
9154 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
9161 rp.instance = cp->instance;
9164 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9165 mgmt_status(status));
9167 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9168 mgmt_status(status), &rp, sizeof(rp));
9170 mgmt_pending_remove(cmd);
9173 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_ADVERTISING: validate flags/TLV data, register
 * (or replace) an advertising instance, and schedule it on the
 * controller when appropriate.  Completion is reported either
 * immediately (nothing to send) or via add_advertising_complete().
 *
 * NOTE(review): extraction is elided — hci_dev_lock(), several goto
 * targets/labels, parts of hci_add_adv_instance() arguments and the
 * final return are missing from view.
 */
9176 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
9177 void *data, u16 data_len)
9179 struct mgmt_cp_add_advertising *cp = data;
9180 struct mgmt_rp_add_advertising rp;
9182 u32 supported_flags, phy_flags;
9184 u16 timeout, duration;
9185 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
9186 u8 schedule_instance = 0;
9187 struct adv_info *next_instance;
9189 struct mgmt_pending_cmd *cmd;
9190 struct hci_request req;
9192 bt_dev_dbg(hdev, "sock %p", sk);
9194 status = mgmt_le_support(hdev);
9196 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9199 /* Enabling the experimental LL Privay support disables support for
9202 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9203 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9204 MGMT_STATUS_NOT_SUPPORTED);
/* Instance IDs are 1-based and bounded by the controller's adv sets. */
9206 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9207 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9208 MGMT_STATUS_INVALID_PARAMS);
/* Command length must exactly cover header + adv data + scan rsp. */
9210 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
9211 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9212 MGMT_STATUS_INVALID_PARAMS);
9214 flags = __le32_to_cpu(cp->flags);
9215 timeout = __le16_to_cpu(cp->timeout);
9216 duration = __le16_to_cpu(cp->duration);
9218 /* The current implementation only supports a subset of the specified
9219 * flags. Also need to check mutual exclusiveness of sec flags.
9221 supported_flags = get_supported_adv_flags(hdev);
9222 phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more than one
 * secondary-PHY bit is set — at most one PHY may be selected.
 */
9223 if (flags & ~supported_flags ||
9224 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
9225 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9226 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
9230 if (timeout && !hdev_is_powered(hdev)) {
9231 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9232 MGMT_STATUS_REJECTED);
/* Serialize against other in-flight advertising/LE state changes. */
9236 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9237 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9238 pending_find(MGMT_OP_SET_LE, hdev)) {
9239 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9244 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
9245 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
9246 cp->scan_rsp_len, false)) {
9247 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9248 MGMT_STATUS_INVALID_PARAMS);
9252 err = hci_add_adv_instance(hdev, cp->instance, flags,
9253 cp->adv_data_len, cp->data,
9255 cp->data + cp->adv_data_len,
9258 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9259 MGMT_STATUS_FAILED);
9263 /* Only trigger an advertising added event if a new instance was
9266 if (hdev->adv_instance_cnt > prev_instance_cnt)
9267 mgmt_advertising_added(sk, hdev, cp->instance);
9269 if (hdev->cur_adv_instance == cp->instance) {
9270 /* If the currently advertised instance is being changed then
9271 * cancel the current advertising and schedule the next
9272 * instance. If there is only one instance then the overridden
9273 * advertising data will be visible right away.
9275 cancel_adv_timeout(hdev);
9277 next_instance = hci_get_next_instance(hdev, cp->instance);
9279 schedule_instance = next_instance->instance;
9280 } else if (!hdev->adv_instance_timeout) {
9281 /* Immediately advertise the new instance if no other
9282 * instance is currently being advertised.
9284 schedule_instance = cp->instance;
9287 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
9288 * there is no instance to be advertised then we have no HCI
9289 * communication to make. Simply return.
9291 if (!hdev_is_powered(hdev) ||
9292 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9293 !schedule_instance) {
9294 rp.instance = cp->instance;
9295 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9296 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9300 /* We're good to go, update advertising data, parameters, and start
9303 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
9310 hci_req_init(&req, hdev);
9312 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
9315 err = hci_req_run(&req, add_advertising_complete);
9318 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9319 MGMT_STATUS_FAILED);
9320 mgmt_pending_remove(cmd);
9324 hci_dev_unlock(hdev);
/* HCI request completion callback for Remove Advertising.  The instance
 * removal has already happened, so the pending command is completed with
 * success regardless of the disable status.
 *
 * NOTE(review): hci_dev_lock(), the NULL-cmd check and the cp assignment
 * are elided in this extraction.
 */
9329 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
9332 struct mgmt_pending_cmd *cmd;
9333 struct mgmt_cp_remove_advertising *cp;
9334 struct mgmt_rp_remove_advertising rp;
9336 bt_dev_dbg(hdev, "status %d", status);
9340 /* A failure status here only means that we failed to disable
9341 * advertising. Otherwise, the advertising instance has been removed,
9342 * so report success.
9344 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
9349 rp.instance = cp->instance;
9351 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
9353 mgmt_pending_remove(cmd);
9356 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING: delete one instance (or all,
 * when cp->instance == 0) and disable advertising if none remain.
 * Completes immediately when no HCI traffic is needed, otherwise via
 * remove_advertising_complete().
 *
 * NOTE(review): extraction is elided — hci_dev_lock(), goto labels, the
 * else branch around the extended-advertising path and the final return
 * are missing from view.
 */
9359 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9360 void *data, u16 data_len)
9362 struct mgmt_cp_remove_advertising *cp = data;
9363 struct mgmt_rp_remove_advertising rp;
9364 struct mgmt_pending_cmd *cmd;
9365 struct hci_request req;
9368 bt_dev_dbg(hdev, "sock %p", sk);
9370 /* Enabling the experimental LL Privay support disables support for
/* NOTE(review): opcode here is MGMT_OP_SET_ADVERTISING although this is
 * the Remove Advertising handler — verify against upstream before
 * changing.
 */
9373 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9374 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
9375 MGMT_STATUS_NOT_SUPPORTED);
/* Non-zero instance must refer to an existing instance. */
9379 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9380 err = mgmt_cmd_status(sk, hdev->id,
9381 MGMT_OP_REMOVE_ADVERTISING,
9382 MGMT_STATUS_INVALID_PARAMS);
9386 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9387 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9388 pending_find(MGMT_OP_SET_LE, hdev)) {
9389 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9394 if (list_empty(&hdev->adv_instances)) {
9395 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9396 MGMT_STATUS_INVALID_PARAMS);
9400 hci_req_init(&req, hdev);
9402 /* If we use extended advertising, instance is disabled and removed */
9403 if (ext_adv_capable(hdev)) {
9404 __hci_req_disable_ext_adv_instance(&req, cp->instance);
9405 __hci_req_remove_ext_adv_instance(&req, cp->instance);
9408 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
9410 if (list_empty(&hdev->adv_instances))
9411 __hci_req_disable_advertising(&req);
9413 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
9414 * flag is set or the device isn't powered then we have no HCI
9415 * communication to make. Simply return.
9417 if (skb_queue_empty(&req.cmd_q) ||
9418 !hdev_is_powered(hdev) ||
9419 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
9420 hci_req_purge(&req);
9421 rp.instance = cp->instance;
9422 err = mgmt_cmd_complete(sk, hdev->id,
9423 MGMT_OP_REMOVE_ADVERTISING,
9424 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9428 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9435 err = hci_req_run(&req, remove_advertising_complete);
9437 mgmt_pending_remove(cmd);
9440 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: given a set of advertising
 * flags, report how many bytes remain for user-supplied adv data and
 * scan-response data after kernel-managed fields are accounted for.
 * Purely computational — no controller interaction.
 */
9445 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9446 void *data, u16 data_len)
9448 struct mgmt_cp_get_adv_size_info *cp = data;
9449 struct mgmt_rp_get_adv_size_info rp;
9450 u32 flags, supported_flags;
9453 bt_dev_dbg(hdev, "sock %p", sk);
9455 if (!lmp_le_capable(hdev))
9456 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9457 MGMT_STATUS_REJECTED);
9459 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9460 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9461 MGMT_STATUS_INVALID_PARAMS);
9463 flags = __le32_to_cpu(cp->flags);
9465 /* The current implementation only supports a subset of the specified
9468 supported_flags = get_supported_adv_flags(hdev);
9469 if (flags & ~supported_flags)
9470 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9471 MGMT_STATUS_INVALID_PARAMS);
/* Echo instance/flags back; lengths differ for adv vs scan-rsp data. */
9473 rp.instance = cp->instance;
9474 rp.flags = cp->flags;
9475 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9476 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9478 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9479 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for standard MGMT opcodes, indexed by opcode value
 * (entry 0 is unused).  Each entry gives the handler, the fixed (or
 * minimum, with HCI_MGMT_VAR_LEN) parameter size, and permission flags:
 * HCI_MGMT_UNTRUSTED (readable without CAP_NET_ADMIN), HCI_MGMT_UNCONFIGURED
 * (allowed on unconfigured controllers), HCI_MGMT_NO_HDEV /
 * HCI_MGMT_HDEV_OPTIONAL (index handling).
 *
 * NOTE(review): several flag lines are elided in this extraction, so
 * some entries appear to be missing their flag arguments.
 */
9484 static const struct hci_mgmt_handler mgmt_handlers[] = {
9485 { NULL }, /* 0x0000 (no command) */
9486 { read_version, MGMT_READ_VERSION_SIZE,
9488 HCI_MGMT_UNTRUSTED },
9489 { read_commands, MGMT_READ_COMMANDS_SIZE,
9491 HCI_MGMT_UNTRUSTED },
9492 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9494 HCI_MGMT_UNTRUSTED },
9495 { read_controller_info, MGMT_READ_INFO_SIZE,
9496 HCI_MGMT_UNTRUSTED },
9497 { set_powered, MGMT_SETTING_SIZE },
9498 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9499 { set_connectable, MGMT_SETTING_SIZE },
9500 { set_fast_connectable, MGMT_SETTING_SIZE },
9501 { set_bondable, MGMT_SETTING_SIZE },
9502 { set_link_security, MGMT_SETTING_SIZE },
9503 { set_ssp, MGMT_SETTING_SIZE },
9504 { set_hs, MGMT_SETTING_SIZE },
9505 { set_le, MGMT_SETTING_SIZE },
9506 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9507 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9508 { add_uuid, MGMT_ADD_UUID_SIZE },
9509 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9510 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9512 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9514 { disconnect, MGMT_DISCONNECT_SIZE },
9515 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9516 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9517 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9518 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9519 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9520 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9521 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9522 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9523 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9524 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9525 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9526 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9527 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9529 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9530 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9531 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9532 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9533 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9534 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9535 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9536 { set_advertising, MGMT_SETTING_SIZE },
9537 { set_bredr, MGMT_SETTING_SIZE },
9538 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9539 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9540 { set_secure_conn, MGMT_SETTING_SIZE },
9541 { set_debug_keys, MGMT_SETTING_SIZE },
9542 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9543 { load_irks, MGMT_LOAD_IRKS_SIZE,
9545 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9546 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9547 { add_device, MGMT_ADD_DEVICE_SIZE },
9548 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9549 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9551 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9553 HCI_MGMT_UNTRUSTED },
9554 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9555 HCI_MGMT_UNCONFIGURED |
9556 HCI_MGMT_UNTRUSTED },
9557 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9558 HCI_MGMT_UNCONFIGURED },
9559 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9560 HCI_MGMT_UNCONFIGURED },
9561 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9563 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9564 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9566 HCI_MGMT_UNTRUSTED },
9567 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9568 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9570 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9571 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9572 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9573 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9574 HCI_MGMT_UNTRUSTED },
9575 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9576 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9577 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9578 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9580 { set_wideband_speech, MGMT_SETTING_SIZE },
9581 { read_security_info, MGMT_READ_SECURITY_INFO_SIZE,
9582 HCI_MGMT_UNTRUSTED },
9583 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9584 HCI_MGMT_UNTRUSTED |
9585 HCI_MGMT_HDEV_OPTIONAL },
9586 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9588 HCI_MGMT_HDEV_OPTIONAL },
9589 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9590 HCI_MGMT_UNTRUSTED },
9591 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9593 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9594 HCI_MGMT_UNTRUSTED },
9595 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9597 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9598 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9599 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9600 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9602 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
/* Dispatch table for Tizen vendor-specific MGMT opcodes (see
 * mgmt_tizen.h): advertising parameter/data control, LE white list
 * management, RSSI monitoring, LE discovery and connection tuning.
 */
9606 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
9607 { NULL }, /* 0x0000 (no command) */
9608 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
9609 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
9611 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
9613 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
9614 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
9615 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
9616 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
9617 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
9618 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
9619 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
9620 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
9621 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
9622 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
9623 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
9624 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
/* Announce a newly registered controller to userspace: legacy
 * (UNCONF_)INDEX_ADDED for primary controllers plus the extended
 * EXT_INDEX_ADDED event.  Raw-only devices are never announced.
 *
 * NOTE(review): the non-primary dev_type branch and the ev.type/ev.bus
 * assignments are elided in this extraction.
 */
9628 void mgmt_index_added(struct hci_dev *hdev)
9630 struct mgmt_ev_ext_index ev;
9632 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9635 switch (hdev->dev_type) {
/* Unconfigured controllers go to the unconfigured index list. */
9637 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9638 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9639 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9642 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9643 HCI_MGMT_INDEX_EVENTS);
9656 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9657 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal: fail all pending mgmt commands with
 * INVALID_INDEX, then emit the legacy and extended index-removed events.
 *
 * NOTE(review): the non-primary dev_type branch and ev field assignments
 * are elided in this extraction.
 */
9660 void mgmt_index_removed(struct hci_dev *hdev)
9662 struct mgmt_ev_ext_index ev;
9663 u8 status = MGMT_STATUS_INVALID_INDEX;
9665 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9668 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command on this index. */
9670 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9672 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9673 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9674 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9677 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9678 HCI_MGMT_INDEX_EVENTS);
9691 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9692 HCI_MGMT_EXT_INDEX_EVENTS);
9695 /* This function requires the caller holds hdev->lock */
/* Re-enqueue stored LE connection parameters onto the pending
 * auto-connect / passive-report lists after a power-on, according to
 * each entry's auto_connect policy.
 */
9696 static void restart_le_actions(struct hci_dev *hdev)
9698 struct hci_conn_params *p;
9700 list_for_each_entry(p, &hdev->le_conn_params, list) {
9701 /* Needed for AUTO_OFF case where might not "really"
9702 * have been powered off.
9704 list_del_init(&p->action);
9706 switch (p->auto_connect) {
/* Direct/always policies trigger active reconnection attempts. */
9707 case HCI_AUTO_CONN_DIRECT:
9708 case HCI_AUTO_CONN_ALWAYS:
9709 list_add(&p->action, &hdev->pend_le_conns);
/* Report policy only asks for passive advertising reports. */
9711 case HCI_AUTO_CONN_REPORT:
9712 list_add(&p->action, &hdev->pend_le_reports);
9720 void mgmt_power_on(struct hci_dev *hdev, int err)
9722 struct cmd_lookup match = { NULL, hdev };
9724 bt_dev_dbg(hdev, "err %d", err);
9729 restart_le_actions(hdev);
9730 hci_update_background_scan(hdev);
9733 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9735 new_settings(hdev, match.sk);
9740 hci_dev_unlock(hdev);
/* Called when the controller powers down: complete pending Set Powered
 * commands, fail every other pending command with an appropriate status,
 * zero the advertised class of device, and broadcast new settings.
 */
9743 void __mgmt_power_off(struct hci_dev *hdev)
9745 struct cmd_lookup match = { NULL, hdev };
9746 u8 status, zero_cod[] = { 0, 0, 0 };
9748 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9750 /* If the power off is because of hdev unregistration let
9751 * use the appropriate INVALID_INDEX status. Otherwise use
9752 * NOT_POWERED. We cover both scenarios here since later in
9753 * mgmt_index_removed() any hci_conn callbacks will have already
9754 * been triggered, potentially causing misleading DISCONNECTED
9757 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9758 status = MGMT_STATUS_INVALID_INDEX;
9760 status = MGMT_STATUS_NOT_POWERED;
9762 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Only announce the class change if it was actually non-zero. */
9764 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9765 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9766 zero_cod, sizeof(zero_cod),
9767 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9768 ext_info_changed(hdev, NULL);
9771 new_settings(hdev, match.sk);
/* Fail a pending Set Powered command when the power-on attempt errored,
 * mapping -ERFKILL to MGMT_STATUS_RFKILLED and everything else to
 * MGMT_STATUS_FAILED.
 *
 * NOTE(review): status declaration and the NULL-cmd early return are
 * elided in this extraction.
 */
9777 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9779 struct mgmt_pending_cmd *cmd;
9782 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9786 if (err == -ERFKILL)
9787 status = MGMT_STATUS_RFKILLED;
9789 status = MGMT_STATUS_FAILED;
9791 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9793 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key so
 * userspace can persist it; store_hint tells userspace whether the key
 * should be stored permanently.
 */
9796 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9799 struct mgmt_ev_new_link_key ev;
9801 memset(&ev, 0, sizeof(ev));
9803 ev.store_hint = persistent;
9804 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9805 ev.key.addr.type = BDADDR_BREDR;
9806 ev.key.type = key->type;
9807 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9808 ev.key.pin_len = key->pin_len;
9810 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated bit to the
 * corresponding MGMT_LTK_* wire value; unknown types default to
 * unauthenticated.
 *
 * NOTE(review): the case labels for the first two groups are elided in
 * this extraction.
 */
9813 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9815 switch (ltk->type) {
9818 if (ltk->authenticated)
9819 return MGMT_LTK_AUTHENTICATED;
9820 return MGMT_LTK_UNAUTHENTICATED;
9822 if (ltk->authenticated)
9823 return MGMT_LTK_P256_AUTH;
9824 return MGMT_LTK_P256_UNAUTH;
9825 case SMP_LTK_P256_DEBUG:
9826 return MGMT_LTK_P256_DEBUG;
9829 return MGMT_LTK_UNAUTHENTICATED;
9832 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9834 struct mgmt_ev_new_long_term_key ev;
9836 memset(&ev, 0, sizeof(ev));
9838 /* Devices using resolvable or non-resolvable random addresses
9839 * without providing an identity resolving key don't require
9840 * to store long term keys. Their addresses will change the
9843 * Only when a remote device provides an identity address
9844 * make sure the long term key is stored. If the remote
9845 * identity is known, the long term keys are internally
9846 * mapped to the identity address. So allow static random
9847 * and public addresses here.
9849 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9850 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9851 ev.store_hint = 0x00;
9853 ev.store_hint = persistent;
9855 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9856 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9857 ev.key.type = mgmt_ltk_type(key);
9858 ev.key.enc_size = key->enc_size;
9859 ev.key.ediv = key->ediv;
9860 ev.key.rand = key->rand;
9862 if (key->type == SMP_LTK)
9865 /* Make sure we copy only the significant bytes based on the
9866 * encryption key size, and set the rest of the value to zeroes.
9868 memcpy(ev.key.val, key->val, key->enc_size);
9869 memset(ev.key.val + key->enc_size, 0,
9870 sizeof(ev.key.val) - key->enc_size);
9872 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9875 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9877 struct mgmt_ev_new_irk ev;
9879 memset(&ev, 0, sizeof(ev));
9881 ev.store_hint = persistent;
9883 bacpy(&ev.rpa, &irk->rpa);
9884 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9885 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9886 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9888 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9891 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9894 struct mgmt_ev_new_csrk ev;
9896 memset(&ev, 0, sizeof(ev));
9898 /* Devices using resolvable or non-resolvable random addresses
9899 * without providing an identity resolving key don't require
9900 * to store signature resolving keys. Their addresses will change
9901 * the next time around.
9903 * Only when a remote device provides an identity address
9904 * make sure the signature resolving key is stored. So allow
9905 * static random and public addresses here.
9907 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9908 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9909 ev.store_hint = 0x00;
9911 ev.store_hint = persistent;
9913 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9914 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9915 ev.key.type = csrk->type;
9916 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9918 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9921 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9922 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9923 u16 max_interval, u16 latency, u16 timeout)
9925 struct mgmt_ev_new_conn_param ev;
9927 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9930 memset(&ev, 0, sizeof(ev));
9931 bacpy(&ev.addr.bdaddr, bdaddr);
9932 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9933 ev.store_hint = store_hint;
9934 ev.min_interval = cpu_to_le16(min_interval);
9935 ev.max_interval = cpu_to_le16(max_interval);
9936 ev.latency = cpu_to_le16(latency);
9937 ev.timeout = cpu_to_le16(timeout);
9939 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9942 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9943 u32 flags, u8 *name, u8 name_len)
9946 struct mgmt_ev_device_connected *ev = (void *) buf;
9949 bacpy(&ev->addr.bdaddr, &conn->dst);
9950 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9952 ev->flags = __cpu_to_le32(flags);
9954 /* We must ensure that the EIR Data fields are ordered and
9955 * unique. Keep it simple for now and avoid the problem by not
9956 * adding any BR/EDR data to the LE adv.
9958 if (conn->le_adv_data_len > 0) {
9959 memcpy(&ev->eir[eir_len],
9960 conn->le_adv_data, conn->le_adv_data_len);
9961 eir_len = conn->le_adv_data_len;
9964 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
9967 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
9968 eir_len = eir_append_data(ev->eir, eir_len,
9970 conn->dev_class, 3);
9973 ev->eir_len = cpu_to_le16(eir_len);
9975 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
9976 sizeof(*ev) + eir_len, NULL);
9979 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9981 struct sock **sk = data;
9983 cmd->cmd_complete(cmd, 0);
9988 mgmt_pending_remove(cmd);
9991 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9993 struct hci_dev *hdev = data;
9994 struct mgmt_cp_unpair_device *cp = cmd->param;
9996 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9998 cmd->cmd_complete(cmd, 0);
9999 mgmt_pending_remove(cmd);
10002 bool mgmt_powering_down(struct hci_dev *hdev)
10004 struct mgmt_pending_cmd *cmd;
10005 struct mgmt_mode *cp;
10007 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
10018 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
10019 u8 link_type, u8 addr_type, u8 reason,
10020 bool mgmt_connected)
10022 struct mgmt_ev_device_disconnected ev;
10023 struct sock *sk = NULL;
10025 /* The connection is still in hci_conn_hash so test for 1
10026 * instead of 0 to know if this is the last one.
10028 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10029 cancel_delayed_work(&hdev->power_off);
10030 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10033 if (!mgmt_connected)
10036 if (link_type != ACL_LINK && link_type != LE_LINK)
10039 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
10041 bacpy(&ev.addr.bdaddr, bdaddr);
10042 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10043 ev.reason = reason;
10045 /* Report disconnects due to suspend */
10046 if (hdev->suspended)
10047 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
10049 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
10054 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10058 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10059 u8 link_type, u8 addr_type, u8 status)
10061 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
10062 struct mgmt_cp_disconnect *cp;
10063 struct mgmt_pending_cmd *cmd;
10065 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10068 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
10074 if (bacmp(bdaddr, &cp->addr.bdaddr))
10077 if (cp->addr.type != bdaddr_type)
10080 cmd->cmd_complete(cmd, mgmt_status(status));
10081 mgmt_pending_remove(cmd);
10084 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10085 u8 addr_type, u8 status)
10087 struct mgmt_ev_connect_failed ev;
10089 /* The connection is still in hci_conn_hash so test for 1
10090 * instead of 0 to know if this is the last one.
10092 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10093 cancel_delayed_work(&hdev->power_off);
10094 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10097 bacpy(&ev.addr.bdaddr, bdaddr);
10098 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10099 ev.status = mgmt_status(status);
10101 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
10104 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
10106 struct mgmt_ev_pin_code_request ev;
10108 bacpy(&ev.addr.bdaddr, bdaddr);
10109 ev.addr.type = BDADDR_BREDR;
10110 ev.secure = secure;
10112 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
10115 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10118 struct mgmt_pending_cmd *cmd;
10120 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
10124 cmd->cmd_complete(cmd, mgmt_status(status));
10125 mgmt_pending_remove(cmd);
10128 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10131 struct mgmt_pending_cmd *cmd;
10133 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
10137 cmd->cmd_complete(cmd, mgmt_status(status));
10138 mgmt_pending_remove(cmd);
10141 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10142 u8 link_type, u8 addr_type, u32 value,
10145 struct mgmt_ev_user_confirm_request ev;
10147 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10149 bacpy(&ev.addr.bdaddr, bdaddr);
10150 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10151 ev.confirm_hint = confirm_hint;
10152 ev.value = cpu_to_le32(value);
10154 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
10158 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10159 u8 link_type, u8 addr_type)
10161 struct mgmt_ev_user_passkey_request ev;
10163 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10165 bacpy(&ev.addr.bdaddr, bdaddr);
10166 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10168 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
10172 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10173 u8 link_type, u8 addr_type, u8 status,
10176 struct mgmt_pending_cmd *cmd;
10178 cmd = pending_find(opcode, hdev);
10182 cmd->cmd_complete(cmd, mgmt_status(status));
10183 mgmt_pending_remove(cmd);
10188 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10189 u8 link_type, u8 addr_type, u8 status)
10191 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10192 status, MGMT_OP_USER_CONFIRM_REPLY);
10195 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10196 u8 link_type, u8 addr_type, u8 status)
10198 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10200 MGMT_OP_USER_CONFIRM_NEG_REPLY);
10203 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10204 u8 link_type, u8 addr_type, u8 status)
10206 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10207 status, MGMT_OP_USER_PASSKEY_REPLY);
10210 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10211 u8 link_type, u8 addr_type, u8 status)
10213 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10215 MGMT_OP_USER_PASSKEY_NEG_REPLY);
10218 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
10219 u8 link_type, u8 addr_type, u32 passkey,
10222 struct mgmt_ev_passkey_notify ev;
10224 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10226 bacpy(&ev.addr.bdaddr, bdaddr);
10227 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10228 ev.passkey = __cpu_to_le32(passkey);
10229 ev.entered = entered;
10231 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
10234 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
10236 struct mgmt_ev_auth_failed ev;
10237 struct mgmt_pending_cmd *cmd;
10238 u8 status = mgmt_status(hci_status);
10240 bacpy(&ev.addr.bdaddr, &conn->dst);
10241 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10242 ev.status = status;
10244 cmd = find_pairing(conn);
10246 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
10247 cmd ? cmd->sk : NULL);
10250 cmd->cmd_complete(cmd, status);
10251 mgmt_pending_remove(cmd);
10255 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
10257 struct cmd_lookup match = { NULL, hdev };
10261 u8 mgmt_err = mgmt_status(status);
10262 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
10263 cmd_status_rsp, &mgmt_err);
10267 if (test_bit(HCI_AUTH, &hdev->flags))
10268 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10270 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10272 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10276 new_settings(hdev, match.sk);
10279 sock_put(match.sk);
10282 static void clear_eir(struct hci_request *req)
10284 struct hci_dev *hdev = req->hdev;
10285 struct hci_cp_write_eir cp;
10287 if (!lmp_ext_inq_capable(hdev))
10290 memset(hdev->eir, 0, sizeof(hdev->eir));
10292 memset(&cp, 0, sizeof(cp));
10294 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
10297 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
10299 struct cmd_lookup match = { NULL, hdev };
10300 struct hci_request req;
10301 bool changed = false;
10304 u8 mgmt_err = mgmt_status(status);
10306 if (enable && hci_dev_test_and_clear_flag(hdev,
10307 HCI_SSP_ENABLED)) {
10308 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
10309 new_settings(hdev, NULL);
10312 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
10318 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
10320 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
10322 changed = hci_dev_test_and_clear_flag(hdev,
10325 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
10328 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
10331 new_settings(hdev, match.sk);
10334 sock_put(match.sk);
10336 hci_req_init(&req, hdev);
10338 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10339 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
10340 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
10341 sizeof(enable), &enable);
10342 __hci_req_update_eir(&req);
10347 hci_req_run(&req, NULL);
10350 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10352 struct cmd_lookup *match = data;
10354 if (match->sk == NULL) {
10355 match->sk = cmd->sk;
10356 sock_hold(match->sk);
10360 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10363 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10365 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
10366 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
10367 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10370 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10371 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10372 ext_info_changed(hdev, NULL);
10376 sock_put(match.sk);
10379 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10381 struct mgmt_cp_set_local_name ev;
10382 struct mgmt_pending_cmd *cmd;
10387 memset(&ev, 0, sizeof(ev));
10388 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10389 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10391 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10393 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10395 /* If this is a HCI command related to powering on the
10396 * HCI dev don't send any mgmt signals.
10398 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10402 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10403 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10404 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10407 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10411 for (i = 0; i < uuid_count; i++) {
10412 if (!memcmp(uuid, uuids[i], 16))
10419 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10423 while (parsed < eir_len) {
10424 u8 field_len = eir[0];
10428 if (field_len == 0)
10431 if (eir_len - parsed < field_len + 1)
10435 case EIR_UUID16_ALL:
10436 case EIR_UUID16_SOME:
10437 for (i = 0; i + 3 <= field_len; i += 2) {
10438 memcpy(uuid, bluetooth_base_uuid, 16);
10439 uuid[13] = eir[i + 3];
10440 uuid[12] = eir[i + 2];
10441 if (has_uuid(uuid, uuid_count, uuids))
10445 case EIR_UUID32_ALL:
10446 case EIR_UUID32_SOME:
10447 for (i = 0; i + 5 <= field_len; i += 4) {
10448 memcpy(uuid, bluetooth_base_uuid, 16);
10449 uuid[15] = eir[i + 5];
10450 uuid[14] = eir[i + 4];
10451 uuid[13] = eir[i + 3];
10452 uuid[12] = eir[i + 2];
10453 if (has_uuid(uuid, uuid_count, uuids))
10457 case EIR_UUID128_ALL:
10458 case EIR_UUID128_SOME:
10459 for (i = 0; i + 17 <= field_len; i += 16) {
10460 memcpy(uuid, eir + i + 2, 16);
10461 if (has_uuid(uuid, uuid_count, uuids))
10467 parsed += field_len + 1;
10468 eir += field_len + 1;
10474 static void restart_le_scan(struct hci_dev *hdev)
10476 /* If controller is not scanning we are done. */
10477 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
10480 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
10481 hdev->discovery.scan_start +
10482 hdev->discovery.scan_duration))
10485 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
10486 DISCOV_LE_RESTART_DELAY);
10489 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10490 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10492 /* If a RSSI threshold has been specified, and
10493 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10494 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10495 * is set, let it through for further processing, as we might need to
10496 * restart the scan.
10498 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10499 * the results are also dropped.
10501 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10502 (rssi == HCI_RSSI_INVALID ||
10503 (rssi < hdev->discovery.rssi &&
10504 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10507 if (hdev->discovery.uuid_count != 0) {
10508 /* If a list of UUIDs is provided in filter, results with no
10509 * matching UUID should be dropped.
10511 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10512 hdev->discovery.uuids) &&
10513 !eir_has_uuids(scan_rsp, scan_rsp_len,
10514 hdev->discovery.uuid_count,
10515 hdev->discovery.uuids))
10519 /* If duplicate filtering does not report RSSI changes, then restart
10520 * scanning to ensure updated result with updated RSSI values.
10522 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10523 restart_le_scan(hdev);
10525 /* Validate RSSI value against the RSSI threshold once more. */
10526 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10527 rssi < hdev->discovery.rssi)
10534 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10535 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10536 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10539 struct mgmt_ev_device_found *ev = (void *)buf;
10542 /* Don't send events for a non-kernel initiated discovery. With
10543 * LE one exception is if we have pend_le_reports > 0 in which
10544 * case we're doing passive scanning and want these events.
10546 if (!hci_discovery_active(hdev)) {
10547 if (link_type == ACL_LINK)
10549 if (link_type == LE_LINK &&
10550 list_empty(&hdev->pend_le_reports) &&
10551 !hci_is_adv_monitoring(hdev)) {
10556 if (hdev->discovery.result_filtering) {
10557 /* We are using service discovery */
10558 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10563 if (hdev->discovery.limited) {
10564 /* Check for limited discoverable bit */
10566 if (!(dev_class[1] & 0x20))
10569 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10570 if (!flags || !(flags[0] & LE_AD_LIMITED))
10575 /* Make sure that the buffer is big enough. The 5 extra bytes
10576 * are for the potential CoD field.
10578 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
10581 memset(buf, 0, sizeof(buf));
10583 /* In case of device discovery with BR/EDR devices (pre 1.2), the
10584 * RSSI value was reported as 0 when not available. This behavior
10585 * is kept when using device discovery. This is required for full
10586 * backwards compatibility with the API.
10588 * However when using service discovery, the value 127 will be
10589 * returned when the RSSI is not available.
10591 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10592 link_type == ACL_LINK)
10595 bacpy(&ev->addr.bdaddr, bdaddr);
10596 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10598 ev->flags = cpu_to_le32(flags);
10601 /* Copy EIR or advertising data into event */
10602 memcpy(ev->eir, eir, eir_len);
10604 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
10606 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
10609 if (scan_rsp_len > 0)
10610 /* Append scan response data to event */
10611 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
10613 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10614 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
10616 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
10619 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10620 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10622 struct mgmt_ev_device_found *ev;
10623 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
10626 ev = (struct mgmt_ev_device_found *) buf;
10628 memset(buf, 0, sizeof(buf));
10630 bacpy(&ev->addr.bdaddr, bdaddr);
10631 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10634 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
10637 ev->eir_len = cpu_to_le16(eir_len);
10639 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
10642 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10644 struct mgmt_ev_discovering ev;
10646 bt_dev_dbg(hdev, "discovering %u", discovering);
10648 memset(&ev, 0, sizeof(ev));
10649 ev.type = hdev->discovery.type;
10650 ev.discovering = discovering;
10652 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10655 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10657 struct mgmt_ev_controller_suspend ev;
10659 ev.suspend_state = state;
10660 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10663 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10666 struct mgmt_ev_controller_resume ev;
10668 ev.wake_reason = reason;
10670 bacpy(&ev.addr.bdaddr, bdaddr);
10671 ev.addr.type = addr_type;
10673 memset(&ev.addr, 0, sizeof(ev.addr));
10676 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10679 static struct hci_mgmt_chan chan = {
10680 .channel = HCI_CHANNEL_CONTROL,
10681 .handler_count = ARRAY_SIZE(mgmt_handlers),
10682 .handlers = mgmt_handlers,
10684 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
10685 .tizen_handlers = tizen_mgmt_handlers,
10687 .hdev_init = mgmt_init_hdev,
10690 int mgmt_init(void)
10692 return hci_mgmt_chan_register(&chan);
10695 void mgmt_exit(void)
10697 hci_mgmt_chan_unregister(&chan);