/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
39 #include "hci_request.h"
41 #include "mgmt_util.h"
42 #include "mgmt_config.h"
45 #define MGMT_VERSION 1
46 #define MGMT_REVISION 18
48 static const u16 mgmt_commands[] = {
49 MGMT_OP_READ_INDEX_LIST,
52 MGMT_OP_SET_DISCOVERABLE,
53 MGMT_OP_SET_CONNECTABLE,
54 MGMT_OP_SET_FAST_CONNECTABLE,
56 MGMT_OP_SET_LINK_SECURITY,
60 MGMT_OP_SET_DEV_CLASS,
61 MGMT_OP_SET_LOCAL_NAME,
64 MGMT_OP_LOAD_LINK_KEYS,
65 MGMT_OP_LOAD_LONG_TERM_KEYS,
67 MGMT_OP_GET_CONNECTIONS,
68 MGMT_OP_PIN_CODE_REPLY,
69 MGMT_OP_PIN_CODE_NEG_REPLY,
70 MGMT_OP_SET_IO_CAPABILITY,
72 MGMT_OP_CANCEL_PAIR_DEVICE,
73 MGMT_OP_UNPAIR_DEVICE,
74 MGMT_OP_USER_CONFIRM_REPLY,
75 MGMT_OP_USER_CONFIRM_NEG_REPLY,
76 MGMT_OP_USER_PASSKEY_REPLY,
77 MGMT_OP_USER_PASSKEY_NEG_REPLY,
78 MGMT_OP_READ_LOCAL_OOB_DATA,
79 MGMT_OP_ADD_REMOTE_OOB_DATA,
80 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
81 MGMT_OP_START_DISCOVERY,
82 MGMT_OP_STOP_DISCOVERY,
85 MGMT_OP_UNBLOCK_DEVICE,
86 MGMT_OP_SET_DEVICE_ID,
87 MGMT_OP_SET_ADVERTISING,
89 MGMT_OP_SET_STATIC_ADDRESS,
90 MGMT_OP_SET_SCAN_PARAMS,
91 MGMT_OP_SET_SECURE_CONN,
92 MGMT_OP_SET_DEBUG_KEYS,
95 MGMT_OP_GET_CONN_INFO,
96 MGMT_OP_GET_CLOCK_INFO,
98 MGMT_OP_REMOVE_DEVICE,
99 MGMT_OP_LOAD_CONN_PARAM,
100 MGMT_OP_READ_UNCONF_INDEX_LIST,
101 MGMT_OP_READ_CONFIG_INFO,
102 MGMT_OP_SET_EXTERNAL_CONFIG,
103 MGMT_OP_SET_PUBLIC_ADDRESS,
104 MGMT_OP_START_SERVICE_DISCOVERY,
105 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
106 MGMT_OP_READ_EXT_INDEX_LIST,
107 MGMT_OP_READ_ADV_FEATURES,
108 MGMT_OP_ADD_ADVERTISING,
109 MGMT_OP_REMOVE_ADVERTISING,
110 MGMT_OP_GET_ADV_SIZE_INFO,
111 MGMT_OP_START_LIMITED_DISCOVERY,
112 MGMT_OP_READ_EXT_INFO,
113 MGMT_OP_SET_APPEARANCE,
114 MGMT_OP_SET_BLOCKED_KEYS,
115 MGMT_OP_SET_WIDEBAND_SPEECH,
116 MGMT_OP_READ_SECURITY_INFO,
117 MGMT_OP_READ_EXP_FEATURES_INFO,
118 MGMT_OP_SET_EXP_FEATURE,
119 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
120 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
121 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
122 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
123 MGMT_OP_GET_DEVICE_FLAGS,
124 MGMT_OP_SET_DEVICE_FLAGS,
125 MGMT_OP_READ_ADV_MONITOR_FEATURES,
126 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
127 MGMT_OP_REMOVE_ADV_MONITOR,
130 static const u16 mgmt_events[] = {
131 MGMT_EV_CONTROLLER_ERROR,
133 MGMT_EV_INDEX_REMOVED,
134 MGMT_EV_NEW_SETTINGS,
135 MGMT_EV_CLASS_OF_DEV_CHANGED,
136 MGMT_EV_LOCAL_NAME_CHANGED,
137 MGMT_EV_NEW_LINK_KEY,
138 MGMT_EV_NEW_LONG_TERM_KEY,
139 MGMT_EV_DEVICE_CONNECTED,
140 MGMT_EV_DEVICE_DISCONNECTED,
141 MGMT_EV_CONNECT_FAILED,
142 MGMT_EV_PIN_CODE_REQUEST,
143 MGMT_EV_USER_CONFIRM_REQUEST,
144 MGMT_EV_USER_PASSKEY_REQUEST,
146 MGMT_EV_DEVICE_FOUND,
148 MGMT_EV_DEVICE_BLOCKED,
149 MGMT_EV_DEVICE_UNBLOCKED,
150 MGMT_EV_DEVICE_UNPAIRED,
151 MGMT_EV_PASSKEY_NOTIFY,
154 MGMT_EV_DEVICE_ADDED,
155 MGMT_EV_DEVICE_REMOVED,
156 MGMT_EV_NEW_CONN_PARAM,
157 MGMT_EV_UNCONF_INDEX_ADDED,
158 MGMT_EV_UNCONF_INDEX_REMOVED,
159 MGMT_EV_NEW_CONFIG_OPTIONS,
160 MGMT_EV_EXT_INDEX_ADDED,
161 MGMT_EV_EXT_INDEX_REMOVED,
162 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
163 MGMT_EV_ADVERTISING_ADDED,
164 MGMT_EV_ADVERTISING_REMOVED,
165 MGMT_EV_EXT_INFO_CHANGED,
166 MGMT_EV_PHY_CONFIGURATION_CHANGED,
167 MGMT_EV_EXP_FEATURE_CHANGED,
168 MGMT_EV_DEVICE_FLAGS_CHANGED,
169 MGMT_EV_CONTROLLER_SUSPEND,
170 MGMT_EV_CONTROLLER_RESUME,
173 static const u16 mgmt_untrusted_commands[] = {
174 MGMT_OP_READ_INDEX_LIST,
176 MGMT_OP_READ_UNCONF_INDEX_LIST,
177 MGMT_OP_READ_CONFIG_INFO,
178 MGMT_OP_READ_EXT_INDEX_LIST,
179 MGMT_OP_READ_EXT_INFO,
180 MGMT_OP_READ_SECURITY_INFO,
181 MGMT_OP_READ_EXP_FEATURES_INFO,
182 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
183 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
186 static const u16 mgmt_untrusted_events[] = {
188 MGMT_EV_INDEX_REMOVED,
189 MGMT_EV_NEW_SETTINGS,
190 MGMT_EV_CLASS_OF_DEV_CHANGED,
191 MGMT_EV_LOCAL_NAME_CHANGED,
192 MGMT_EV_UNCONF_INDEX_ADDED,
193 MGMT_EV_UNCONF_INDEX_REMOVED,
194 MGMT_EV_NEW_CONFIG_OPTIONS,
195 MGMT_EV_EXT_INDEX_ADDED,
196 MGMT_EV_EXT_INDEX_REMOVED,
197 MGMT_EV_EXT_INFO_CHANGED,
198 MGMT_EV_EXP_FEATURE_CHANGED,
199 MGMT_EV_ADV_MONITOR_ADDED,
200 MGMT_EV_ADV_MONITOR_REMOVED,
203 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
205 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
206 "\x00\x00\x00\x00\x00\x00\x00\x00"
208 /* HCI to MGMT error code conversion table */
209 static const u8 mgmt_status_table[] = {
211 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
212 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
213 MGMT_STATUS_FAILED, /* Hardware Failure */
214 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
215 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
216 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
217 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
218 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
219 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
220 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
221 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
222 MGMT_STATUS_BUSY, /* Command Disallowed */
223 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
224 MGMT_STATUS_REJECTED, /* Rejected Security */
225 MGMT_STATUS_REJECTED, /* Rejected Personal */
226 MGMT_STATUS_TIMEOUT, /* Host Timeout */
227 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
228 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
229 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
230 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
231 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
232 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
233 MGMT_STATUS_BUSY, /* Repeated Attempts */
234 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
235 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
236 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
237 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
238 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
239 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
240 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
241 MGMT_STATUS_FAILED, /* Unspecified Error */
242 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
243 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
244 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
245 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
246 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
247 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
248 MGMT_STATUS_FAILED, /* Unit Link Key Used */
249 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
250 MGMT_STATUS_TIMEOUT, /* Instant Passed */
251 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
252 MGMT_STATUS_FAILED, /* Transaction Collision */
253 MGMT_STATUS_FAILED, /* Reserved for future use */
254 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
255 MGMT_STATUS_REJECTED, /* QoS Rejected */
256 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
257 MGMT_STATUS_REJECTED, /* Insufficient Security */
258 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
259 MGMT_STATUS_FAILED, /* Reserved for future use */
260 MGMT_STATUS_BUSY, /* Role Switch Pending */
261 MGMT_STATUS_FAILED, /* Reserved for future use */
262 MGMT_STATUS_FAILED, /* Slot Violation */
263 MGMT_STATUS_FAILED, /* Role Switch Failed */
264 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
265 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
266 MGMT_STATUS_BUSY, /* Host Busy Pairing */
267 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
268 MGMT_STATUS_BUSY, /* Controller Busy */
269 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
270 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
271 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
272 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
273 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
276 static u8 mgmt_status(u8 hci_status)
278 if (hci_status < ARRAY_SIZE(mgmt_status_table))
279 return mgmt_status_table[hci_status];
281 return MGMT_STATUS_FAILED;
284 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
287 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
291 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
292 u16 len, int flag, struct sock *skip_sk)
294 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
298 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
299 struct sock *skip_sk)
301 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
302 HCI_SOCK_TRUSTED, skip_sk);
305 static u8 le_addr_type(u8 mgmt_addr_type)
307 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
308 return ADDR_LE_DEV_PUBLIC;
310 return ADDR_LE_DEV_RANDOM;
313 void mgmt_fill_version_info(void *ver)
315 struct mgmt_rp_read_version *rp = ver;
317 rp->version = MGMT_VERSION;
318 rp->revision = cpu_to_le16(MGMT_REVISION);
321 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
324 struct mgmt_rp_read_version rp;
326 bt_dev_dbg(hdev, "sock %p", sk);
328 mgmt_fill_version_info(&rp);
330 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
334 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
337 struct mgmt_rp_read_commands *rp;
338 u16 num_commands, num_events;
342 bt_dev_dbg(hdev, "sock %p", sk);
344 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
345 num_commands = ARRAY_SIZE(mgmt_commands);
346 num_events = ARRAY_SIZE(mgmt_events);
348 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
349 num_events = ARRAY_SIZE(mgmt_untrusted_events);
352 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
354 rp = kmalloc(rp_size, GFP_KERNEL);
358 rp->num_commands = cpu_to_le16(num_commands);
359 rp->num_events = cpu_to_le16(num_events);
361 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
362 __le16 *opcode = rp->opcodes;
364 for (i = 0; i < num_commands; i++, opcode++)
365 put_unaligned_le16(mgmt_commands[i], opcode);
367 for (i = 0; i < num_events; i++, opcode++)
368 put_unaligned_le16(mgmt_events[i], opcode);
370 __le16 *opcode = rp->opcodes;
372 for (i = 0; i < num_commands; i++, opcode++)
373 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
375 for (i = 0; i < num_events; i++, opcode++)
376 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
379 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
386 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
389 struct mgmt_rp_read_index_list *rp;
395 bt_dev_dbg(hdev, "sock %p", sk);
397 read_lock(&hci_dev_list_lock);
400 list_for_each_entry(d, &hci_dev_list, list) {
401 if (d->dev_type == HCI_PRIMARY &&
402 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
406 rp_len = sizeof(*rp) + (2 * count);
407 rp = kmalloc(rp_len, GFP_ATOMIC);
409 read_unlock(&hci_dev_list_lock);
414 list_for_each_entry(d, &hci_dev_list, list) {
415 if (hci_dev_test_flag(d, HCI_SETUP) ||
416 hci_dev_test_flag(d, HCI_CONFIG) ||
417 hci_dev_test_flag(d, HCI_USER_CHANNEL))
420 /* Devices marked as raw-only are neither configured
421 * nor unconfigured controllers.
423 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
426 if (d->dev_type == HCI_PRIMARY &&
427 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
428 rp->index[count++] = cpu_to_le16(d->id);
429 bt_dev_dbg(hdev, "Added hci%u", d->id);
433 rp->num_controllers = cpu_to_le16(count);
434 rp_len = sizeof(*rp) + (2 * count);
436 read_unlock(&hci_dev_list_lock);
438 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
446 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
447 void *data, u16 data_len)
449 struct mgmt_rp_read_unconf_index_list *rp;
455 bt_dev_dbg(hdev, "sock %p", sk);
457 read_lock(&hci_dev_list_lock);
460 list_for_each_entry(d, &hci_dev_list, list) {
461 if (d->dev_type == HCI_PRIMARY &&
462 hci_dev_test_flag(d, HCI_UNCONFIGURED))
466 rp_len = sizeof(*rp) + (2 * count);
467 rp = kmalloc(rp_len, GFP_ATOMIC);
469 read_unlock(&hci_dev_list_lock);
474 list_for_each_entry(d, &hci_dev_list, list) {
475 if (hci_dev_test_flag(d, HCI_SETUP) ||
476 hci_dev_test_flag(d, HCI_CONFIG) ||
477 hci_dev_test_flag(d, HCI_USER_CHANNEL))
480 /* Devices marked as raw-only are neither configured
481 * nor unconfigured controllers.
483 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
486 if (d->dev_type == HCI_PRIMARY &&
487 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
488 rp->index[count++] = cpu_to_le16(d->id);
489 bt_dev_dbg(hdev, "Added hci%u", d->id);
493 rp->num_controllers = cpu_to_le16(count);
494 rp_len = sizeof(*rp) + (2 * count);
496 read_unlock(&hci_dev_list_lock);
498 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
499 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
506 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
507 void *data, u16 data_len)
509 struct mgmt_rp_read_ext_index_list *rp;
514 bt_dev_dbg(hdev, "sock %p", sk);
516 read_lock(&hci_dev_list_lock);
519 list_for_each_entry(d, &hci_dev_list, list) {
520 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
524 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
526 read_unlock(&hci_dev_list_lock);
531 list_for_each_entry(d, &hci_dev_list, list) {
532 if (hci_dev_test_flag(d, HCI_SETUP) ||
533 hci_dev_test_flag(d, HCI_CONFIG) ||
534 hci_dev_test_flag(d, HCI_USER_CHANNEL))
537 /* Devices marked as raw-only are neither configured
538 * nor unconfigured controllers.
540 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
543 if (d->dev_type == HCI_PRIMARY) {
544 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
545 rp->entry[count].type = 0x01;
547 rp->entry[count].type = 0x00;
548 } else if (d->dev_type == HCI_AMP) {
549 rp->entry[count].type = 0x02;
554 rp->entry[count].bus = d->bus;
555 rp->entry[count++].index = cpu_to_le16(d->id);
556 bt_dev_dbg(hdev, "Added hci%u", d->id);
559 rp->num_controllers = cpu_to_le16(count);
561 read_unlock(&hci_dev_list_lock);
563 /* If this command is called at least once, then all the
564 * default index and unconfigured index events are disabled
565 * and from now on only extended index events are used.
567 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
568 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
569 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
571 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
572 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
573 struct_size(rp, entry, count));
580 static bool is_configured(struct hci_dev *hdev)
582 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
583 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
586 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
587 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
588 !bacmp(&hdev->public_addr, BDADDR_ANY))
594 static __le32 get_missing_options(struct hci_dev *hdev)
598 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
599 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
600 options |= MGMT_OPTION_EXTERNAL_CONFIG;
602 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
603 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
604 !bacmp(&hdev->public_addr, BDADDR_ANY))
605 options |= MGMT_OPTION_PUBLIC_ADDRESS;
607 return cpu_to_le32(options);
610 static int new_options(struct hci_dev *hdev, struct sock *skip)
612 __le32 options = get_missing_options(hdev);
614 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
615 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
618 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
620 __le32 options = get_missing_options(hdev);
622 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
626 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
627 void *data, u16 data_len)
629 struct mgmt_rp_read_config_info rp;
632 bt_dev_dbg(hdev, "sock %p", sk);
636 memset(&rp, 0, sizeof(rp));
637 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
639 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
640 options |= MGMT_OPTION_EXTERNAL_CONFIG;
642 if (hdev->set_bdaddr)
643 options |= MGMT_OPTION_PUBLIC_ADDRESS;
645 rp.supported_options = cpu_to_le32(options);
646 rp.missing_options = get_missing_options(hdev);
648 hci_dev_unlock(hdev);
650 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
654 static u32 get_supported_phys(struct hci_dev *hdev)
656 u32 supported_phys = 0;
658 if (lmp_bredr_capable(hdev)) {
659 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
661 if (hdev->features[0][0] & LMP_3SLOT)
662 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
664 if (hdev->features[0][0] & LMP_5SLOT)
665 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
667 if (lmp_edr_2m_capable(hdev)) {
668 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
670 if (lmp_edr_3slot_capable(hdev))
671 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
673 if (lmp_edr_5slot_capable(hdev))
674 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
676 if (lmp_edr_3m_capable(hdev)) {
677 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
679 if (lmp_edr_3slot_capable(hdev))
680 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
682 if (lmp_edr_5slot_capable(hdev))
683 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
688 if (lmp_le_capable(hdev)) {
689 supported_phys |= MGMT_PHY_LE_1M_TX;
690 supported_phys |= MGMT_PHY_LE_1M_RX;
692 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
693 supported_phys |= MGMT_PHY_LE_2M_TX;
694 supported_phys |= MGMT_PHY_LE_2M_RX;
697 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
698 supported_phys |= MGMT_PHY_LE_CODED_TX;
699 supported_phys |= MGMT_PHY_LE_CODED_RX;
703 return supported_phys;
706 static u32 get_selected_phys(struct hci_dev *hdev)
708 u32 selected_phys = 0;
710 if (lmp_bredr_capable(hdev)) {
711 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
713 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
714 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
716 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
717 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
719 if (lmp_edr_2m_capable(hdev)) {
720 if (!(hdev->pkt_type & HCI_2DH1))
721 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
723 if (lmp_edr_3slot_capable(hdev) &&
724 !(hdev->pkt_type & HCI_2DH3))
725 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
727 if (lmp_edr_5slot_capable(hdev) &&
728 !(hdev->pkt_type & HCI_2DH5))
729 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
731 if (lmp_edr_3m_capable(hdev)) {
732 if (!(hdev->pkt_type & HCI_3DH1))
733 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
735 if (lmp_edr_3slot_capable(hdev) &&
736 !(hdev->pkt_type & HCI_3DH3))
737 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
739 if (lmp_edr_5slot_capable(hdev) &&
740 !(hdev->pkt_type & HCI_3DH5))
741 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
746 if (lmp_le_capable(hdev)) {
747 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
748 selected_phys |= MGMT_PHY_LE_1M_TX;
750 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
751 selected_phys |= MGMT_PHY_LE_1M_RX;
753 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
754 selected_phys |= MGMT_PHY_LE_2M_TX;
756 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
757 selected_phys |= MGMT_PHY_LE_2M_RX;
759 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
760 selected_phys |= MGMT_PHY_LE_CODED_TX;
762 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
763 selected_phys |= MGMT_PHY_LE_CODED_RX;
766 return selected_phys;
769 static u32 get_configurable_phys(struct hci_dev *hdev)
771 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
772 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
775 static u32 get_supported_settings(struct hci_dev *hdev)
779 settings |= MGMT_SETTING_POWERED;
780 settings |= MGMT_SETTING_BONDABLE;
781 settings |= MGMT_SETTING_DEBUG_KEYS;
782 settings |= MGMT_SETTING_CONNECTABLE;
783 settings |= MGMT_SETTING_DISCOVERABLE;
785 if (lmp_bredr_capable(hdev)) {
786 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
787 settings |= MGMT_SETTING_FAST_CONNECTABLE;
788 settings |= MGMT_SETTING_BREDR;
789 settings |= MGMT_SETTING_LINK_SECURITY;
791 if (lmp_ssp_capable(hdev)) {
792 settings |= MGMT_SETTING_SSP;
793 if (IS_ENABLED(CONFIG_BT_HS))
794 settings |= MGMT_SETTING_HS;
797 if (lmp_sc_capable(hdev))
798 settings |= MGMT_SETTING_SECURE_CONN;
800 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
802 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
805 if (lmp_le_capable(hdev)) {
806 settings |= MGMT_SETTING_LE;
807 settings |= MGMT_SETTING_SECURE_CONN;
808 settings |= MGMT_SETTING_PRIVACY;
809 settings |= MGMT_SETTING_STATIC_ADDRESS;
811 /* When the experimental feature for LL Privacy support is
812 * enabled, then advertising is no longer supported.
814 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
815 settings |= MGMT_SETTING_ADVERTISING;
818 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
820 settings |= MGMT_SETTING_CONFIGURATION;
822 settings |= MGMT_SETTING_PHY_CONFIGURATION;
827 static u32 get_current_settings(struct hci_dev *hdev)
831 if (hdev_is_powered(hdev))
832 settings |= MGMT_SETTING_POWERED;
834 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
835 settings |= MGMT_SETTING_CONNECTABLE;
837 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
838 settings |= MGMT_SETTING_FAST_CONNECTABLE;
840 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
841 settings |= MGMT_SETTING_DISCOVERABLE;
843 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
844 settings |= MGMT_SETTING_BONDABLE;
846 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
847 settings |= MGMT_SETTING_BREDR;
849 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
850 settings |= MGMT_SETTING_LE;
852 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
853 settings |= MGMT_SETTING_LINK_SECURITY;
855 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
856 settings |= MGMT_SETTING_SSP;
858 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
859 settings |= MGMT_SETTING_HS;
861 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
862 settings |= MGMT_SETTING_ADVERTISING;
864 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
865 settings |= MGMT_SETTING_SECURE_CONN;
867 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
868 settings |= MGMT_SETTING_DEBUG_KEYS;
870 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
871 settings |= MGMT_SETTING_PRIVACY;
873 /* The current setting for static address has two purposes. The
874 * first is to indicate if the static address will be used and
875 * the second is to indicate if it is actually set.
877 * This means if the static address is not configured, this flag
878 * will never be set. If the address is configured, then if the
879 * address is actually used decides if the flag is set or not.
881 * For single mode LE only controllers and dual-mode controllers
882 * with BR/EDR disabled, the existence of the static address will
885 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
886 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
887 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
888 if (bacmp(&hdev->static_addr, BDADDR_ANY))
889 settings |= MGMT_SETTING_STATIC_ADDRESS;
892 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
893 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
898 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
900 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
903 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
904 struct hci_dev *hdev,
907 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
910 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
912 struct mgmt_pending_cmd *cmd;
914 /* If there's a pending mgmt command the flags will not yet have
915 * their final values, so check for this first.
917 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
919 struct mgmt_mode *cp = cmd->param;
921 return LE_AD_GENERAL;
922 else if (cp->val == 0x02)
923 return LE_AD_LIMITED;
925 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
926 return LE_AD_LIMITED;
927 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
928 return LE_AD_GENERAL;
934 bool mgmt_get_connectable(struct hci_dev *hdev)
936 struct mgmt_pending_cmd *cmd;
938 /* If there's a pending mgmt command the flag will not yet have
939 * it's final value, so check for this first.
941 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
943 struct mgmt_mode *cp = cmd->param;
948 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
951 static void service_cache_off(struct work_struct *work)
953 struct hci_dev *hdev = container_of(work, struct hci_dev,
955 struct hci_request req;
957 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
960 hci_req_init(&req, hdev);
964 __hci_req_update_eir(&req);
965 __hci_req_update_class(&req);
967 hci_dev_unlock(hdev);
969 hci_req_run(&req, NULL);
972 static void rpa_expired(struct work_struct *work)
974 struct hci_dev *hdev = container_of(work, struct hci_dev,
976 struct hci_request req;
978 bt_dev_dbg(hdev, "");
980 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
982 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
985 /* The generation of a new RPA and programming it into the
986 * controller happens in the hci_req_enable_advertising()
989 hci_req_init(&req, hdev);
990 if (ext_adv_capable(hdev))
991 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
993 __hci_req_enable_advertising(&req);
994 hci_req_run(&req, NULL);
997 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
999 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1002 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1003 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1005 /* Non-mgmt controlled devices get this bit set
1006 * implicitly so that pairing works for them, however
1007 * for mgmt we require user-space to explicitly enable
1010 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1013 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1014 void *data, u16 data_len)
1016 struct mgmt_rp_read_info rp;
1018 bt_dev_dbg(hdev, "sock %p", sk);
1022 memset(&rp, 0, sizeof(rp));
1024 bacpy(&rp.bdaddr, &hdev->bdaddr);
1026 rp.version = hdev->hci_ver;
1027 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1029 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1030 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1032 memcpy(rp.dev_class, hdev->dev_class, 3);
1034 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1035 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1037 hci_dev_unlock(hdev);
1039 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1043 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1048 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1049 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1050 hdev->dev_class, 3);
1052 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1053 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1056 name_len = strlen(hdev->dev_name);
1057 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1058 hdev->dev_name, name_len);
1060 name_len = strlen(hdev->short_name);
1061 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1062 hdev->short_name, name_len);
1067 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1068 void *data, u16 data_len)
1071 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1074 bt_dev_dbg(hdev, "sock %p", sk);
1076 memset(&buf, 0, sizeof(buf));
1080 bacpy(&rp->bdaddr, &hdev->bdaddr);
1082 rp->version = hdev->hci_ver;
1083 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1085 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1086 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1089 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1090 rp->eir_len = cpu_to_le16(eir_len);
1092 hci_dev_unlock(hdev);
1094 /* If this command is called at least once, then the events
1095 * for class of device and local name changes are disabled
1096 * and only the new extended controller information event
1099 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1100 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1101 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1103 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1104 sizeof(*rp) + eir_len);
1107 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1110 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1113 memset(buf, 0, sizeof(buf));
1115 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1116 ev->eir_len = cpu_to_le16(eir_len);
1118 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1119 sizeof(*ev) + eir_len,
1120 HCI_MGMT_EXT_INFO_EVENTS, skip);
1123 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1125 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1127 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1131 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1133 bt_dev_dbg(hdev, "status 0x%02x", status);
1135 if (hci_conn_count(hdev) == 0) {
1136 cancel_delayed_work(&hdev->power_off);
1137 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1141 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1143 struct mgmt_ev_advertising_added ev;
1145 ev.instance = instance;
1147 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1150 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1153 struct mgmt_ev_advertising_removed ev;
1155 ev.instance = instance;
1157 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1160 static void cancel_adv_timeout(struct hci_dev *hdev)
1162 if (hdev->adv_instance_timeout) {
1163 hdev->adv_instance_timeout = 0;
1164 cancel_delayed_work(&hdev->adv_instance_expire);
1168 static int clean_up_hci_state(struct hci_dev *hdev)
1170 struct hci_request req;
1171 struct hci_conn *conn;
1172 bool discov_stopped;
1175 hci_req_init(&req, hdev);
1177 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1178 test_bit(HCI_PSCAN, &hdev->flags)) {
1180 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1183 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1185 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1186 __hci_req_disable_advertising(&req);
1188 discov_stopped = hci_req_stop_discovery(&req);
1190 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1191 /* 0x15 == Terminated due to Power Off */
1192 __hci_abort_conn(&req, conn, 0x15);
1195 err = hci_req_run(&req, clean_up_hci_complete);
1196 if (!err && discov_stopped)
1197 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: power the controller up or down.
 * Rejects values other than 0x00/0x01, refuses a second concurrent
 * SET_POWERED, and short-circuits with the current settings when the
 * requested state already matches. Power-up is deferred to the power_on
 * work item; power-down first cleans up HCI state and arms a delayed
 * power_off with HCI_POWER_OFF_TIMEOUT as a safety net.
 */
1202 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1205 struct mgmt_mode *cp = data;
1206 struct mgmt_pending_cmd *cmd;
1209 bt_dev_dbg(hdev, "sock %p", sk);
1211 if (cp->val != 0x00 && cp->val != 0x01)
1212 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1213 MGMT_STATUS_INVALID_PARAMS);
1217 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1218 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1223 if (!!cp->val == hdev_is_powered(hdev)) {
1224 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1228 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1235 queue_work(hdev->req_workqueue, &hdev->power_on);
1238 /* Disconnect connections, stop scans, etc */
1239 err = clean_up_hci_state(hdev);
1241 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1242 HCI_POWER_OFF_TIMEOUT);
1244 /* ENODATA means there were no HCI commands queued */
1245 if (err == -ENODATA) {
/* Nothing to wait for — power off immediately instead of after timeout */
1246 cancel_delayed_work(&hdev->power_off);
1247 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1253 hci_dev_unlock(hdev);
/* Emit MGMT_EV_NEW_SETTINGS carrying the current settings bitmask of @hdev
 * to all sockets subscribed to setting events, excluding @skip.
 */
1257 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1259 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1261 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1262 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast the new-settings event to every subscriber. */
1265 int mgmt_new_settings(struct hci_dev *hdev)
1267 return new_settings(hdev, NULL);
/* NOTE(review): field of struct cmd_lookup — the struct's opening line and
 * its 'struct sock *sk' member are elided in this extract.
 */
1272 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer @cmd with the current settings,
 * unlink it, remember the first responder's socket in the cmd_lookup match
 * (with a reference held) so the caller can skip it when broadcasting
 * new_settings, then free the pending command.
 */
1276 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1278 struct cmd_lookup *match = data;
1280 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1282 list_del(&cmd->list);
1284 if (match->sk == NULL) {
1285 match->sk = cmd->sk;
1286 sock_hold(match->sk);
1289 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail @cmd with the status pointed to by
 * @data and remove it from the pending list.
 */
1292 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1296 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1297 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own cmd_complete
 * handler when one is set; otherwise fall back to a plain status response.
 */
1300 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1302 if (cmd->cmd_complete) {
1305 cmd->cmd_complete(cmd, *status);
1306 mgmt_pending_remove(cmd);
1311 cmd_status_rsp(cmd, data);
/* Generic cmd_complete: echo the command's original parameters back as the
 * response payload.
 */
1314 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1316 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1317 cmd->param, cmd->param_len);
/* cmd_complete for address-based commands: respond with only the leading
 * mgmt_addr_info portion of the original parameters.
 */
1320 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1322 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1323 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, else SUCCESS.
 */
1326 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1328 if (!lmp_bredr_capable(hdev))
1329 return MGMT_STATUS_NOT_SUPPORTED;
1330 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1331 return MGMT_STATUS_REJECTED;
1333 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an mgmt status: NOT_SUPPORTED when the controller
 * lacks LE, REJECTED when LE is disabled, else SUCCESS.
 */
1336 static u8 mgmt_le_support(struct hci_dev *hdev)
1338 if (!lmp_le_capable(hdev))
1339 return MGMT_STATUS_NOT_SUPPORTED;
1340 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1341 return MGMT_STATUS_REJECTED;
1343 return MGMT_STATUS_SUCCESS;
/* Completion handler for SET_DISCOVERABLE: on HCI error, report the mapped
 * status and clear HCI_LIMITED_DISCOVERABLE; on success, arm the
 * discoverable timeout (discov_timeout is in seconds) if one is configured,
 * then answer the pending command and broadcast the new settings.
 */
1346 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1348 struct mgmt_pending_cmd *cmd;
1350 bt_dev_dbg(hdev, "status 0x%02x", status);
1354 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1359 u8 mgmt_err = mgmt_status(status);
1360 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1361 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1365 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1366 hdev->discov_timeout > 0) {
1367 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1368 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1371 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1372 new_settings(hdev, cmd->sk);
1375 mgmt_pending_remove(cmd);
1378 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general
 * discoverable, 0x02 limited discoverable (limited requires a timeout,
 * off forbids one). Validates transport/parameter constraints, handles
 * the powered-off and no-HCI-change fast paths in place, and otherwise
 * queues the discoverable_update work with the flags pre-set; the timeout
 * itself is armed in mgmt_set_discoverable_complete.
 */
1381 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1384 struct mgmt_cp_set_discoverable *cp = data;
1385 struct mgmt_pending_cmd *cmd;
1389 bt_dev_dbg(hdev, "sock %p", sk);
/* Need at least one of BR/EDR or LE enabled for discoverability */
1391 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1392 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1393 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1394 MGMT_STATUS_REJECTED);
1396 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1397 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1398 MGMT_STATUS_INVALID_PARAMS);
1400 timeout = __le16_to_cpu(cp->timeout);
1402 /* Disabling discoverable requires that no timeout is set,
1403 * and enabling limited discoverable requires a timeout.
1405 if ((cp->val == 0x00 && timeout > 0) ||
1406 (cp->val == 0x02 && timeout == 0))
1407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1408 MGMT_STATUS_INVALID_PARAMS);
/* A timeout can only be armed while powered */
1412 if (!hdev_is_powered(hdev) && timeout > 0) {
1413 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1414 MGMT_STATUS_NOT_POWERED);
1418 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1419 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1420 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable */
1425 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1427 MGMT_STATUS_REJECTED);
1431 if (hdev->advertising_paused) {
1432 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1437 if (!hdev_is_powered(hdev)) {
1438 bool changed = false;
1440 /* Setting limited discoverable when powered off is
1441 * not a valid operation since it requires a timeout
1442 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1444 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1445 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1449 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1454 err = new_settings(hdev, sk);
1459 /* If the current mode is the same, then just update the timeout
1460 * value with the new value. And if only the timeout gets updated,
1461 * then no need for any HCI transactions.
1463 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1464 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1465 HCI_LIMITED_DISCOVERABLE)) {
1466 cancel_delayed_work(&hdev->discov_off);
1467 hdev->discov_timeout = timeout;
1469 if (cp->val && hdev->discov_timeout > 0) {
1470 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1471 queue_delayed_work(hdev->req_workqueue,
1472 &hdev->discov_off, to);
1475 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1479 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1485 /* Cancel any potential discoverable timeout that might be
1486 * still active and store new timeout value. The arming of
1487 * the timeout happens in the complete handler.
1489 cancel_delayed_work(&hdev->discov_off);
1490 hdev->discov_timeout = timeout;
1493 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1495 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1497 /* Limited discoverable mode */
1498 if (cp->val == 0x02)
1499 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1501 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1503 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1507 hci_dev_unlock(hdev);
/* Completion handler for SET_CONNECTABLE: on HCI error, report the mapped
 * status; on success, answer the pending command with the current settings
 * and broadcast the change to other subscribers.
 */
1511 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1513 struct mgmt_pending_cmd *cmd;
1515 bt_dev_dbg(hdev, "status 0x%02x", status);
1519 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1524 u8 mgmt_err = mgmt_status(status);
1525 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1529 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1530 new_settings(hdev, cmd->sk);
1533 mgmt_pending_remove(cmd);
1536 hci_dev_unlock(hdev);
/* Flag-only path for SET_CONNECTABLE (used when no HCI traffic is needed):
 * toggle HCI_CONNECTABLE per @val — disabling also clears
 * HCI_DISCOVERABLE — respond to @sk, refresh scan state and background
 * scanning, and broadcast new settings when something changed.
 */
1539 static int set_connectable_update_settings(struct hci_dev *hdev,
1540 struct sock *sk, u8 val)
1542 bool changed = false;
1545 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1549 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1551 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Non-connectable implies non-discoverable */
1552 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1555 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1560 hci_req_update_scan(hdev);
1561 hci_update_background_scan(hdev);
1562 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: validate transport and value, take the
 * flag-only path when powered off, reject while a SET_DISCOVERABLE or
 * SET_CONNECTABLE is already pending, otherwise pre-set/clear the flags
 * (disabling also cancels any discoverable timeout and clears both
 * discoverable flags) and queue the connectable_update work.
 */
1568 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1571 struct mgmt_mode *cp = data;
1572 struct mgmt_pending_cmd *cmd;
1575 bt_dev_dbg(hdev, "sock %p", sk);
1577 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1578 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1579 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1580 MGMT_STATUS_REJECTED);
1582 if (cp->val != 0x00 && cp->val != 0x01)
1583 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1584 MGMT_STATUS_INVALID_PARAMS);
1588 if (!hdev_is_powered(hdev)) {
1589 err = set_connectable_update_settings(hdev, sk, cp->val);
1593 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1594 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1595 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1600 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1607 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1609 if (hdev->discov_timeout > 0)
1610 cancel_delayed_work(&hdev->discov_off);
1612 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1613 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1614 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1617 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1621 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: purely a host-side flag toggle — no HCI
 * command is needed. When the flag changed, additionally requeue
 * discoverable_update if limited-privacy advertising is active (the
 * bondable state can affect the advertised address), then broadcast the
 * new settings.
 */
1625 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1628 struct mgmt_mode *cp = data;
1632 bt_dev_dbg(hdev, "sock %p", sk);
1634 if (cp->val != 0x00 && cp->val != 0x01)
1635 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1636 MGMT_STATUS_INVALID_PARAMS);
1641 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1643 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1645 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1650 /* In limited privacy mode the change of bondable mode
1651 * may affect the local advertising address.
1653 if (hdev_is_powered(hdev) &&
1654 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1655 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1656 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1657 queue_work(hdev->req_workqueue,
1658 &hdev->discoverable_update);
1660 err = new_settings(hdev, sk);
1664 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR support. Powered off:
 * just toggle HCI_LINK_SECURITY. Powered on: if the controller's HCI_AUTH
 * state already matches, short-circuit; otherwise register a pending
 * command and issue HCI_OP_WRITE_AUTH_ENABLE.
 */
1668 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1671 struct mgmt_mode *cp = data;
1672 struct mgmt_pending_cmd *cmd;
1676 bt_dev_dbg(hdev, "sock %p", sk);
1678 status = mgmt_bredr_support(hdev);
1680 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1683 if (cp->val != 0x00 && cp->val != 0x01)
1684 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1685 MGMT_STATUS_INVALID_PARAMS);
1689 if (!hdev_is_powered(hdev)) {
1690 bool changed = false;
1692 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1693 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1697 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1702 err = new_settings(hdev, sk);
1707 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1708 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1715 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1716 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1720 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1726 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
/* Elided: on error the pending command is dropped — see line 1728 */
1728 mgmt_pending_remove(cmd);
1733 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP-capable hardware.
 * Powered off: toggle SSP-related flags only (disabling also clears
 * HCI_HS_ENABLED). Powered on: reject a concurrent SET_SSP, short-circuit
 * when unchanged, else register a pending command; disabling while debug
 * keys are in use first turns off SSP debug mode, then
 * HCI_OP_WRITE_SSP_MODE is issued.
 */
1737 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1739 struct mgmt_mode *cp = data;
1740 struct mgmt_pending_cmd *cmd;
1744 bt_dev_dbg(hdev, "sock %p", sk);
1746 status = mgmt_bredr_support(hdev);
1748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1750 if (!lmp_ssp_capable(hdev))
1751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1752 MGMT_STATUS_NOT_SUPPORTED);
1754 if (cp->val != 0x00 && cp->val != 0x01)
1755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1756 MGMT_STATUS_INVALID_PARAMS);
1760 if (!hdev_is_powered(hdev)) {
/* NOTE(review): flag arguments on lines 1764-1770 are elided in this
 * extract (presumably HCI_SSP_ENABLED / HCI_USE_DEBUG_KEYS).
 */
1764 changed = !hci_dev_test_and_set_flag(hdev,
1767 changed = hci_dev_test_and_clear_flag(hdev,
1770 changed = hci_dev_test_and_clear_flag(hdev,
1773 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1776 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1781 err = new_settings(hdev, sk);
1786 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1787 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1792 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1793 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1797 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1803 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1804 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1805 sizeof(cp->val), &cp->val);
1807 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1809 mgmt_pending_remove(cmd);
1814 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed / AMP) handler: only valid when CONFIG_BT_HS
 * is enabled, BR/EDR and SSP are available, and SSP is enabled. Requires
 * no pending SET_SSP. The flag is host-side only; disabling while powered
 * is rejected. Broadcasts new settings when the flag changed.
 */
1818 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1820 struct mgmt_mode *cp = data;
1825 bt_dev_dbg(hdev, "sock %p", sk);
1827 if (!IS_ENABLED(CONFIG_BT_HS))
1828 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1829 MGMT_STATUS_NOT_SUPPORTED);
1831 status = mgmt_bredr_support(hdev);
1833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1835 if (!lmp_ssp_capable(hdev))
1836 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1837 MGMT_STATUS_NOT_SUPPORTED);
/* HS is layered on SSP — it must be enabled first */
1839 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1840 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1841 MGMT_STATUS_REJECTED);
1843 if (cp->val != 0x00 && cp->val != 0x01)
1844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1845 MGMT_STATUS_INVALID_PARAMS);
1849 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1856 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1858 if (hdev_is_powered(hdev)) {
1859 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1860 MGMT_STATUS_REJECTED);
1864 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1867 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1872 err = new_settings(hdev, sk);
1875 hci_dev_unlock(hdev);
/* HCI completion handler for the SET_LE request: on error, fail every
 * pending SET_LE command; on success, answer them with the new settings
 * and broadcast the change. If LE ended up enabled, also refresh default
 * advertising/scan-response data (extended or legacy depending on
 * controller capability) and restart background scanning.
 */
1879 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1881 struct cmd_lookup match = { NULL, hdev };
1886 u8 mgmt_err = mgmt_status(status);
1888 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1893 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1895 new_settings(hdev, match.sk);
1900 /* Make sure the controller has a good default for
1901 * advertising data. Restrict the update to when LE
1902 * has actually been enabled. During power on, the
1903 * update in powered_update_hci will take care of it.
1905 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1906 struct hci_request req;
1907 hci_req_init(&req, hdev);
1908 if (ext_adv_capable(hdev)) {
1911 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1913 __hci_req_update_scan_rsp_data(&req, 0x00);
1915 __hci_req_update_adv_data(&req, 0x00);
1916 __hci_req_update_scan_rsp_data(&req, 0x00);
1918 hci_req_run(&req, NULL);
1919 hci_update_background_scan(hdev);
1923 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: toggle LE host support. LE-only controllers may
 * not switch LE off (enable is answered gracefully, disable rejected).
 * When powered off or when the host-LE state already matches, only the
 * flags change; disabling also clears HCI_ADVERTISING and all advertising
 * instances. Otherwise a WRITE_LE_HOST_SUPPORTED request is built —
 * preceded, when disabling, by stopping advertising and clearing extended
 * advertising sets — and run with le_enable_complete.
 */
1926 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1928 struct mgmt_mode *cp = data;
1929 struct hci_cp_write_le_host_supported hci_cp;
1930 struct mgmt_pending_cmd *cmd;
1931 struct hci_request req;
1935 bt_dev_dbg(hdev, "sock %p", sk);
1937 if (!lmp_le_capable(hdev))
1938 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1939 MGMT_STATUS_NOT_SUPPORTED);
1941 if (cp->val != 0x00 && cp->val != 0x01)
1942 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1943 MGMT_STATUS_INVALID_PARAMS);
1945 /* Bluetooth single mode LE only controllers or dual-mode
1946 * controllers configured as LE only devices, do not allow
1947 * switching LE off. These have either LE enabled explicitly
1948 * or BR/EDR has been previously switched off.
1950 * When trying to enable an already enabled LE, then gracefully
1951 * send a positive response. Trying to disable it however will
1952 * result into rejection.
1954 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1955 if (cp->val == 0x01)
1956 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1958 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1959 MGMT_STATUS_REJECTED);
1965 enabled = lmp_host_le_capable(hdev);
/* Disabling LE also removes all advertising instances */
1968 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1970 if (!hdev_is_powered(hdev) || val == enabled) {
1971 bool changed = false;
1973 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1974 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1978 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1979 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1983 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1988 err = new_settings(hdev, sk);
1993 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1994 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1995 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2000 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2006 hci_req_init(&req, hdev);
2008 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE + BR/EDR is always disabled */
2012 hci_cp.simul = 0x00;
2014 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2015 __hci_req_disable_advertising(&req);
2017 if (ext_adv_capable(hdev))
2018 __hci_req_clear_ext_adv_sets(&req);
2021 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2024 err = hci_req_run(&req, le_enable_complete);
2026 mgmt_pending_remove(cmd);
2029 hci_dev_unlock(hdev);
2033 /* This is a helper function to test for pending mgmt commands that can
2034 * cause CoD or EIR HCI commands. We can only allow one such pending
2035 * mgmt command at a time since otherwise we cannot easily track what
2036 * the current values are, will be, and based on that calculate if a new
2037 * HCI command needs to be sent and if yes with what value.
/* Return true if any pending mgmt command could generate Class-of-Device
 * or EIR HCI traffic (see the comment above for why only one such command
 * may be in flight at a time).
 */
2039 static bool pending_eir_or_class(struct hci_dev *hdev)
2041 struct mgmt_pending_cmd *cmd;
2043 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2044 switch (cmd->opcode) {
2045 case MGMT_OP_ADD_UUID:
2046 case MGMT_OP_REMOVE_UUID:
2047 case MGMT_OP_SET_DEV_CLASS:
2048 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; 16- and 32-bit UUIDs are aliases of it with
 * only bytes 12-15 differing (see get_uuid_size()).
 */
2056 static const u8 bluetooth_base_uuid[] = {
2057 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2058 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 16-byte little-endian UUID as 16, 32, or 128 bits: anything
 * not matching the first 12 bytes of the Bluetooth Base UUID is a full
 * 128-bit UUID; otherwise the value in bytes 12-15 decides.
 * NOTE(review): the return statements are elided in this extract.
 */
2061 static u8 get_uuid_size(const u8 *uuid)
2065 if (memcmp(uuid, bluetooth_base_uuid, 12))
2068 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for class/EIR commands (@mgmt_op is ADD_UUID,
 * REMOVE_UUID or SET_DEV_CLASS): answer the pending command with the
 * current 3-byte device class and the mapped HCI status.
 */
2075 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2077 struct mgmt_pending_cmd *cmd;
2081 cmd = pending_find(mgmt_op, hdev);
2085 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2086 mgmt_status(status), hdev->dev_class, 3);
2088 mgmt_pending_remove(cmd);
2091 hci_dev_unlock(hdev);
/* HCI request callback for add_uuid(): forward to the shared completion. */
2094 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2096 bt_dev_dbg(hdev, "status 0x%02x", status);
2098 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: reject while another class/EIR command is
 * pending, append the UUID (with its service hint and computed size) to
 * hdev->uuids, then update Class of Device and EIR via one hci_request.
 * -ENODATA from hci_req_run means no HCI update was needed and the command
 * completes immediately; otherwise a pending command waits for
 * add_uuid_complete.
 */
2101 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2103 struct mgmt_cp_add_uuid *cp = data;
2104 struct mgmt_pending_cmd *cmd;
2105 struct hci_request req;
2106 struct bt_uuid *uuid;
2109 bt_dev_dbg(hdev, "sock %p", sk);
2113 if (pending_eir_or_class(hdev)) {
2114 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2119 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2125 memcpy(uuid->uuid, cp->uuid, 16);
2126 uuid->svc_hint = cp->svc_hint;
2127 uuid->size = get_uuid_size(cp->uuid);
2129 list_add_tail(&uuid->list, &hdev->uuids);
2131 hci_req_init(&req, hdev);
2133 __hci_req_update_class(&req);
2134 __hci_req_update_eir(&req);
2136 err = hci_req_run(&req, add_uuid_complete);
2138 if (err != -ENODATA)
/* No HCI traffic required — answer right away with the device class */
2141 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2142 hdev->dev_class, 3);
2146 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2155 hci_dev_unlock(hdev);
/* Arm the service-cache: if the device is powered and the cache flag was
 * not already set, schedule the service_cache delayed work. Return value
 * tells the caller whether the cache (now) defers the EIR/class update.
 * NOTE(review): the delay argument and return statements are elided in
 * this extract.
 */
2159 static bool enable_service_cache(struct hci_dev *hdev)
2161 if (!hdev_is_powered(hdev))
2164 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2165 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for remove_uuid(): forward to the shared completion. */
2173 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2175 bt_dev_dbg(hdev, "status 0x%02x", status);
2177 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: an all-zero UUID clears the whole list
 * (completing immediately when the service cache absorbs the update);
 * otherwise every matching entry is unlinked, with INVALID_PARAMS if none
 * matched. Finishes by refreshing Class of Device and EIR in one
 * hci_request, mirroring add_uuid()'s -ENODATA fast path.
 */
2180 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2183 struct mgmt_cp_remove_uuid *cp = data;
2184 struct mgmt_pending_cmd *cmd;
2185 struct bt_uuid *match, *tmp;
2186 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2187 struct hci_request req;
2190 bt_dev_dbg(hdev, "sock %p", sk);
2194 if (pending_eir_or_class(hdev)) {
2195 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: all-zero UUID removes every registered UUID */
2200 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2201 hci_uuids_clear(hdev);
2203 if (enable_service_cache(hdev)) {
2204 err = mgmt_cmd_complete(sk, hdev->id,
2205 MGMT_OP_REMOVE_UUID,
2206 0, hdev->dev_class, 3);
2215 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2216 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2219 list_del(&match->list);
/* Elided: no entry matched -> INVALID_PARAMS (lines 2225-2226) */
2225 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2226 MGMT_STATUS_INVALID_PARAMS);
2231 hci_req_init(&req, hdev);
2233 __hci_req_update_class(&req);
2234 __hci_req_update_eir(&req);
2236 err = hci_req_run(&req, remove_uuid_complete);
2238 if (err != -ENODATA)
2241 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2242 hdev->dev_class, 3);
2246 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2255 hci_dev_unlock(hdev);
/* HCI request callback for set_dev_class(): forward to the shared completion. */
2259 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2261 bt_dev_dbg(hdev, "status 0x%02x", status);
2263 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: BR/EDR only. Validates that the reserved
 * bits are zero (minor low 2 bits, major top 3 bits), stores major/minor,
 * and when powered flushes any pending service cache before pushing the
 * new class (and possibly EIR) to the controller. Same -ENODATA
 * immediate-completion pattern as add_uuid().
 */
2266 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2269 struct mgmt_cp_set_dev_class *cp = data;
2270 struct mgmt_pending_cmd *cmd;
2271 struct hci_request req;
2274 bt_dev_dbg(hdev, "sock %p", sk);
2276 if (!lmp_bredr_capable(hdev))
2277 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2278 MGMT_STATUS_NOT_SUPPORTED);
2282 if (pending_eir_or_class(hdev)) {
2283 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved CoD bits must be zero */
2288 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2289 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2290 MGMT_STATUS_INVALID_PARAMS);
2294 hdev->major_class = cp->major;
2295 hdev->minor_class = cp->minor;
2297 if (!hdev_is_powered(hdev)) {
2298 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2299 hdev->dev_class, 3);
2303 hci_req_init(&req, hdev);
2305 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
/* Drop the lock while cancelling: the work item may take it */
2306 hci_dev_unlock(hdev);
2307 cancel_delayed_work_sync(&hdev->service_cache);
2309 __hci_req_update_eir(&req);
2312 __hci_req_update_class(&req);
2314 err = hci_req_run(&req, set_class_complete);
2316 if (err != -ENODATA)
2319 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2320 hdev->dev_class, 3);
2324 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2333 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: bulk-load BR/EDR link keys from
 * userspace. Validates key_count against the u16 payload ceiling and the
 * exact expected struct_size, the debug_keys mode, and every key's address
 * type (BDADDR_BREDR) and key type (<= 0x08) before touching state. Then
 * clears the existing keys, updates HCI_KEEP_DEBUG_KEYS, and adds each key
 * — skipping blocked keys and debug-combination keys.
 */
2337 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2340 struct mgmt_cp_load_link_keys *cp = data;
/* Largest key_count that still fits a u16-sized mgmt payload */
2341 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2342 sizeof(struct mgmt_link_key_info));
2343 u16 key_count, expected_len;
2347 bt_dev_dbg(hdev, "sock %p", sk);
2349 if (!lmp_bredr_capable(hdev))
2350 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2351 MGMT_STATUS_NOT_SUPPORTED);
2353 key_count = __le16_to_cpu(cp->key_count);
2354 if (key_count > max_key_count) {
2355 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2357 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2358 MGMT_STATUS_INVALID_PARAMS);
2361 expected_len = struct_size(cp, keys, key_count);
2362 if (expected_len != len) {
2363 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2365 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2366 MGMT_STATUS_INVALID_PARAMS);
2369 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2370 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2371 MGMT_STATUS_INVALID_PARAMS);
2373 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: validate every entry before mutating any state */
2376 for (i = 0; i < key_count; i++) {
2377 struct mgmt_link_key_info *key = &cp->keys[i];
2379 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2380 return mgmt_cmd_status(sk, hdev->id,
2381 MGMT_OP_LOAD_LINK_KEYS,
2382 MGMT_STATUS_INVALID_PARAMS);
2387 hci_link_keys_clear(hdev);
2390 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2392 changed = hci_dev_test_and_clear_flag(hdev,
2393 HCI_KEEP_DEBUG_KEYS);
2396 new_settings(hdev, NULL);
/* Second pass: store the keys */
2398 for (i = 0; i < key_count; i++) {
2399 struct mgmt_link_key_info *key = &cp->keys[i];
2401 if (hci_is_blocked_key(hdev,
2402 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2404 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2409 /* Always ignore debug keys and require a new pairing if
2410 * the user wants to use them.
2412 if (key->type == HCI_LK_DEBUG_COMBINATION)
2415 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2416 key->type, key->pin_len, NULL);
2419 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2421 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for @bdaddr/@addr_type on @hdev to
 * all mgmt sockets except @skip_sk.
 */
2426 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2427 u8 addr_type, struct sock *skip_sk)
2429 struct mgmt_ev_device_unpaired ev;
2431 bacpy(&ev.addr.bdaddr, bdaddr);
2432 ev.addr.type = addr_type;
2434 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: remove pairing material for a device and
 * optionally disconnect it. BR/EDR: delete the link key (NOT_PAIRED if
 * none existed). LE: abort any ongoing SMP pairing and remove LTK/IRK,
 * delete or neutralize connection parameters (removal is deferred via
 * HCI_CONN_PARAM_REMOVAL_PEND while the link is up so re-pairing can keep
 * them). If disconnection was requested and a link exists, a pending
 * command is registered and the link aborted; otherwise the command
 * completes immediately and DEVICE_UNPAIRED is broadcast.
 */
2438 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2441 struct mgmt_cp_unpair_device *cp = data;
2442 struct mgmt_rp_unpair_device rp;
2443 struct hci_conn_params *params;
2444 struct mgmt_pending_cmd *cmd;
2445 struct hci_conn *conn;
2449 memset(&rp, 0, sizeof(rp));
2450 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2451 rp.addr.type = cp->addr.type;
2453 if (!bdaddr_type_is_valid(cp->addr.type))
2454 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2455 MGMT_STATUS_INVALID_PARAMS,
2458 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2459 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2460 MGMT_STATUS_INVALID_PARAMS,
2465 if (!hdev_is_powered(hdev)) {
2466 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2467 MGMT_STATUS_NOT_POWERED, &rp,
2472 if (cp->addr.type == BDADDR_BREDR) {
2473 /* If disconnection is requested, then look up the
2474 * connection. If the remote device is connected, it
2475 * will be later used to terminate the link.
2477 * Setting it to NULL explicitly will cause no
2478 * termination of the link.
2481 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2486 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2488 err = mgmt_cmd_complete(sk, hdev->id,
2489 MGMT_OP_UNPAIR_DEVICE,
2490 MGMT_STATUS_NOT_PAIRED, &rp,
2498 /* LE address type */
2499 addr_type = le_addr_type(cp->addr.type);
2501 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2502 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2504 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2505 MGMT_STATUS_NOT_PAIRED, &rp,
2510 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2512 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2517 /* Defer clearing up the connection parameters until closing to
2518 * give a chance of keeping them if a repairing happens.
2520 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2522 /* Disable auto-connection parameters if present */
2523 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2525 if (params->explicit_connect)
2526 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2528 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2531 /* If disconnection is not requested, then clear the connection
2532 * variable so that the link is not terminated.
2534 if (!cp->disconnect)
2538 /* If the connection variable is set, then termination of the
2539 * link is requested.
2542 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2544 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2548 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2555 cmd->cmd_complete = addr_cmd_complete;
2557 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2559 mgmt_pending_remove(cmd);
2562 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: validate the address type, require the
 * adapter to be up, allow only one DISCONNECT at a time, look up the
 * BR/EDR or LE connection, and if it exists (and is not already
 * opening/closed) register a pending command and terminate the link with
 * reason Remote User Terminated Connection.
 */
2566 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2569 struct mgmt_cp_disconnect *cp = data;
2570 struct mgmt_rp_disconnect rp;
2571 struct mgmt_pending_cmd *cmd;
2572 struct hci_conn *conn;
2575 bt_dev_dbg(hdev, "sock %p", sk);
2577 memset(&rp, 0, sizeof(rp));
2578 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2579 rp.addr.type = cp->addr.type;
2581 if (!bdaddr_type_is_valid(cp->addr.type))
2582 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2583 MGMT_STATUS_INVALID_PARAMS,
2588 if (!test_bit(HCI_UP, &hdev->flags)) {
2589 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2590 MGMT_STATUS_NOT_POWERED, &rp,
2595 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2596 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2597 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2601 if (cp->addr.type == BDADDR_BREDR)
2602 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2605 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2606 le_addr_type(cp->addr.type));
2608 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2609 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2610 MGMT_STATUS_NOT_CONNECTED, &rp,
2615 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2621 cmd->cmd_complete = generic_cmd_complete;
2623 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2625 mgmt_pending_remove(cmd);
2628 hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into the mgmt BDADDR_*
 * address type. LE public maps to BDADDR_LE_PUBLIC, other LE address
 * types fall back to BDADDR_LE_RANDOM, and any non-LE link type falls
 * back to BDADDR_BREDR.
 * NOTE(review): the case labels (presumably LE_LINK) are elided in this
 * extract.
 */
2632 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2634 switch (link_type) {
2636 switch (addr_type) {
2637 case ADDR_LE_DEV_PUBLIC:
2638 return BDADDR_LE_PUBLIC;
2641 /* Fallback to LE Random address type */
2642 return BDADDR_LE_RANDOM;
2646 /* Fallback to BR/EDR type */
2647 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: count mgmt-visible connections, size
 * the reply with struct_size(), then fill in address/type per connection
 * while skipping SCO/eSCO links. The response length is recomputed from
 * the final count since filtering can make it smaller than the
 * allocation.
 */
2651 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2654 struct mgmt_rp_get_connections *rp;
2659 bt_dev_dbg(hdev, "sock %p", sk);
2663 if (!hdev_is_powered(hdev)) {
2664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2665 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected */
2670 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2671 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2675 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: populate the reply */
2682 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2683 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2685 bacpy(&rp->addr[i].bdaddr, &c->dst);
2686 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2687 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2692 rp->conn_count = cpu_to_le16(i);
2694 /* Recalculate length in case of filtered SCO connections, etc */
2695 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2696 struct_size(rp, addr, i));
2701 hci_dev_unlock(hdev);
/* Register a pending PIN_CODE_NEG_REPLY command and send the controller
 * HCI_OP_PIN_CODE_NEG_REPLY for the given address; the pending command is
 * dropped if sending fails.
 */
2705 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2706 struct mgmt_cp_pin_code_neg_reply *cp)
2708 struct mgmt_pending_cmd *cmd;
2711 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2716 cmd->cmd_complete = addr_cmd_complete;
2718 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2719 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2721 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: requires a powered adapter and an
 * existing ACL connection. If the pending security level demands a 16-byte
 * PIN but a shorter one was supplied, automatically convert the reply into
 * a negative reply. Otherwise register a pending command and forward the
 * PIN via HCI_OP_PIN_CODE_REPLY.
 */
2726 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2729 struct hci_conn *conn;
2730 struct mgmt_cp_pin_code_reply *cp = data;
2731 struct hci_cp_pin_code_reply reply;
2732 struct mgmt_pending_cmd *cmd;
2735 bt_dev_dbg(hdev, "sock %p", sk);
2739 if (!hdev_is_powered(hdev)) {
2740 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2741 MGMT_STATUS_NOT_POWERED);
2745 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2747 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2748 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN — reject shorter ones */
2752 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2753 struct mgmt_cp_pin_code_neg_reply ncp;
2755 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2757 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2759 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2761 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2762 MGMT_STATUS_INVALID_PARAMS);
2767 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2773 cmd->cmd_complete = addr_cmd_complete;
2775 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2776 reply.pin_len = cp->pin_len;
2777 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2779 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2781 mgmt_pending_remove(cmd);
2784 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_IO_CAPABILITY: validate the value against the SMP
 * maximum and store it in hdev->io_capability for future pairings.
 * Always replies with command-complete on success.
 */
2788 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2791 struct mgmt_cp_set_io_capability *cp = data;
2793 bt_dev_dbg(hdev, "sock %p", sk);
2795 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2796 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2797 MGMT_STATUS_INVALID_PARAMS);
2801 hdev->io_capability = cp->io_capability;
2803 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2805 hci_dev_unlock(hdev);
2807 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Return the pending MGMT_OP_PAIR_DEVICE command whose user_data is the
 * given connection, or NULL (return statements elided in this excerpt).
 */
2811 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2813 struct hci_dev *hdev = conn->hdev;
2814 struct mgmt_pending_cmd *cmd;
2816 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2817 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2820 if (cmd->user_data != conn)
/* Complete a Pair Device command with the given mgmt status, detach the
 * pairing callbacks from the connection, drop the pairing reference and
 * keep the stored connection parameters (device is now paired).
 */
2829 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2831 struct mgmt_rp_pair_device rp;
2832 struct hci_conn *conn = cmd->user_data;
2835 bacpy(&rp.addr.bdaddr, &conn->dst);
2836 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2838 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2839 status, &rp, sizeof(rp));
2841 /* So we don't get further callbacks for this connection */
2842 conn->connect_cfm_cb = NULL;
2843 conn->security_cfm_cb = NULL;
2844 conn->disconn_cfm_cb = NULL;
2846 hci_conn_drop(conn);
2848 /* The device is paired so there is no need to remove
2849 * its connection parameters anymore.
2851 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* SMP pairing finished: map the boolean outcome to a mgmt status and
 * complete any pending Pair Device command for this connection.
 */
2858 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2860 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2861 struct mgmt_pending_cmd *cmd;
2863 cmd = find_pairing(conn);
/* NULL-check of cmd elided in this excerpt */
2865 cmd->cmd_complete(cmd, status);
2866 mgmt_pending_remove(cmd);
/* BR/EDR connect/security/disconnect callback: finish the pending
 * Pair Device command with the translated HCI status.
 */
2870 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2872 struct mgmt_pending_cmd *cmd;
2874 BT_DBG("status %u", status);
2876 cmd = find_pairing(conn);
2878 BT_DBG("Unable to find a pending command");
2882 cmd->cmd_complete(cmd, mgmt_status(status));
2883 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb; an additional early-out (elided
 * here, between lines 2890 and 2895) distinguishes it from the BR/EDR path.
 */
2886 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2888 struct mgmt_pending_cmd *cmd;
2890 BT_DBG("status %u", status);
2895 cmd = find_pairing(conn);
2897 BT_DBG("Unable to find a pending command");
2901 cmd->cmd_complete(cmd, mgmt_status(status));
2902 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PAIR_DEVICE: validate address type and IO capability,
 * establish an ACL (BR/EDR) or LE connection as needed, attach pairing
 * callbacks and track the operation as a pending command. Dedicated
 * bonding with medium security is requested.
 */
2905 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2908 struct mgmt_cp_pair_device *cp = data;
2909 struct mgmt_rp_pair_device rp;
2910 struct mgmt_pending_cmd *cmd;
2911 u8 sec_level, auth_type;
2912 struct hci_conn *conn;
2915 bt_dev_dbg(hdev, "sock %p", sk);
2917 memset(&rp, 0, sizeof(rp));
2918 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2919 rp.addr.type = cp->addr.type;
2921 if (!bdaddr_type_is_valid(cp->addr.type))
2922 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2923 MGMT_STATUS_INVALID_PARAMS,
2926 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2927 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2928 MGMT_STATUS_INVALID_PARAMS,
2933 if (!hdev_is_powered(hdev)) {
2934 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2935 MGMT_STATUS_NOT_POWERED, &rp,
2940 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2941 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2942 MGMT_STATUS_ALREADY_PAIRED, &rp,
2947 sec_level = BT_SECURITY_MEDIUM;
2948 auth_type = HCI_AT_DEDICATED_BONDING;
2950 if (cp->addr.type == BDADDR_BREDR) {
2951 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2952 auth_type, CONN_REASON_PAIR_DEVICE);
2954 u8 addr_type = le_addr_type(cp->addr.type);
2955 struct hci_conn_params *p;
2957 /* When pairing a new device, it is expected to remember
2958 * this device for future connections. Adding the connection
2959 * parameter information ahead of time allows tracking
2960 * of the slave preferred values and will speed up any
2961 * further connection establishment.
2963 * If connection parameters already exist, then they
2964 * will be kept and this function does nothing.
2966 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
/* an explicit-connect entry must not linger once we pair on purpose */
2968 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2969 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2971 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2972 sec_level, HCI_LE_CONN_TIMEOUT,
2973 CONN_REASON_PAIR_DEVICE);
/* conn is an ERR_PTR here — map the errno to a mgmt status */
2979 if (PTR_ERR(conn) == -EBUSY)
2980 status = MGMT_STATUS_BUSY;
2981 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2982 status = MGMT_STATUS_NOT_SUPPORTED;
2983 else if (PTR_ERR(conn) == -ECONNREFUSED)
2984 status = MGMT_STATUS_REJECTED;
2986 status = MGMT_STATUS_CONNECT_FAILED;
2988 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2989 status, &rp, sizeof(rp));
/* a connection with callbacks already set means a pairing is in flight */
2993 if (conn->connect_cfm_cb) {
2994 hci_conn_drop(conn);
2995 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2996 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3000 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3003 hci_conn_drop(conn);
3007 cmd->cmd_complete = pairing_complete;
3009 /* For LE, just connecting isn't a proof that the pairing finished */
3010 if (cp->addr.type == BDADDR_BREDR) {
3011 conn->connect_cfm_cb = pairing_complete_cb;
3012 conn->security_cfm_cb = pairing_complete_cb;
3013 conn->disconn_cfm_cb = pairing_complete_cb;
3015 conn->connect_cfm_cb = le_pairing_complete_cb;
3016 conn->security_cfm_cb = le_pairing_complete_cb;
3017 conn->disconn_cfm_cb = le_pairing_complete_cb;
3020 conn->io_capability = cp->io_cap;
3021 cmd->user_data = hci_conn_get(conn);
/* already connected and secure enough: complete synchronously */
3023 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3024 hci_conn_security(conn, sec_level, auth_type, true)) {
3025 cmd->cmd_complete(cmd, 0);
3026 mgmt_pending_remove(cmd);
3032 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: find the pending Pair Device command,
 * verify the address matches its target, complete it as CANCELLED, then
 * tear down any keys/SMP context and abort the link if we created it.
 */
3036 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3039 struct mgmt_addr_info *addr = data;
3040 struct mgmt_pending_cmd *cmd;
3041 struct hci_conn *conn;
3044 bt_dev_dbg(hdev, "sock %p", sk);
3048 if (!hdev_is_powered(hdev)) {
3049 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3050 MGMT_STATUS_NOT_POWERED);
3054 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3056 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3057 MGMT_STATUS_INVALID_PARAMS);
3061 conn = cmd->user_data;
3063 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3064 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3065 MGMT_STATUS_INVALID_PARAMS);
3069 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3070 mgmt_pending_remove(cmd);
3072 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3073 addr, sizeof(*addr));
3075 /* Since user doesn't want to proceed with the connection, abort any
3076 * ongoing pairing and then terminate the link if it was created
3077 * because of the pair device action.
3079 if (addr->type == BDADDR_BREDR)
3080 hci_remove_link_key(hdev, &addr->bdaddr);
3082 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3083 le_addr_type(addr->type));
3085 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3086 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3089 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN neg reply, user
 * confirm/passkey (neg) replies). LE addresses are answered via SMP
 * directly; BR/EDR responses are forwarded as the given HCI command,
 * tracked as a pending mgmt command.
 */
3093 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3094 struct mgmt_addr_info *addr, u16 mgmt_op,
3095 u16 hci_op, __le32 passkey)
3097 struct mgmt_pending_cmd *cmd;
3098 struct hci_conn *conn;
3103 if (!hdev_is_powered(hdev)) {
3104 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3105 MGMT_STATUS_NOT_POWERED, addr,
3110 if (addr->type == BDADDR_BREDR)
3111 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3113 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3114 le_addr_type(addr->type));
3117 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3118 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE: answer through SMP, no HCI command involved */
3123 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3124 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3126 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3127 MGMT_STATUS_SUCCESS, addr,
3130 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3131 MGMT_STATUS_FAILED, addr,
3137 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3143 cmd->cmd_complete = addr_cmd_complete;
3145 /* Continue with pairing via HCI */
3146 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3147 struct hci_cp_user_passkey_reply cp;
3149 bacpy(&cp.bdaddr, &addr->bdaddr);
3150 cp.passkey = passkey;
3151 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* all other replies carry only the bdaddr as parameter */
3153 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3157 mgmt_pending_remove(cmd);
3160 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper around user_pairing_resp(). */
3164 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3165 void *data, u16 len)
3167 struct mgmt_cp_pin_code_neg_reply *cp = data;
3169 bt_dev_dbg(hdev, "sock %p", sk);
3171 return user_pairing_resp(sk, hdev, &cp->addr,
3172 MGMT_OP_PIN_CODE_NEG_REPLY,
3173 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: validates the exact payload size (unlike the
 * sibling wrappers, whose size is enforced by the handler table) before
 * delegating to user_pairing_resp().
 */
3176 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3179 struct mgmt_cp_user_confirm_reply *cp = data;
3181 bt_dev_dbg(hdev, "sock %p", sk);
3183 if (len != sizeof(*cp))
3184 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3185 MGMT_STATUS_INVALID_PARAMS);
3187 return user_pairing_resp(sk, hdev, &cp->addr,
3188 MGMT_OP_USER_CONFIRM_REPLY,
3189 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper around user_pairing_resp(). */
3192 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3193 void *data, u16 len)
3195 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3197 bt_dev_dbg(hdev, "sock %p", sk);
3199 return user_pairing_resp(sk, hdev, &cp->addr,
3200 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3201 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: forwards the user-entered passkey. */
3204 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3207 struct mgmt_cp_user_passkey_reply *cp = data;
3209 bt_dev_dbg(hdev, "sock %p", sk);
3211 return user_pairing_resp(sk, hdev, &cp->addr,
3212 MGMT_OP_USER_PASSKEY_REPLY,
3213 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper around user_pairing_resp(). */
3216 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3217 void *data, u16 len)
3219 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3221 bt_dev_dbg(hdev, "sock %p", sk);
3223 return user_pairing_resp(sk, hdev, &cp->addr,
3224 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3225 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* If the current advertising instance carries any of the given flags
 * (e.g. local-name or appearance), cancel its timeout and reschedule the
 * next instance so the changed data gets re-advertised.
 */
3228 static void adv_expire(struct hci_dev *hdev, u32 flags)
3230 struct adv_info *adv_instance;
3231 struct hci_request req;
3234 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3238 /* stop if current instance doesn't need to be changed */
3239 if (!(adv_instance->flags & flags))
3242 cancel_adv_timeout(hdev);
3244 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3248 hci_req_init(&req, hdev);
3249 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3254 hci_req_run(&req, NULL);
/* HCI completion callback for Set Local Name: report the result to the
 * mgmt client, and on success force the advertising instance to refresh
 * if it embeds the local name.
 */
3257 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3259 struct mgmt_cp_set_local_name *cp;
3260 struct mgmt_pending_cmd *cmd;
3262 bt_dev_dbg(hdev, "status 0x%02x", status);
3266 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3273 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3274 mgmt_status(status));
3276 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3279 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3280 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME)
3283 mgmt_pending_remove(cmd);
3286 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_LOCAL_NAME: short-circuit when nothing changed,
 * store the names immediately when powered off, otherwise queue the HCI
 * name/EIR/scan-response updates and complete asynchronously.
 */
3289 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3292 struct mgmt_cp_set_local_name *cp = data;
3293 struct mgmt_pending_cmd *cmd;
3294 struct hci_request req;
3297 bt_dev_dbg(hdev, "sock %p", sk);
3301 /* If the old values are the same as the new ones just return a
3302 * direct command complete event.
3304 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3305 !memcmp(hdev->short_name, cp->short_name,
3306 sizeof(hdev->short_name))) {
3307 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3312 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3314 if (!hdev_is_powered(hdev)) {
3315 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3317 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* powered-off path: notify listeners directly, no HCI traffic needed */
3322 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3323 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3324 ext_info_changed(hdev, sk);
3329 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3335 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3337 hci_req_init(&req, hdev);
3339 if (lmp_bredr_capable(hdev)) {
3340 __hci_req_update_name(&req);
3341 __hci_req_update_eir(&req);
3344 /* The name is stored in the scan response data and so
3345 * no need to update the advertising data here.
3347 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3348 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3350 err = hci_req_run(&req, set_name_complete);
3352 mgmt_pending_remove(cmd);
3355 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_APPEARANCE (LE only): store the new appearance value
 * and, when advertising with the appearance flag, refresh the instance.
 */
3359 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3362 struct mgmt_cp_set_appearance *cp = data;
3366 bt_dev_dbg(hdev, "sock %p", sk);
3368 if (!lmp_le_capable(hdev))
3369 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3370 MGMT_STATUS_NOT_SUPPORTED);
3372 appearance = le16_to_cpu(cp->appearance);
3376 if (hdev->appearance != appearance) {
3377 hdev->appearance = appearance;
3379 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3380 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3382 ext_info_changed(hdev, sk);
3385 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3388 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report supported, selected and
 * configurable PHYs. NOTE: "confguration" is the actual (misspelled) type
 * name in mgmt.h — do not "fix" it here.
 */
3393 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3394 void *data, u16 len)
3396 struct mgmt_rp_get_phy_confguration rp;
3398 bt_dev_dbg(hdev, "sock %p", sk);
3402 memset(&rp, 0, sizeof(rp));
3404 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3405 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3406 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3408 hci_dev_unlock(hdev);
3410 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event with the currently selected
 * PHYs to all mgmt sockets except @skip.
 */
3414 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3416 struct mgmt_ev_phy_configuration_changed ev;
3418 memset(&ev, 0, sizeof(ev));
3420 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3422 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* HCI completion callback for LE Set Default PHY: complete the pending
 * Set PHY Configuration command and, on success, broadcast the change.
 */
3426 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3427 u16 opcode, struct sk_buff *skb)
3429 struct mgmt_pending_cmd *cmd;
3431 bt_dev_dbg(hdev, "status 0x%02x", status);
3435 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3440 mgmt_cmd_status(cmd->sk, hdev->id,
3441 MGMT_OP_SET_PHY_CONFIGURATION,
3442 mgmt_status(status));
3444 mgmt_cmd_complete(cmd->sk, hdev->id,
3445 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3448 mgmt_phy_configuration_changed(hdev, cmd->sk);
3451 mgmt_pending_remove(cmd);
3454 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: validate the requested PHY mask,
 * translate the BR/EDR bits into an ACL packet-type mask stored in
 * hdev->pkt_type, and program the LE bits via HCI LE Set Default PHY.
 */
3457 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3458 void *data, u16 len)
3460 struct mgmt_cp_set_phy_confguration *cp = data;
3461 struct hci_cp_le_set_default_phy cp_phy;
3462 struct mgmt_pending_cmd *cmd;
3463 struct hci_request req;
3464 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3465 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3466 bool changed = false;
3469 bt_dev_dbg(hdev, "sock %p", sk);
3471 configurable_phys = get_configurable_phys(hdev);
3472 supported_phys = get_supported_phys(hdev);
3473 selected_phys = __le32_to_cpu(cp->selected_phys);
3475 if (selected_phys & ~supported_phys)
3476 return mgmt_cmd_status(sk, hdev->id,
3477 MGMT_OP_SET_PHY_CONFIGURATION,
3478 MGMT_STATUS_INVALID_PARAMS);
3480 unconfigure_phys = supported_phys & ~configurable_phys;
/* all non-configurable PHYs must remain selected */
3482 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3483 return mgmt_cmd_status(sk, hdev->id,
3484 MGMT_OP_SET_PHY_CONFIGURATION,
3485 MGMT_STATUS_INVALID_PARAMS);
3487 if (selected_phys == get_selected_phys(hdev))
3488 return mgmt_cmd_complete(sk, hdev->id,
3489 MGMT_OP_SET_PHY_CONFIGURATION,
3494 if (!hdev_is_powered(hdev)) {
3495 err = mgmt_cmd_status(sk, hdev->id,
3496 MGMT_OP_SET_PHY_CONFIGURATION,
3497 MGMT_STATUS_REJECTED);
3501 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3502 err = mgmt_cmd_status(sk, hdev->id,
3503 MGMT_OP_SET_PHY_CONFIGURATION,
/* Basic-rate multi-slot bits select additional DH/DM packet types */
3508 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3509 pkt_type |= (HCI_DH3 | HCI_DM3);
3511 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3513 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3514 pkt_type |= (HCI_DH5 | HCI_DM5);
3516 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* EDR bits are inverted in pkt_type: a set HCI_2DHx/3DHx bit means
 * "do not use" that packet type.
 */
3518 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3519 pkt_type &= ~HCI_2DH1;
3521 pkt_type |= HCI_2DH1;
3523 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3524 pkt_type &= ~HCI_2DH3;
3526 pkt_type |= HCI_2DH3;
3528 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3529 pkt_type &= ~HCI_2DH5;
3531 pkt_type |= HCI_2DH5;
3533 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3534 pkt_type &= ~HCI_3DH1;
3536 pkt_type |= HCI_3DH1;
3538 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3539 pkt_type &= ~HCI_3DH3;
3541 pkt_type |= HCI_3DH3;
3543 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3544 pkt_type &= ~HCI_3DH5;
3546 pkt_type |= HCI_3DH5;
3548 if (pkt_type != hdev->pkt_type) {
3549 hdev->pkt_type = pkt_type;
/* LE selection unchanged: complete now, no HCI command required */
3553 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3554 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3556 mgmt_phy_configuration_changed(hdev, sk);
3558 err = mgmt_cmd_complete(sk, hdev->id,
3559 MGMT_OP_SET_PHY_CONFIGURATION,
3565 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3572 hci_req_init(&req, hdev);
3574 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no TX/RX preference" */
3576 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3577 cp_phy.all_phys |= 0x01;
3579 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3580 cp_phy.all_phys |= 0x02;
3582 if (selected_phys & MGMT_PHY_LE_1M_TX)
3583 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3585 if (selected_phys & MGMT_PHY_LE_2M_TX)
3586 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3588 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3589 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3591 if (selected_phys & MGMT_PHY_LE_1M_RX)
3592 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3594 if (selected_phys & MGMT_PHY_LE_2M_RX)
3595 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3597 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3598 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3600 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3602 err = hci_req_run_skb(&req, set_default_phy_complete);
3604 mgmt_pending_remove(cmd);
3607 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BLOCKED_KEYS: validate key_count against both the
 * u16 arithmetic bound and the actual payload length, then atomically
 * replace the device's blocked-key list.
 */
3612 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
/* NB: err holds a MGMT_STATUS_* code here, not a negative errno */
3615 int err = MGMT_STATUS_SUCCESS;
3616 struct mgmt_cp_set_blocked_keys *keys = data;
3617 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3618 sizeof(struct mgmt_blocked_key_info));
3619 u16 key_count, expected_len;
3622 bt_dev_dbg(hdev, "sock %p", sk);
3624 key_count = __le16_to_cpu(keys->key_count);
3625 if (key_count > max_key_count) {
3626 bt_dev_err(hdev, "too big key_count value %u", key_count);
3627 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3628 MGMT_STATUS_INVALID_PARAMS);
3631 expected_len = struct_size(keys, keys, key_count);
3632 if (expected_len != len) {
3633 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3635 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3636 MGMT_STATUS_INVALID_PARAMS);
3641 hci_blocked_keys_clear(hdev);
3643 for (i = 0; i < keys->key_count; ++i) {
3644 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3647 err = MGMT_STATUS_NO_RESOURCES;
3651 b->type = keys->keys[i].type;
3652 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3653 list_add_rcu(&b->list, &hdev->blocked_keys);
3655 hci_dev_unlock(hdev);
3657 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle HCI_WIDEBAND_SPEECH_ENABLED.
 * The setting may only flip while the controller is powered off; when
 * powered, only a no-op request (same value) is accepted.
 */
3661 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3662 void *data, u16 len)
3664 struct mgmt_mode *cp = data;
3666 bool changed = false;
3668 bt_dev_dbg(hdev, "sock %p", sk);
3670 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3671 return mgmt_cmd_status(sk, hdev->id,
3672 MGMT_OP_SET_WIDEBAND_SPEECH,
3673 MGMT_STATUS_NOT_SUPPORTED);
3675 if (cp->val != 0x00 && cp->val != 0x01)
3676 return mgmt_cmd_status(sk, hdev->id,
3677 MGMT_OP_SET_WIDEBAND_SPEECH,
3678 MGMT_STATUS_INVALID_PARAMS);
3682 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3683 err = mgmt_cmd_status(sk, hdev->id,
3684 MGMT_OP_SET_WIDEBAND_SPEECH,
3689 if (hdev_is_powered(hdev) &&
3690 !!cp->val != hci_dev_test_flag(hdev,
3691 HCI_WIDEBAND_SPEECH_ENABLED)) {
3692 err = mgmt_cmd_status(sk, hdev->id,
3693 MGMT_OP_SET_WIDEBAND_SPEECH,
3694 MGMT_STATUS_REJECTED);
3699 changed = !hci_dev_test_and_set_flag(hdev,
3700 HCI_WIDEBAND_SPEECH_ENABLED);
3702 changed = hci_dev_test_and_clear_flag(hdev,
3703 HCI_WIDEBAND_SPEECH_ENABLED);
3705 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3710 err = new_settings(hdev, sk);
3713 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_SECURITY_INFO: build an EIR-style TLV list of
 * security capability flags and max encryption key sizes, derived from
 * the controller's supported-commands bitmap.
 */
3717 static int read_security_info(struct sock *sk, struct hci_dev *hdev,
3718 void *data, u16 data_len)
3721 struct mgmt_rp_read_security_info *rp = (void *)buf;
3725 bt_dev_dbg(hdev, "sock %p", sk);
3727 memset(&buf, 0, sizeof(buf));
3731 /* When the Read Simple Pairing Options command is supported, then
3732 * the remote public key validation is supported.
/* commands[41] bit 0x08 == Read Local Simple Pairing Options */
3734 if (hdev->commands[41] & 0x08)
3735 flags |= 0x01; /* Remote public key validation (BR/EDR) */
/* LE (SMP) always validates the remote public key */
3737 flags |= 0x02; /* Remote public key validation (LE) */
3739 /* When the Read Encryption Key Size command is supported, then the
3740 * encryption key size is enforced.
3742 if (hdev->commands[20] & 0x10)
3743 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3745 flags |= 0x08; /* Encryption key size enforcement (LE) */
3747 sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);
3749 /* When the Read Simple Pairing Options command is supported, then
3750 * also max encryption key size information is provided.
3752 if (hdev->commands[41] & 0x08)
3753 sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
3754 hdev->max_enc_key_size);
3756 sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);
3758 rp->sec_len = cpu_to_le16(sec_len);
3760 hci_dev_unlock(hdev);
3762 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
3763 rp, sizeof(*rp) + sec_len);
/* Experimental-feature UUIDs. Bytes are the UUID in reversed (wire/
 * little-endian) order relative to the canonical string in each comment.
 */
3766 #ifdef CONFIG_BT_FEATURE_DEBUG
3767 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3768 static const u8 debug_uuid[16] = {
3769 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3770 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3774 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3775 static const u8 simult_central_periph_uuid[16] = {
3776 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3777 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3780 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3781 static const u8 rpa_resolution_uuid[16] = {
3782 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3783 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: list up to three experimental
 * features (debug logging, simultaneous central/peripheral, LL privacy)
 * with their current flag state, then subscribe the socket to future
 * experimental-feature change events.
 */
3786 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3787 void *data, u16 data_len)
3789 char buf[62]; /* Enough space for 3 features */
3790 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3794 bt_dev_dbg(hdev, "sock %p", sk);
3796 memset(&buf, 0, sizeof(buf));
3798 #ifdef CONFIG_BT_FEATURE_DEBUG
/* BIT(0) == feature currently enabled */
3800 flags = bt_dbg_get() ? BIT(0) : 0;
3802 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3803 rp->features[idx].flags = cpu_to_le32(flags);
3809 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3810 (hdev->le_states[4] & 0x08) && /* Central */
3811 (hdev->le_states[4] & 0x40) && /* Peripheral */
3812 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3817 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3818 rp->features[idx].flags = cpu_to_le32(flags);
3822 if (hdev && use_ll_privacy(hdev)) {
3823 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3824 flags = BIT(0) | BIT(1);
3828 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3829 rp->features[idx].flags = cpu_to_le32(flags);
3833 rp->feature_count = cpu_to_le16(idx);
3835 /* After reading the experimental features information, enable
3836 * the events to update client on any future change.
3838 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
/* hdev may be NULL: this command is valid on the non-controller index */
3840 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3841 MGMT_OP_READ_EXP_FEATURES_INFO,
3842 0, rp, sizeof(*rp) + (20 * idx));
/* Emit an Experimental Feature Changed event for the LL privacy UUID.
 * BIT(1) is always set ("supported settings changed"); BIT(0) reflects
 * the enabled state.
 */
3845 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3848 struct mgmt_ev_exp_feature_changed ev;
3850 memset(&ev, 0, sizeof(ev));
3851 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3852 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3854 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3856 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Emit an Experimental Feature Changed event for the debug-logging UUID
 * on the non-controller index (hdev == NULL in mgmt_limited_event).
 */
3860 #ifdef CONFIG_BT_FEATURE_DEBUG
3861 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3863 struct mgmt_ev_exp_feature_changed ev;
3865 memset(&ev, 0, sizeof(ev));
3866 memcpy(ev.uuid, debug_uuid, 16);
3867 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3869 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3871 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Handle MGMT_OP_SET_EXP_FEATURE. Three UUID cases: the zero UUID
 * disables all experimental features; debug_uuid toggles debug logging
 * (non-controller index only); rpa_resolution_uuid toggles LL privacy
 * (controller index, powered off only). Unknown UUIDs -> NOT_SUPPORTED.
 */
3875 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3876 void *data, u16 data_len)
3878 struct mgmt_cp_set_exp_feature *cp = data;
3879 struct mgmt_rp_set_exp_feature rp;
3881 bt_dev_dbg(hdev, "sock %p", sk);
/* Case 1: zero UUID — switch every experimental feature off */
3883 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3884 memset(rp.uuid, 0, 16);
3885 rp.flags = cpu_to_le32(0);
3887 #ifdef CONFIG_BT_FEATURE_DEBUG
3889 bool changed = bt_dbg_get();
3894 exp_debug_feature_changed(false, sk);
3898 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3899 bool changed = hci_dev_test_flag(hdev,
3900 HCI_ENABLE_LL_PRIVACY);
3902 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3905 exp_ll_privacy_feature_changed(false, hdev, sk);
3908 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3910 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3911 MGMT_OP_SET_EXP_FEATURE, 0,
/* Case 2: debug feature toggle (kernel debug builds only) */
3915 #ifdef CONFIG_BT_FEATURE_DEBUG
3916 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3920 /* Command requires to use the non-controller index */
3922 return mgmt_cmd_status(sk, hdev->id,
3923 MGMT_OP_SET_EXP_FEATURE,
3924 MGMT_STATUS_INVALID_INDEX);
3926 /* Parameters are limited to a single octet */
3927 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3928 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3929 MGMT_OP_SET_EXP_FEATURE,
3930 MGMT_STATUS_INVALID_PARAMS);
3932 /* Only boolean on/off is supported */
3933 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3934 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3935 MGMT_OP_SET_EXP_FEATURE,
3936 MGMT_STATUS_INVALID_PARAMS);
3938 val = !!cp->param[0];
3939 changed = val ? !bt_dbg_get() : bt_dbg_get();
3942 memcpy(rp.uuid, debug_uuid, 16);
3943 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3945 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3947 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3948 MGMT_OP_SET_EXP_FEATURE, 0,
3952 exp_debug_feature_changed(val, sk);
/* Case 3: LL privacy (RPA resolution) toggle */
3958 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3963 /* Command requires to use the controller index */
3965 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3966 MGMT_OP_SET_EXP_FEATURE,
3967 MGMT_STATUS_INVALID_INDEX);
3969 /* Changes can only be made when controller is powered down */
3970 if (hdev_is_powered(hdev))
3971 return mgmt_cmd_status(sk, hdev->id,
3972 MGMT_OP_SET_EXP_FEATURE,
3973 MGMT_STATUS_NOT_POWERED);
3975 /* Parameters are limited to a single octet */
3976 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3977 return mgmt_cmd_status(sk, hdev->id,
3978 MGMT_OP_SET_EXP_FEATURE,
3979 MGMT_STATUS_INVALID_PARAMS);
3981 /* Only boolean on/off is supported */
3982 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3983 return mgmt_cmd_status(sk, hdev->id,
3984 MGMT_OP_SET_EXP_FEATURE,
3985 MGMT_STATUS_INVALID_PARAMS);
3987 val = !!cp->param[0];
3990 changed = !hci_dev_test_flag(hdev,
3991 HCI_ENABLE_LL_PRIVACY);
3992 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
/* LL privacy is incompatible with software-based advertising */
3993 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
3995 /* Enable LL privacy + supported settings changed */
3996 flags = BIT(0) | BIT(1);
3998 changed = hci_dev_test_flag(hdev,
3999 HCI_ENABLE_LL_PRIVACY);
4000 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4002 /* Disable LL privacy + supported settings changed */
4006 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4007 rp.flags = cpu_to_le32(flags);
4009 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4011 err = mgmt_cmd_complete(sk, hdev->id,
4012 MGMT_OP_SET_EXP_FEATURE, 0,
4016 exp_ll_privacy_feature_changed(val, hdev, sk);
4021 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4022 MGMT_OP_SET_EXP_FEATURE,
4023 MGMT_STATUS_NOT_SUPPORTED);
/* Bitmask of all device flags currently defined (bits 0..HCI_CONN_FLAG_MAX-1) */
4026 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* Handle MGMT_OP_GET_DEVICE_FLAGS: look up the per-device flags either in
 * the BR/EDR whitelist or in the LE connection parameters, and return
 * supported + current flag masks.
 */
4028 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4031 struct mgmt_cp_get_device_flags *cp = data;
4032 struct mgmt_rp_get_device_flags rp;
4033 struct bdaddr_list_with_flags *br_params;
4034 struct hci_conn_params *params;
4035 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4036 u32 current_flags = 0;
/* default status; flipped to SUCCESS only when the device is found */
4037 u8 status = MGMT_STATUS_INVALID_PARAMS;
4039 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4040 &cp->addr.bdaddr, cp->addr.type);
4044 memset(&rp, 0, sizeof(rp));
4046 if (cp->addr.type == BDADDR_BREDR) {
4047 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4053 current_flags = br_params->current_flags;
4055 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4056 le_addr_type(cp->addr.type));
4061 current_flags = params->current_flags;
4064 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4065 rp.addr.type = cp->addr.type;
4066 rp.supported_flags = cpu_to_le32(supported_flags);
4067 rp.current_flags = cpu_to_le32(current_flags);
4069 status = MGMT_STATUS_SUCCESS;
4072 hci_dev_unlock(hdev);
4074 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast a Device Flags Changed event to all mgmt sockets except the
 * one that made the change.
 */
4078 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4079 bdaddr_t *bdaddr, u8 bdaddr_type,
4080 u32 supported_flags, u32 current_flags)
4082 struct mgmt_ev_device_flags_changed ev;
4084 bacpy(&ev.addr.bdaddr, bdaddr);
4085 ev.addr.type = bdaddr_type;
4086 ev.supported_flags = cpu_to_le32(supported_flags);
4087 ev.current_flags = cpu_to_le32(current_flags);
4089 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* Handle MGMT_OP_SET_DEVICE_FLAGS: reject flags outside the supported
 * mask, store the new current_flags on the matching whitelist entry
 * (BR/EDR) or connection parameters (LE), and broadcast the change.
 */
4092 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4095 struct mgmt_cp_set_device_flags *cp = data;
4096 struct bdaddr_list_with_flags *br_params;
4097 struct hci_conn_params *params;
4098 u8 status = MGMT_STATUS_INVALID_PARAMS;
4099 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4100 u32 current_flags = __le32_to_cpu(cp->current_flags);
4102 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4103 &cp->addr.bdaddr, cp->addr.type,
4104 __le32_to_cpu(current_flags));
/* any bit outside the supported mask invalidates the whole request */
4106 if ((supported_flags | current_flags) != supported_flags) {
4107 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4108 current_flags, supported_flags);
4114 if (cp->addr.type == BDADDR_BREDR) {
4115 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4120 br_params->current_flags = current_flags;
4121 status = MGMT_STATUS_SUCCESS;
4123 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4124 &cp->addr.bdaddr, cp->addr.type);
4127 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4128 le_addr_type(cp->addr.type));
4130 params->current_flags = current_flags;
4131 status = MGMT_STATUS_SUCCESS;
4133 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4135 le_addr_type(cp->addr.type));
4140 hci_dev_unlock(hdev);
4142 if (status == MGMT_STATUS_SUCCESS)
4143 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4144 supported_flags, current_flags);
4146 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4147 &cp->addr, sizeof(cp->addr));
4150 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4153 struct mgmt_ev_adv_monitor_added ev;
4155 ev.monitor_handle = cpu_to_le16(handle);
4157 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4160 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4163 struct mgmt_ev_adv_monitor_added ev;
4165 ev.monitor_handle = cpu_to_le16(handle);
4167 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * monitor features plus the list of currently registered monitor handles.
 * NOTE(review): this extract is missing intermediate lines (locking, the
 * kmalloc NULL check, kfree/return tail); statements are not contiguous.
 */
4170 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4171 void *data, u16 len)
4173 struct adv_monitor *monitor = NULL;
4174 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4177 __u32 supported = 0;
4178 __u16 num_handles = 0;
4179 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4181 BT_DBG("request for %s", hdev->name);
/* Only advertise OR-pattern support when the MSFT extension provides it. */
4185 if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
4186 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* NOTE(review): no bound check on num_handles here — relies on the add path
 * never registering more than HCI_MAX_ADV_MONITOR_NUM_HANDLES monitors;
 * a defensive cap before the increment would be safer.
 */
4188 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
4189 handles[num_handles++] = monitor->handle;
4192 hci_dev_unlock(hdev);
/* Reply is variable-length: fixed header plus one u16 per handle. */
4194 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4195 rp = kmalloc(rp_size, GFP_KERNEL);
4199 /* Once controller-based monitoring is in place, the enabled_features
4200 * should reflect the use.
4202 rp->supported_features = cpu_to_le32(supported);
4203 rp->enabled_features = 0;
4204 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4205 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4206 rp->num_handles = cpu_to_le16(num_handles);
4208 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4210 err = mgmt_cmd_complete(sk, hdev->id,
4211 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4212 MGMT_STATUS_SUCCESS, rp, rp_size);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: parse a pattern list from the
 * command payload, build an adv_monitor, and register it with the core.
 * Emits MGMT_EV_ADV_MONITOR_ADDED on success.
 * NOTE(review): this extract is missing intermediate lines (goto labels,
 * kmalloc NULL checks, locking); statements are not contiguous.
 */
4219 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4220 void *data, u16 len)
4222 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4223 struct mgmt_rp_add_adv_patterns_monitor rp;
4224 struct adv_monitor *m = NULL;
4225 struct adv_pattern *p = NULL;
4226 unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
4227 __u8 cp_ofst = 0, cp_len = 0;
4230 BT_DBG("request for %s", hdev->name);
/* Payload must carry at least one pattern beyond the fixed header. */
4232 if (len <= sizeof(*cp) || cp->pattern_count == 0) {
4233 err = mgmt_cmd_status(sk, hdev->id,
4234 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4235 MGMT_STATUS_INVALID_PARAMS);
4239 m = kmalloc(sizeof(*m), GFP_KERNEL);
4245 INIT_LIST_HEAD(&m->patterns);
4248 for (i = 0; i < cp->pattern_count; i++) {
/* Cap the number of patterns per monitor. */
4249 if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
4250 err = mgmt_cmd_status(sk, hdev->id,
4251 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4252 MGMT_STATUS_INVALID_PARAMS);
/* Each pattern's offset+length must fit within one AD structure. */
4256 cp_ofst = cp->patterns[i].offset;
4257 cp_len = cp->patterns[i].length;
4258 if (cp_ofst >= HCI_MAX_AD_LENGTH ||
4259 cp_len > HCI_MAX_AD_LENGTH ||
4260 (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
4261 err = mgmt_cmd_status(sk, hdev->id,
4262 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4263 MGMT_STATUS_INVALID_PARAMS);
4267 p = kmalloc(sizeof(*p), GFP_KERNEL);
/* Length was validated above, so this copy stays within p->value. */
4273 p->ad_type = cp->patterns[i].ad_type;
4274 p->offset = cp->patterns[i].offset;
4275 p->length = cp->patterns[i].length;
4276 memcpy(p->value, cp->patterns[i].value, p->length);
4278 INIT_LIST_HEAD(&p->list);
4279 list_add(&p->list, &m->patterns);
/* Loop may exit early (presumably when len runs out — elided); reject a
 * mismatch against the advertised pattern_count.
 */
4282 if (mp_cnt != cp->pattern_count) {
4283 err = mgmt_cmd_status(sk, hdev->id,
4284 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4285 MGMT_STATUS_INVALID_PARAMS);
4291 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
/* Ownership of @m transfers to the core on success. */
4293 err = hci_add_adv_monitor(hdev, m);
4295 if (err == -ENOSPC) {
4296 mgmt_cmd_status(sk, hdev->id,
4297 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4298 MGMT_STATUS_NO_RESOURCES);
/* Count increase is the signal that the monitor was actually added. */
4303 if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
4304 mgmt_adv_monitor_added(sk, hdev, m->handle);
4306 hci_dev_unlock(hdev);
4308 rp.monitor_handle = cpu_to_le16(m->handle);
4310 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4311 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Error path: release the partially built monitor (frees its patterns). */
4314 hci_dev_unlock(hdev);
4317 hci_free_adv_monitor(m);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: unregister the monitor identified by
 * the handle in the command payload and emit MGMT_EV_ADV_MONITOR_REMOVED.
 * NOTE(review): this extract is missing intermediate lines (locking, error
 * tail); statements are not contiguous.
 */
4321 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4322 void *data, u16 len)
4324 struct mgmt_cp_remove_adv_monitor *cp = data;
4325 struct mgmt_rp_remove_adv_monitor rp;
4326 unsigned int prev_adv_monitors_cnt;
4330 BT_DBG("request for %s", hdev->name);
4334 handle = __le16_to_cpu(cp->monitor_handle);
4335 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4337 err = hci_remove_adv_monitor(hdev, handle);
4338 if (err == -ENOENT) {
4339 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4340 MGMT_STATUS_INVALID_INDEX);
/* Count decrease is the signal that a monitor was actually removed. */
4344 if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
4345 mgmt_adv_monitor_removed(sk, hdev, handle);
4347 hci_dev_unlock(hdev);
/* Echo the handle back in wire (little-endian) order, unconverted. */
4349 rp.monitor_handle = cp->monitor_handle;
4351 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4352 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4355 hci_dev_unlock(hdev);
/* HCI completion callback for Read Local OOB (Ext) Data: translate the
 * controller reply into the pending MGMT_OP_READ_LOCAL_OOB_DATA response.
 * NOTE(review): this extract is missing intermediate lines (unlock/goto
 * paths, braces); statements are not contiguous.
 */
4359 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4360 u16 opcode, struct sk_buff *skb)
4362 struct mgmt_rp_read_local_oob_data mgmt_rp;
4363 size_t rp_size = sizeof(mgmt_rp);
4364 struct mgmt_pending_cmd *cmd;
4366 bt_dev_dbg(hdev, "status %u", status);
/* Nothing to do if no command is pending (e.g. already completed). */
4368 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4372 if (status || !skb) {
4373 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4374 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4378 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
/* Legacy (P-192 only) reply: validate length before touching skb data. */
4380 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4381 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
4383 if (skb->len < sizeof(*rp)) {
4384 mgmt_cmd_status(cmd->sk, hdev->id,
4385 MGMT_OP_READ_LOCAL_OOB_DATA,
4386 MGMT_STATUS_FAILED);
4390 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4391 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Shrink the mgmt reply so the (absent) P-256 fields are not sent. */
4393 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
/* Extended reply carries both P-192 and P-256 values. */
4395 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4397 if (skb->len < sizeof(*rp)) {
4398 mgmt_cmd_status(cmd->sk, hdev->id,
4399 MGMT_OP_READ_LOCAL_OOB_DATA,
4400 MGMT_STATUS_FAILED);
4404 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4405 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4407 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4408 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4411 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4412 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4415 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queue the HCI request to read local
 * OOB data (extended variant when BR/EDR Secure Connections is enabled);
 * the reply is delivered from read_local_oob_data_complete().
 * NOTE(review): this extract is missing intermediate lines (locking, the
 * mgmt_pending_add NULL check); statements are not contiguous.
 */
4418 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4419 void *data, u16 data_len)
4421 struct mgmt_pending_cmd *cmd;
4422 struct hci_request req;
4425 bt_dev_dbg(hdev, "sock %p", sk);
4429 if (!hdev_is_powered(hdev)) {
4430 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4431 MGMT_STATUS_NOT_POWERED);
/* OOB data is an SSP feature; bail out on pre-SSP controllers. */
4435 if (!lmp_ssp_capable(hdev)) {
4436 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4437 MGMT_STATUS_NOT_SUPPORTED);
/* Only one read may be in flight at a time. */
4441 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4442 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4447 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4453 hci_req_init(&req, hdev);
4455 if (bredr_sc_enabled(hdev))
4456 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4458 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
/* On submission failure drop the pending entry; otherwise the completion
 * callback removes it.
 */
4460 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4462 mgmt_pending_remove(cmd);
4465 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote OOB pairing data.
 * Two payload sizes are accepted: legacy (P-192 only, BR/EDR) and extended
 * (P-192 + P-256, BR/EDR or LE).
 * NOTE(review): this extract is missing intermediate lines (locking, else
 * branches, goto tails); statements are not contiguous.
 */
4469 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4470 void *data, u16 len)
4472 struct mgmt_addr_info *addr = data;
4475 bt_dev_dbg(hdev, "sock %p", sk);
4477 if (!bdaddr_type_is_valid(addr->type))
4478 return mgmt_cmd_complete(sk, hdev->id,
4479 MGMT_OP_ADD_REMOTE_OOB_DATA,
4480 MGMT_STATUS_INVALID_PARAMS,
4481 addr, sizeof(*addr));
/* Legacy variant: P-192 hash/rand only, BR/EDR addresses only. */
4485 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4486 struct mgmt_cp_add_remote_oob_data *cp = data;
4489 if (cp->addr.type != BDADDR_BREDR) {
4490 err = mgmt_cmd_complete(sk, hdev->id,
4491 MGMT_OP_ADD_REMOTE_OOB_DATA,
4492 MGMT_STATUS_INVALID_PARAMS,
4493 &cp->addr, sizeof(cp->addr));
4497 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4498 cp->addr.type, cp->hash,
4499 cp->rand, NULL, NULL);
4501 status = MGMT_STATUS_FAILED;
4503 status = MGMT_STATUS_SUCCESS;
4505 err = mgmt_cmd_complete(sk, hdev->id,
4506 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4507 &cp->addr, sizeof(cp->addr));
/* Extended variant: carries both P-192 and P-256 values. */
4508 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4509 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4510 u8 *rand192, *hash192, *rand256, *hash256;
4513 if (bdaddr_type_is_le(cp->addr.type)) {
4514 /* Enforce zero-valued 192-bit parameters as
4515 * long as legacy SMP OOB isn't implemented.
4517 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4518 memcmp(cp->hash192, ZERO_KEY, 16)) {
4519 err = mgmt_cmd_complete(sk, hdev->id,
4520 MGMT_OP_ADD_REMOTE_OOB_DATA,
4521 MGMT_STATUS_INVALID_PARAMS,
4522 addr, sizeof(*addr));
4529 /* In case one of the P-192 values is set to zero,
4530 * then just disable OOB data for P-192.
4532 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4533 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4537 rand192 = cp->rand192;
4538 hash192 = cp->hash192;
4542 /* In case one of the P-256 values is set to zero, then just
4543 * disable OOB data for P-256.
4545 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4546 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4550 rand256 = cp->rand256;
4551 hash256 = cp->hash256;
4554 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4555 cp->addr.type, hash192, rand192,
4558 status = MGMT_STATUS_FAILED;
4560 status = MGMT_STATUS_SUCCESS;
4562 err = mgmt_cmd_complete(sk, hdev->id,
4563 MGMT_OP_ADD_REMOTE_OOB_DATA,
4564 status, &cp->addr, sizeof(cp->addr));
/* Any other length is a malformed command. */
4566 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4568 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4569 MGMT_STATUS_INVALID_PARAMS);
4573 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored remote OOB data
 * for one BR/EDR address, or all of it when BDADDR_ANY is given.
 * NOTE(review): this extract is missing intermediate lines (locking, goto
 * label); statements are not contiguous.
 */
4577 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4578 void *data, u16 len)
4580 struct mgmt_cp_remove_remote_oob_data *cp = data;
4584 bt_dev_dbg(hdev, "sock %p", sk);
4586 if (cp->addr.type != BDADDR_BREDR)
4587 return mgmt_cmd_complete(sk, hdev->id,
4588 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4589 MGMT_STATUS_INVALID_PARAMS,
4590 &cp->addr, sizeof(cp->addr));
/* Wildcard address clears the whole OOB store. */
4594 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4595 hci_remote_oob_data_clear(hdev);
4596 status = MGMT_STATUS_SUCCESS;
4600 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4602 status = MGMT_STATUS_INVALID_PARAMS;
4604 status = MGMT_STATUS_SUCCESS;
4607 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4608 status, &cp->addr, sizeof(cp->addr));
4610 hci_dev_unlock(hdev);
/* Called when a start-discovery HCI sequence completes: finish whichever
 * of the three start-discovery variants is pending, then release any
 * suspend task waiting on discovery being unpaused.
 * NOTE(review): this extract is missing intermediate lines (lock, NULL
 * fallbacks between pending_find calls); statements are not contiguous.
 */
4614 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4616 struct mgmt_pending_cmd *cmd;
4618 bt_dev_dbg(hdev, "status %d", status);
/* Try each discovery opcode in turn; only one can be pending. */
4622 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4624 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4627 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4630 cmd->cmd_complete(cmd, mgmt_status(status));
4631 mgmt_pending_remove(cmd);
4634 hci_dev_unlock(hdev);
4636 /* Handle suspend notifier */
4637 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4638 hdev->suspend_tasks)) {
4639 bt_dev_dbg(hdev, "Unpaused discovery");
4640 wake_up(&hdev->suspend_wait_q);
/* Validate a requested discovery type against controller capabilities,
 * writing the mgmt status code through @mgmt_status on rejection.
 * NOTE(review): the switch body and the early-return checks between cases
 * are elided in this extract; INTERLEAVED presumably falls through to the
 * BREDR check after verifying LE support — confirm against the full file.
 */
4644 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4645 uint8_t *mgmt_status)
4648 case DISCOV_TYPE_LE:
4649 *mgmt_status = mgmt_le_support(hdev);
4653 case DISCOV_TYPE_INTERLEAVED:
4654 *mgmt_status = mgmt_le_support(hdev);
4658 case DISCOV_TYPE_BREDR:
4659 *mgmt_status = mgmt_bredr_support(hdev);
/* Unknown discovery types are rejected outright. */
4664 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation behind Start Discovery and Start Limited
 * Discovery: validate state, configure hdev->discovery, register the
 * pending command and kick the discov_update work.
 * NOTE(review): this extract is missing intermediate lines (locking, the
 * mgmt_pending_add NULL check, goto tails); statements are not contiguous.
 */
4671 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4672 u16 op, void *data, u16 len)
4674 struct mgmt_cp_start_discovery *cp = data;
4675 struct mgmt_pending_cmd *cmd;
4679 bt_dev_dbg(hdev, "sock %p", sk);
4683 if (!hdev_is_powered(hdev)) {
4684 err = mgmt_cmd_complete(sk, hdev->id, op,
4685 MGMT_STATUS_NOT_POWERED,
4686 &cp->type, sizeof(cp->type));
/* Reject when discovery is already running or periodic inquiry active. */
4690 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4691 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4692 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4693 &cp->type, sizeof(cp->type));
4697 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4698 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4699 &cp->type, sizeof(cp->type));
4703 /* Can't start discovery when it is paused */
4704 if (hdev->discovery_paused) {
4705 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4706 &cp->type, sizeof(cp->type));
4710 /* Clear the discovery filter first to free any previously
4711 * allocated memory for the UUID list.
4713 hci_discovery_filter_clear(hdev);
4715 hdev->discovery.type = cp->type;
4716 hdev->discovery.report_invalid_rssi = false;
/* Limited discovery filters results by the limited-discoverable flag. */
4717 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4718 hdev->discovery.limited = true;
4720 hdev->discovery.limited = false;
4722 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4728 cmd->cmd_complete = generic_cmd_complete;
/* Actual HCI work happens asynchronously in discov_update. */
4730 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4731 queue_work(hdev->req_workqueue, &hdev->discov_update);
4735 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: thin wrapper over the shared
 * start_discovery_internal() path (trailing arguments elided here).
 */
4739 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4740 void *data, u16 len)
4742 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY handler: same shared path, but the op
 * code makes start_discovery_internal() set hdev->discovery.limited.
 */
4746 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4747 void *data, u16 len)
4749 return start_discovery_internal(sk, hdev,
4750 MGMT_OP_START_LIMITED_DISCOVERY,
/* Completion callback for Start Service Discovery: replies with the
 * command's stored parameter (trailing arguments elided in this extract).
 */
4754 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4757 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering by RSSI threshold and an optional UUID list appended
 * to the command payload.
 * NOTE(review): this extract is missing intermediate lines (locking,
 * sizeof(cp->type) arguments, mgmt_pending_add NULL check, goto tails);
 * statements are not contiguous.
 */
4761 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4762 void *data, u16 len)
4764 struct mgmt_cp_start_service_discovery *cp = data;
4765 struct mgmt_pending_cmd *cmd;
/* Largest UUID count that keeps the total payload within u16 length. */
4766 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4767 u16 uuid_count, expected_len;
4771 bt_dev_dbg(hdev, "sock %p", sk);
4775 if (!hdev_is_powered(hdev)) {
4776 err = mgmt_cmd_complete(sk, hdev->id,
4777 MGMT_OP_START_SERVICE_DISCOVERY,
4778 MGMT_STATUS_NOT_POWERED,
4779 &cp->type, sizeof(cp->type));
4783 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4784 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4785 err = mgmt_cmd_complete(sk, hdev->id,
4786 MGMT_OP_START_SERVICE_DISCOVERY,
4787 MGMT_STATUS_BUSY, &cp->type,
/* Validate the declared UUID count before trusting the payload length. */
4792 uuid_count = __le16_to_cpu(cp->uuid_count);
4793 if (uuid_count > max_uuid_count) {
4794 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
4796 err = mgmt_cmd_complete(sk, hdev->id,
4797 MGMT_OP_START_SERVICE_DISCOVERY,
4798 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Exact-length check: header plus 16 bytes per 128-bit UUID. */
4803 expected_len = sizeof(*cp) + uuid_count * 16;
4804 if (expected_len != len) {
4805 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
4807 err = mgmt_cmd_complete(sk, hdev->id,
4808 MGMT_OP_START_SERVICE_DISCOVERY,
4809 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4814 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4815 err = mgmt_cmd_complete(sk, hdev->id,
4816 MGMT_OP_START_SERVICE_DISCOVERY,
4817 status, &cp->type, sizeof(cp->type));
4821 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4828 cmd->cmd_complete = service_discovery_cmd_complete;
4830 /* Clear the discovery filter first to free any previously
4831 * allocated memory for the UUID list.
4833 hci_discovery_filter_clear(hdev);
4835 hdev->discovery.result_filtering = true;
4836 hdev->discovery.type = cp->type;
4837 hdev->discovery.rssi = cp->rssi;
4838 hdev->discovery.uuid_count = uuid_count;
/* Copy the UUID filter list; freed later by hci_discovery_filter_clear. */
4840 if (uuid_count > 0) {
4841 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4843 if (!hdev->discovery.uuids) {
4844 err = mgmt_cmd_complete(sk, hdev->id,
4845 MGMT_OP_START_SERVICE_DISCOVERY,
4847 &cp->type, sizeof(cp->type));
4848 mgmt_pending_remove(cmd);
4853 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4854 queue_work(hdev->req_workqueue, &hdev->discov_update);
4858 hci_dev_unlock(hdev);
/* Called when the stop-discovery HCI sequence finishes: complete the
 * pending Stop Discovery command and release any suspend task waiting for
 * discovery to pause.
 * NOTE(review): lock and NULL-check lines are elided in this extract.
 */
4862 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4864 struct mgmt_pending_cmd *cmd;
4866 bt_dev_dbg(hdev, "status %d", status);
4870 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4872 cmd->cmd_complete(cmd, mgmt_status(status));
4873 mgmt_pending_remove(cmd);
4876 hci_dev_unlock(hdev);
4878 /* Handle suspend notifier */
4879 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
4880 bt_dev_dbg(hdev, "Paused discovery");
4881 wake_up(&hdev->suspend_wait_q);
/* MGMT_OP_STOP_DISCOVERY handler: validate that a matching discovery is
 * active, then schedule the asynchronous stop via discov_update.
 * NOTE(review): locking and mgmt_pending_add NULL-check lines are elided
 * in this extract.
 */
4885 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4888 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4889 struct mgmt_pending_cmd *cmd;
4892 bt_dev_dbg(hdev, "sock %p", sk);
4896 if (!hci_discovery_active(hdev)) {
4897 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4898 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4899 sizeof(mgmt_cp->type));
/* The requested type must match the discovery actually running. */
4903 if (hdev->discovery.type != mgmt_cp->type) {
4904 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4905 MGMT_STATUS_INVALID_PARAMS,
4906 &mgmt_cp->type, sizeof(mgmt_cp->type));
4910 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4916 cmd->cmd_complete = generic_cmd_complete;
4918 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4919 queue_work(hdev->req_workqueue, &hdev->discov_update);
4923 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: user space confirms whether a discovered
 * device's name is already known, which decides whether a remote name
 * request is needed during discovery.
 * NOTE(review): locking and brace lines are elided in this extract.
 */
4927 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4930 struct mgmt_cp_confirm_name *cp = data;
4931 struct inquiry_entry *e;
4934 bt_dev_dbg(hdev, "sock %p", sk);
4938 if (!hci_discovery_active(hdev)) {
4939 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4940 MGMT_STATUS_FAILED, &cp->addr,
/* Only entries still awaiting name resolution can be confirmed. */
4945 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4947 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4948 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4953 if (cp->name_known) {
4954 e->name_state = NAME_KNOWN;
/* Name unknown: flag it as needed so resolution gets scheduled. */
4957 e->name_state = NAME_NEEDED;
4958 hci_inquiry_cache_update_resolve(hdev, e);
4961 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4962 &cp->addr, sizeof(cp->addr));
4965 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the reject list
 * (hdev->blacklist) and broadcast MGMT_EV_DEVICE_BLOCKED on success.
 * NOTE(review): locking and some argument lines are elided in this
 * extract; statements are not contiguous.
 */
4969 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4972 struct mgmt_cp_block_device *cp = data;
4976 bt_dev_dbg(hdev, "sock %p", sk);
4978 if (!bdaddr_type_is_valid(cp->addr.type))
4979 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4980 MGMT_STATUS_INVALID_PARAMS,
4981 &cp->addr, sizeof(cp->addr));
4985 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4988 status = MGMT_STATUS_FAILED;
/* Notify other mgmt sockets; the caller gets the command reply instead. */
4992 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4994 status = MGMT_STATUS_SUCCESS;
4997 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4998 &cp->addr, sizeof(cp->addr));
5000 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device() — remove the
 * address from the reject list and broadcast MGMT_EV_DEVICE_UNBLOCKED.
 * NOTE(review): locking and some argument lines are elided in this
 * extract; statements are not contiguous.
 */
5005 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5008 struct mgmt_cp_unblock_device *cp = data;
5012 bt_dev_dbg(hdev, "sock %p", sk);
5014 if (!bdaddr_type_is_valid(cp->addr.type))
5015 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5016 MGMT_STATUS_INVALID_PARAMS,
5017 &cp->addr, sizeof(cp->addr));
5021 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
/* Deleting an address that was never blocked is a parameter error. */
5024 status = MGMT_STATUS_INVALID_PARAMS;
5028 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5030 status = MGMT_STATUS_SUCCESS;
5033 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5034 &cp->addr, sizeof(cp->addr));
5036 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID (DI) record fields
 * and refresh the EIR data so the new identity is advertised.
 * NOTE(review): locking and some argument lines are elided in this
 * extract; statements are not contiguous.
 */
5041 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5044 struct mgmt_cp_set_device_id *cp = data;
5045 struct hci_request req;
5049 bt_dev_dbg(hdev, "sock %p", sk);
5051 source = __le16_to_cpu(cp->source);
/* Valid DI sources: 0x0000 disabled, 0x0001 SIG, 0x0002 USB-IF. */
5053 if (source > 0x0002)
5054 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5055 MGMT_STATUS_INVALID_PARAMS);
5059 hdev->devid_source = source;
5060 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5061 hdev->devid_product = __le16_to_cpu(cp->product);
5062 hdev->devid_version = __le16_to_cpu(cp->version);
5064 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Push updated EIR to the controller; completion is fire-and-forget. */
5067 hci_req_init(&req, hdev);
5068 __hci_req_update_eir(&req);
5069 hci_req_run(&req, NULL);
5071 hci_dev_unlock(hdev);
/* Completion callback used when re-enabling instance advertising; it only
 * logs the HCI status (remaining parameter line elided in this extract).
 */
5076 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5079 bt_dev_dbg(hdev, "status %d", status);
/* Completion handler for Set Advertising: sync the HCI_ADVERTISING flag
 * with reality, answer all pending commands, emit new settings, service
 * suspend notifiers, and re-arm instance advertising if applicable.
 * NOTE(review): locking, braces and goto labels are elided in this
 * extract; statements are not contiguous.
 */
5082 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5085 struct cmd_lookup match = { NULL, hdev };
5086 struct hci_request req;
5088 struct adv_info *adv_instance;
/* On failure, fail every pending Set Advertising command with the
 * translated HCI error.
 */
5094 u8 mgmt_err = mgmt_status(status);
5096 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5097 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's actual LE advertising state into the flag. */
5101 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5102 hci_dev_set_flag(hdev, HCI_ADVERTISING)
5104 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5106 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5109 new_settings(hdev, match.sk);
5114 /* Handle suspend notifier */
5115 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5116 hdev->suspend_tasks)) {
5117 bt_dev_dbg(hdev, "Paused advertising");
5118 wake_up(&hdev->suspend_wait_q);
5119 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5120 hdev->suspend_tasks)) {
5121 bt_dev_dbg(hdev, "Unpaused advertising");
5122 wake_up(&hdev->suspend_wait_q);
5125 /* If "Set Advertising" was just disabled and instance advertising was
5126 * set up earlier, then re-enable multi-instance advertising.
5128 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5129 list_empty(&hdev->adv_instances))
5132 instance = hdev->cur_adv_instance;
/* Fall back to the first configured instance if none is current. */
5134 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5135 struct adv_info, list);
5139 instance = adv_instance->instance;
5142 hci_req_init(&req, hdev);
5144 err = __hci_req_schedule_adv_instance(&req, instance, true);
5147 err = hci_req_run(&req, enable_advertising_instance);
5150 bt_dev_err(hdev, "failed to re-configure advertising");
5153 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: enable (0x01), enable-connectable
 * (0x02) or disable (0x00) LE advertising. Takes a fast path with no HCI
 * traffic when only flags need toggling; otherwise runs an HCI request
 * completed by set_advertising_complete().
 * NOTE(review): locking, braces, goto labels and some status arguments
 * are elided in this extract; statements are not contiguous.
 */
5156 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5159 struct mgmt_mode *cp = data;
5160 struct mgmt_pending_cmd *cmd;
5161 struct hci_request req;
5165 bt_dev_dbg(hdev, "sock %p", sk);
5167 status = mgmt_le_support(hdev);
5169 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5172 /* Enabling the experimental LL Privacy support disables support for
5175 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5176 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5177 MGMT_STATUS_NOT_SUPPORTED);
5179 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5180 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5181 MGMT_STATUS_INVALID_PARAMS);
5183 if (hdev->advertising_paused)
5184 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5191 /* The following conditions are ones which mean that we should
5192 * not do any HCI communication but directly send a mgmt
5193 * response to user space (after toggling the flag if
5196 if (!hdev_is_powered(hdev) ||
5197 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5198 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5199 hci_conn_num(hdev, LE_LINK) > 0 ||
5200 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5201 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5205 hdev->cur_adv_instance = 0x00;
5206 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5207 if (cp->val == 0x02)
5208 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5210 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5212 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5213 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5216 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Only broadcast new settings if a flag actually flipped. */
5221 err = new_settings(hdev, sk);
/* Reject when another Set Advertising / Set LE is already in flight. */
5226 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5227 pending_find(MGMT_OP_SET_LE, hdev)) {
5228 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5233 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5239 hci_req_init(&req, hdev);
5241 if (cp->val == 0x02)
5242 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5244 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5246 cancel_adv_timeout(hdev);
5249 /* Switch to instance "0" for the Set Advertising setting.
5250 * We cannot use update_[adv|scan_rsp]_data() here as the
5251 * HCI_ADVERTISING flag is not yet set.
5253 hdev->cur_adv_instance = 0x00;
5255 if (ext_adv_capable(hdev)) {
5256 __hci_req_start_ext_adv(&req, 0x00);
5258 __hci_req_update_adv_data(&req, 0x00);
5259 __hci_req_update_scan_rsp_data(&req, 0x00);
5260 __hci_req_enable_advertising(&req);
5263 __hci_req_disable_advertising(&req);
5266 err = hci_req_run(&req, set_advertising_complete);
5268 mgmt_pending_remove(cmd);
5271 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address; only allowed while powered off, and the address must be a
 * valid static random address per the Core spec.
 * NOTE(review): locking and brace lines are elided in this extract.
 */
5275 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5276 void *data, u16 len)
5278 struct mgmt_cp_set_static_address *cp = data;
5281 bt_dev_dbg(hdev, "sock %p", sk);
5283 if (!lmp_le_capable(hdev))
5284 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5285 MGMT_STATUS_NOT_SUPPORTED);
5287 if (hdev_is_powered(hdev))
5288 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5289 MGMT_STATUS_REJECTED);
/* BDADDR_ANY clears the static address; otherwise validate the value. */
5291 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5292 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5293 return mgmt_cmd_status(sk, hdev->id,
5294 MGMT_OP_SET_STATIC_ADDRESS,
5295 MGMT_STATUS_INVALID_PARAMS);
5297 /* Two most significant bits shall be set */
5298 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5299 return mgmt_cmd_status(sk, hdev->id,
5300 MGMT_OP_SET_STATIC_ADDRESS,
5301 MGMT_STATUS_INVALID_PARAMS);
5306 bacpy(&hdev->static_addr, &cp->bdaddr);
5308 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5312 err = new_settings(hdev, sk);
5315 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set the LE scan interval and window
 * (0.625 ms units, range 0x0004-0x4000 per the Core spec) and restart any
 * running background scan so the new values take effect.
 * NOTE(review): locking lines are elided in this extract.
 */
5319 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5320 void *data, u16 len)
5322 struct mgmt_cp_set_scan_params *cp = data;
5323 __u16 interval, window;
5326 bt_dev_dbg(hdev, "sock %p", sk);
5328 if (!lmp_le_capable(hdev))
5329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5330 MGMT_STATUS_NOT_SUPPORTED);
5332 interval = __le16_to_cpu(cp->interval);
5334 if (interval < 0x0004 || interval > 0x4000)
5335 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5336 MGMT_STATUS_INVALID_PARAMS);
5338 window = __le16_to_cpu(cp->window);
5340 if (window < 0x0004 || window > 0x4000)
5341 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5342 MGMT_STATUS_INVALID_PARAMS);
/* The scan window must fit inside the scan interval. */
5344 if (window > interval)
5345 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5346 MGMT_STATUS_INVALID_PARAMS);
5350 hdev->le_scan_interval = interval;
5351 hdev->le_scan_window = window;
5353 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5356 /* If background scan is running, restart it so new parameters are
5359 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5360 hdev->discovery.state == DISCOVERY_STOPPED) {
5361 struct hci_request req;
5363 hci_req_init(&req, hdev);
5365 hci_req_add_le_scan_disable(&req, false);
5366 hci_req_add_le_passive_scan(&req);
5368 hci_req_run(&req, NULL);
5371 hci_dev_unlock(hdev);
/* Completion handler for Set Fast Connectable: on success, sync the
 * HCI_FAST_CONNECTABLE flag to the requested value and report the new
 * settings; on failure, return the translated HCI error.
 * NOTE(review): lock, NULL-check and brace lines are elided in this
 * extract.
 */
5376 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5379 struct mgmt_pending_cmd *cmd;
5381 bt_dev_dbg(hdev, "status 0x%02x", status);
5385 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5390 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5391 mgmt_status(status));
/* Success: the requested mode lives in the stored command parameter. */
5393 struct mgmt_mode *cp = cmd->param;
5396 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5398 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5400 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5401 new_settings(hdev, cmd->sk);
5404 mgmt_pending_remove(cmd);
5407 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable page scan
 * parameters; requires BR/EDR and Bluetooth 1.2 or later. Powered-off
 * devices just flip the flag; powered devices run an HCI request.
 * NOTE(review): locking, goto labels, status arguments and the
 * mgmt_pending_add NULL check are elided in this extract.
 */
5410 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5411 void *data, u16 len)
5413 struct mgmt_mode *cp = data;
5414 struct mgmt_pending_cmd *cmd;
5415 struct hci_request req;
5418 bt_dev_dbg(hdev, "sock %p", sk);
5420 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5421 hdev->hci_ver < BLUETOOTH_VER_1_2)
5422 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5423 MGMT_STATUS_NOT_SUPPORTED);
5425 if (cp->val != 0x00 && cp->val != 0x01)
5426 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5427 MGMT_STATUS_INVALID_PARAMS);
5431 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5432 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the flag already matches the requested mode. */
5437 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5438 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5443 if (!hdev_is_powered(hdev)) {
5444 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5445 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5447 new_settings(hdev, sk);
5451 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5458 hci_req_init(&req, hdev);
5460 __hci_req_write_fast_connectable(&req, cp->val);
5462 err = hci_req_run(&req, fast_connectable_complete);
5464 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5465 MGMT_STATUS_FAILED);
5466 mgmt_pending_remove(cmd);
5470 hci_dev_unlock(hdev);
/* Completion handler for Set BR/EDR: on HCI failure roll the
 * HCI_BREDR_ENABLED flag back and report the error; on success send the
 * settings response and broadcast the new settings.
 * NOTE(review): lock, NULL-check and brace lines are elided in this
 * extract.
 */
5475 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5477 struct mgmt_pending_cmd *cmd;
5479 bt_dev_dbg(hdev, "status 0x%02x", status);
5483 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5488 u8 mgmt_err = mgmt_status(status);
5490 /* We need to restore the flag if related HCI commands
5493 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5495 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5497 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5498 new_settings(hdev, cmd->sk);
5501 mgmt_pending_remove(cmd);
5504 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR on a dual-mode
 * controller. Disabling while powered is rejected, as is re-enabling when
 * a static address or Secure Connections makes the configuration invalid.
 * NOTE(review): locking, goto labels, status arguments and the
 * mgmt_pending_add NULL check are elided in this extract.
 */
5507 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5509 struct mgmt_mode *cp = data;
5510 struct mgmt_pending_cmd *cmd;
5511 struct hci_request req;
5514 bt_dev_dbg(hdev, "sock %p", sk);
5516 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5517 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5518 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR can only be toggled while LE stays enabled. */
5520 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5521 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5522 MGMT_STATUS_REJECTED);
5524 if (cp->val != 0x00 && cp->val != 0x01)
5525 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5526 MGMT_STATUS_INVALID_PARAMS);
5530 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5531 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: flip flags locally; disabling clears BR/EDR-only modes. */
5535 if (!hdev_is_powered(hdev)) {
5537 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5538 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5539 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5540 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5541 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5544 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5546 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5550 err = new_settings(hdev, sk);
5554 /* Reject disabling when powered on */
5556 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5557 MGMT_STATUS_REJECTED);
5560 /* When configuring a dual-mode controller to operate
5561 * with LE only and using a static address, then switching
5562 * BR/EDR back on is not allowed.
5564 * Dual-mode controllers shall operate with the public
5565 * address as its identity address for BR/EDR and LE. So
5566 * reject the attempt to create an invalid configuration.
5568 * The same restrictions applies when secure connections
5569 * has been enabled. For BR/EDR this is a controller feature
5570 * while for LE it is a host stack feature. This means that
5571 * switching BR/EDR back on when secure connections has been
5572 * enabled is not a supported transaction.
5574 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5575 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5576 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5577 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5578 MGMT_STATUS_REJECTED);
5583 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5584 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5589 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5595 /* We need to flip the bit already here so that
5596 * hci_req_update_adv_data generates the correct flags.
5598 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5600 hci_req_init(&req, hdev);
5602 __hci_req_write_fast_connectable(&req, false);
5603 __hci_req_update_scan(&req);
5605 /* Since only the advertising data flags will change, there
5606 * is no need to update the scan response data.
5608 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5610 err = hci_req_run(&req, set_bredr_complete);
5612 mgmt_pending_remove(cmd);
5615 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_SECURE_CONN: translate the
 * controller status into SC flag state and answer the pending command.
 */
5619 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5621 struct mgmt_pending_cmd *cmd;
5622 struct mgmt_mode *cp;
5624 bt_dev_dbg(hdev, "status %u", status);
5628 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
/* Controller rejected the write: report failure to the caller. */
5633 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5634 mgmt_status(status));
/* Requested value 0x00/0x01/0x02 maps to off / SC / SC-only. */
5642 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5643 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5646 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5647 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5650 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5651 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5655 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5656 new_settings(hdev, cmd->sk);
5659 mgmt_pending_remove(cmd);
5661 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: val 0x00 disables Secure
 * Connections, 0x01 enables it, 0x02 enables SC-only mode.
 */
5664 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5665 void *data, u16 len)
5667 struct mgmt_mode *cp = data;
5668 struct mgmt_pending_cmd *cmd;
5669 struct hci_request req;
5673 bt_dev_dbg(hdev, "sock %p", sk);
/* Needs either BR/EDR SC controller support or LE enabled. */
5675 if (!lmp_sc_capable(hdev) &&
5676 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5677 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5678 MGMT_STATUS_NOT_SUPPORTED);
/* On BR/EDR, SC is layered on SSP; SSP must be on first. */
5680 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5681 lmp_sc_capable(hdev) &&
5682 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5683 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5684 MGMT_STATUS_REJECTED);
5686 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5687 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5688 MGMT_STATUS_INVALID_PARAMS);
/* Powered off or LE-only: update flags in software only. */
5692 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5693 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5697 changed = !hci_dev_test_and_set_flag(hdev,
5699 if (cp->val == 0x02)
5700 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5702 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5704 changed = hci_dev_test_and_clear_flag(hdev,
5706 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5709 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5714 err = new_settings(hdev, sk);
5719 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5720 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state (both SC and SC-only match). */
5727 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5728 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5729 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5733 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
/* Ask the controller to toggle SC; flags updated on completion. */
5739 hci_req_init(&req, hdev);
5740 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5741 err = hci_req_run(&req, sc_enable_complete);
5743 mgmt_pending_remove(cmd);
5748 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 discard debug keys, 0x01 keep
 * them, 0x02 additionally generate debug keys (SSP debug mode).
 */
5752 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5753 void *data, u16 len)
5755 struct mgmt_mode *cp = data;
5756 bool changed, use_changed;
5759 bt_dev_dbg(hdev, "sock %p", sk);
5761 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5763 MGMT_STATUS_INVALID_PARAMS);
/* KEEP_DEBUG_KEYS is set for any non-zero value (elided guard). */
5768 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5770 changed = hci_dev_test_and_clear_flag(hdev,
5771 HCI_KEEP_DEBUG_KEYS);
/* USE_DEBUG_KEYS only for mode 0x02 (actively use debug keys). */
5773 if (cp->val == 0x02)
5774 use_changed = !hci_dev_test_and_set_flag(hdev,
5775 HCI_USE_DEBUG_KEYS);
5777 use_changed = hci_dev_test_and_clear_flag(hdev,
5778 HCI_USE_DEBUG_KEYS);
/* Push SSP debug mode to the controller only when powered and
 * SSP is enabled; otherwise the flag change alone suffices.
 */
5780 if (hdev_is_powered(hdev) && use_changed &&
5781 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5782 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5783 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5784 sizeof(mode), &mode);
5787 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5792 err = new_settings(hdev, sk);
5795 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: configure LE privacy (RPA usage) and
 * store the local IRK.  Only allowed while the controller is off.
 * privacy: 0x00 off, 0x01 on, 0x02 limited privacy.
 */
5799 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5802 struct mgmt_cp_set_privacy *cp = cp_data;
5806 bt_dev_dbg(hdev, "sock %p", sk);
5808 if (!lmp_le_capable(hdev))
5809 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5810 MGMT_STATUS_NOT_SUPPORTED);
5812 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
5813 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5814 MGMT_STATUS_INVALID_PARAMS);
/* Changing the identity resolving setup mid-flight is unsafe. */
5816 if (hdev_is_powered(hdev))
5817 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5818 MGMT_STATUS_REJECTED);
5822 /* If user space supports this command it is also expected to
5823 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5825 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: adopt the caller's IRK and force an RPA refresh. */
5828 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5829 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5830 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5831 hci_adv_instances_set_rpa_expired(hdev, true);
5832 if (cp->privacy == 0x02)
5833 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
5835 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: wipe the IRK and all privacy-related state. */
5837 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5838 memset(hdev->irk, 0, sizeof(hdev->irk));
5839 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5840 hci_adv_instances_set_rpa_expired(hdev, false);
5841 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
5844 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5849 err = new_settings(hdev, sk);
5852 hci_dev_unlock(hdev);
/* Validate the address type of a user-supplied IRK entry: public LE
 * addresses are always acceptable; random addresses must be static
 * (top two bits 0b11 per the Core Spec address format).
 */
5856 static bool irk_is_valid(struct mgmt_irk_info *irk)
5858 switch (irk->addr.type) {
5859 case BDADDR_LE_PUBLIC:
5862 case BDADDR_LE_RANDOM:
5863 /* Two most significant bits shall be set */
5864 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the kernel's set of Identity
 * Resolving Keys with the list supplied by user space.  The whole
 * payload is validated before any existing IRK is discarded.
 */
5872 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
/* Upper bound on irk_count so expected_len below cannot overflow u16. */
5875 struct mgmt_cp_load_irks *cp = cp_data;
5876 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5877 sizeof(struct mgmt_irk_info));
5878 u16 irk_count, expected_len;
5881 bt_dev_dbg(hdev, "sock %p", sk);
5883 if (!lmp_le_capable(hdev))
5884 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5885 MGMT_STATUS_NOT_SUPPORTED);
5887 irk_count = __le16_to_cpu(cp->irk_count);
5888 if (irk_count > max_irk_count) {
5889 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5891 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5892 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must exactly match the received length. */
5895 expected_len = struct_size(cp, irks, irk_count);
5896 if (expected_len != len) {
5897 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5899 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5900 MGMT_STATUS_INVALID_PARAMS);
5903 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* First pass: reject the whole command if any entry is invalid. */
5905 for (i = 0; i < irk_count; i++) {
5906 struct mgmt_irk_info *key = &cp->irks[i];
5908 if (!irk_is_valid(key))
5909 return mgmt_cmd_status(sk, hdev->id,
5911 MGMT_STATUS_INVALID_PARAMS);
/* Second pass: clear old IRKs, then add each non-blocked entry. */
5916 hci_smp_irks_clear(hdev);
5918 for (i = 0; i < irk_count; i++) {
5919 struct mgmt_irk_info *irk = &cp->irks[i];
5921 if (hci_is_blocked_key(hdev,
5922 HCI_BLOCKED_KEY_TYPE_IRK,
5924 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
5929 hci_add_irk(hdev, &irk->addr.bdaddr,
5930 le_addr_type(irk->addr.type), irk->val,
5934 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5936 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5938 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_ADVERTISING_PARAMS handler: store LE advertising
 * interval range, filter policy and advertising type on hdev.  Only
 * allowed while advertising is not currently enabled.
 */
5944 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
5945 void *data, u16 len)
5947 struct mgmt_cp_set_advertising_params *cp = data;
5952 BT_DBG("%s", hdev->name);
5954 if (!lmp_le_capable(hdev))
5955 return mgmt_cmd_status(sk, hdev->id,
5956 MGMT_OP_SET_ADVERTISING_PARAMS,
5957 MGMT_STATUS_NOT_SUPPORTED);
/* Parameters cannot be changed while advertising is active. */
5959 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
5960 return mgmt_cmd_status(sk, hdev->id,
5961 MGMT_OP_SET_ADVERTISING_PARAMS,
5964 min_interval = __le16_to_cpu(cp->interval_min);
5965 max_interval = __le16_to_cpu(cp->interval_max);
/* Enforce ordering and the 0x0020..0x4000 advertising interval
 * bounds used by the HCI LE Set Advertising Parameters command.
 */
5967 if (min_interval > max_interval ||
5968 min_interval < 0x0020 || max_interval > 0x4000)
5969 return mgmt_cmd_status(sk, hdev->id,
5970 MGMT_OP_SET_ADVERTISING_PARAMS,
5971 MGMT_STATUS_INVALID_PARAMS);
/* Values take effect the next time advertising is enabled. */
5975 hdev->le_adv_min_interval = min_interval;
5976 hdev->le_adv_max_interval = max_interval;
5977 hdev->adv_filter_policy = cp->filter_policy;
5978 hdev->adv_type = cp->type;
5980 err = mgmt_cmd_complete(sk, hdev->id,
5981 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
5983 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_ADVERTISING_DATA: forward
 * the controller status to the pending mgmt command and drop it.
 */
5988 static void set_advertising_data_complete(struct hci_dev *hdev,
5989 u8 status, u16 opcode)
5991 struct mgmt_cp_set_advertising_data *cp;
5992 struct mgmt_pending_cmd *cmd;
5994 BT_DBG("status 0x%02x", status);
5998 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
6005 mgmt_cmd_status(cmd->sk, hdev->id,
6006 MGMT_OP_SET_ADVERTISING_DATA,
6007 mgmt_status(status));
/* Success: echo the request parameters back as the response. */
6009 mgmt_cmd_complete(cmd->sk, hdev->id,
6010 MGMT_OP_SET_ADVERTISING_DATA, 0,
6013 mgmt_pending_remove(cmd);
6016 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_ADVERTISING_DATA handler: push raw advertising
 * data (up to HCI_MAX_AD_LENGTH bytes) to the controller via
 * HCI_OP_LE_SET_ADV_DATA.
 */
6019 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
6020 void *data, u16 len)
6022 struct mgmt_pending_cmd *cmd;
6023 struct hci_request req;
6024 struct mgmt_cp_set_advertising_data *cp = data;
6025 struct hci_cp_le_set_adv_data adv;
6028 BT_DBG("%s", hdev->name);
6030 if (!lmp_le_capable(hdev)) {
6031 return mgmt_cmd_status(sk, hdev->id,
6032 MGMT_OP_SET_ADVERTISING_DATA,
6033 MGMT_STATUS_NOT_SUPPORTED);
/* Serialize: only one SET_ADVERTISING_DATA in flight at a time. */
6038 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
6039 err = mgmt_cmd_status(sk, hdev->id,
6040 MGMT_OP_SET_ADVERTISING_DATA,
/* The mgmt payload length is used directly as the AD length. */
6045 if (len > HCI_MAX_AD_LENGTH) {
6046 err = mgmt_cmd_status(sk, hdev->id,
6047 MGMT_OP_SET_ADVERTISING_DATA,
6048 MGMT_STATUS_INVALID_PARAMS);
6052 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
6059 hci_req_init(&req, hdev);
/* Zero-fill so unused trailing AD bytes are deterministic. */
6061 memset(&adv, 0, sizeof(adv));
6062 memcpy(adv.data, cp->data, len);
6065 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
6067 err = hci_req_run(&req, set_advertising_data_complete);
6069 mgmt_pending_remove(cmd);
6072 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_SET_SCAN_RSP_DATA: report the
 * controller status to the pending mgmt command and drop it.
 */
6077 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
6080 struct mgmt_cp_set_scan_rsp_data *cp;
6081 struct mgmt_pending_cmd *cmd;
6083 BT_DBG("status 0x%02x", status);
6087 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
6094 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6095 mgmt_status(status));
/* Success: echo the request parameters back as the response. */
6097 mgmt_cmd_complete(cmd->sk, hdev->id,
6098 MGMT_OP_SET_SCAN_RSP_DATA, 0,
6101 mgmt_pending_remove(cmd);
6104 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_SCAN_RSP_DATA handler: push raw scan response
 * data (up to HCI_MAX_AD_LENGTH bytes) to the controller via
 * HCI_OP_LE_SET_SCAN_RSP_DATA.  Mirrors set_advertising_data().
 */
6107 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6110 struct mgmt_pending_cmd *cmd;
6111 struct hci_request req;
6112 struct mgmt_cp_set_scan_rsp_data *cp = data;
6113 struct hci_cp_le_set_scan_rsp_data rsp;
6116 BT_DBG("%s", hdev->name);
6118 if (!lmp_le_capable(hdev))
6119 return mgmt_cmd_status(sk, hdev->id,
6120 MGMT_OP_SET_SCAN_RSP_DATA,
6121 MGMT_STATUS_NOT_SUPPORTED);
/* Serialize: only one SET_SCAN_RSP_DATA in flight at a time. */
6125 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6126 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6131 if (len > HCI_MAX_AD_LENGTH) {
6132 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6133 MGMT_STATUS_INVALID_PARAMS);
6137 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6143 hci_req_init(&req, hdev);
/* Zero-fill so unused trailing bytes are deterministic. */
6145 memset(&rsp, 0, sizeof(rsp));
6146 memcpy(rsp.data, cp->data, len);
6149 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6151 err = hci_req_run(&req, set_scan_rsp_data_complete);
6153 mgmt_pending_remove(cmd);
6156 hci_dev_unlock(hdev);
6161 /* Adv White List feature */
/* HCI request completion for MGMT_OP_ADD_DEV_WHITE_LIST: report the
 * controller status and, on success, echo the request parameters.
 */
6162 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6164 struct mgmt_cp_add_dev_white_list *cp;
6165 struct mgmt_pending_cmd *cmd;
6167 BT_DBG("status 0x%02x", status);
6171 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
6178 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6179 mgmt_status(status));
6181 mgmt_cmd_complete(cmd->sk, hdev->id,
6182 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
6184 mgmt_pending_remove(cmd);
6187 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_ADD_DEV_WHITE_LIST handler: add a device to the LE
 * accept (white) list via HCI_OP_LE_ADD_TO_WHITE_LIST.  The mgmt
 * parameter block is forwarded to the controller as-is.
 */
6190 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
6191 void *data, u16 len)
6193 struct mgmt_pending_cmd *cmd;
6194 struct mgmt_cp_add_dev_white_list *cp = data;
6195 struct hci_request req;
6198 BT_DBG("%s", hdev->name);
6200 if (!lmp_le_capable(hdev))
6201 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6202 MGMT_STATUS_NOT_SUPPORTED);
/* The white list lives in the controller; it must be powered. */
6204 if (!hdev_is_powered(hdev))
6205 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6206 MGMT_STATUS_REJECTED);
6210 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
6211 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
6216 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
6222 hci_req_init(&req, hdev);
6224 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
6226 err = hci_req_run(&req, add_white_list_complete);
6228 mgmt_pending_remove(cmd);
6233 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST:
 * report the controller status to the pending mgmt command.
 */
6238 static void remove_from_white_list_complete(struct hci_dev *hdev,
6239 u8 status, u16 opcode)
6241 struct mgmt_cp_remove_dev_from_white_list *cp;
6242 struct mgmt_pending_cmd *cmd;
6244 BT_DBG("status 0x%02x", status);
6248 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
6255 mgmt_cmd_status(cmd->sk, hdev->id,
6256 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6257 mgmt_status(status));
6259 mgmt_cmd_complete(cmd->sk, hdev->id,
6260 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
6263 mgmt_pending_remove(cmd);
6266 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST handler: remove a device
 * from the LE accept (white) list via HCI_OP_LE_DEL_FROM_WHITE_LIST.
 */
6269 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
6270 void *data, u16 len)
6272 struct mgmt_pending_cmd *cmd;
6273 struct mgmt_cp_remove_dev_from_white_list *cp = data;
6274 struct hci_request req;
6277 BT_DBG("%s", hdev->name);
6279 if (!lmp_le_capable(hdev))
6280 return mgmt_cmd_status(sk, hdev->id,
6281 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6282 MGMT_STATUS_NOT_SUPPORTED);
/* The white list lives in the controller; it must be powered. */
6284 if (!hdev_is_powered(hdev))
6285 return mgmt_cmd_status(sk, hdev->id,
6286 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6287 MGMT_STATUS_REJECTED);
6291 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
6292 err = mgmt_cmd_status(sk, hdev->id,
6293 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6298 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
6305 hci_req_init(&req, hdev);
6307 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
6309 err = hci_req_run(&req, remove_from_white_list_complete);
6311 mgmt_pending_remove(cmd);
6316 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_CLEAR_DEV_WHITE_LIST: report the
 * controller status to the pending mgmt command.
 */
6321 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
6324 struct mgmt_pending_cmd *cmd;
6326 BT_DBG("status 0x%02x", status);
6330 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
6335 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6336 mgmt_status(status));
6338 mgmt_cmd_complete(cmd->sk, hdev->id,
6339 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6342 mgmt_pending_remove(cmd);
6345 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_CLEAR_DEV_WHITE_LIST handler: wipe the controller's
 * LE accept (white) list via HCI_OP_LE_CLEAR_WHITE_LIST.
 */
6348 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
6349 void *data, u16 len)
6351 struct mgmt_pending_cmd *cmd;
6352 struct hci_request req;
6355 BT_DBG("%s", hdev->name);
6357 if (!lmp_le_capable(hdev))
6358 return mgmt_cmd_status(sk, hdev->id,
6359 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6360 MGMT_STATUS_NOT_SUPPORTED);
6362 if (!hdev_is_powered(hdev))
6363 return mgmt_cmd_status(sk, hdev->id,
6364 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6365 MGMT_STATUS_REJECTED);
6369 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
6370 err = mgmt_cmd_status(sk, hdev->id,
6371 MGMT_OP_CLEAR_DEV_WHITE_LIST,
6376 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
6383 hci_req_init(&req, hdev);
/* Clear command carries no parameters. */
6385 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
6387 err = hci_req_run(&req, clear_white_list_complete);
6389 mgmt_pending_remove(cmd);
6394 hci_dev_unlock(hdev);
/* HCI request completion for the RSSI-threshold vendor command issued
 * by mgmt_set_rssi_threshold(): answer the pending SET_RSSI_ENABLE.
 */
6399 static void set_rssi_threshold_complete(struct hci_dev *hdev,
6400 u8 status, u16 opcode)
6402 struct mgmt_pending_cmd *cmd;
6404 BT_DBG("status 0x%02x", status);
6408 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6413 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6414 mgmt_status(status));
6416 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6419 mgmt_pending_remove(cmd);
6422 hci_dev_unlock(hdev);
/* HCI request completion for the RSSI-disable vendor command: answer
 * the pending SET_RSSI_DISABLE mgmt command.
 */
6425 static void set_rssi_disable_complete(struct hci_dev *hdev,
6426 u8 status, u16 opcode)
6428 struct mgmt_pending_cmd *cmd;
6430 BT_DBG("status 0x%02x", status);
6434 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6439 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6440 mgmt_status(status));
6442 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6445 mgmt_pending_remove(cmd);
6448 hci_dev_unlock(hdev);
/* Tizen vendor RSSI monitoring: program per-connection RSSI alert
 * thresholds on the controller (HCI_OP_ENABLE_RSSI, LE extended
 * sub-opcode 0x0B) for an existing ACL or LE link.
 */
6451 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6452 void *data, u16 len)
6455 struct hci_cp_set_rssi_threshold th = { 0, };
6456 struct mgmt_cp_set_enable_rssi *cp = data;
6457 struct hci_conn *conn;
6458 struct mgmt_pending_cmd *cmd;
6459 struct hci_request req;
/* NOTE(review): the guard between the lookup and the FAILED status
 * reply is elided in this extraction — presumably "if (!cmd)";
 * confirm against the full source.
 */
6464 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6466 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6467 MGMT_STATUS_FAILED);
6471 if (!lmp_le_capable(hdev)) {
6472 mgmt_pending_remove(cmd);
6473 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6474 MGMT_STATUS_NOT_SUPPORTED);
6478 if (!hdev_is_powered(hdev)) {
6479 BT_DBG("%s", hdev->name);
6480 mgmt_pending_remove(cmd);
6481 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6482 MGMT_STATUS_NOT_POWERED);
/* link_type 0x01 selects LE, anything else BR/EDR ACL. */
6486 if (cp->link_type == 0x01)
6487 dest_type = LE_LINK;
6489 dest_type = ACL_LINK;
6491 /* Get LE/ACL link handle info */
6492 conn = hci_conn_hash_lookup_ba(hdev,
6493 dest_type, &cp->bdaddr);
6496 err = mgmt_cmd_complete(sk, hdev->id,
6497 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0)_
6498 mgmt_pending_remove(cmd);
6502 hci_req_init(&req, hdev);
/* 0x0B = vendor sub-opcode for "set RSSI threshold"; alert on all
 * three threshold crossings (mask 0x07).
 */
6504 th.hci_le_ext_opcode = 0x0B;
6506 th.conn_handle = conn->handle;
6507 th.alert_mask = 0x07;
6508 th.low_th = cp->low_th;
6509 th.in_range_th = cp->in_range_th;
6510 th.high_th = cp->high_th;
6512 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6513 err = hci_req_run(&req, set_rssi_threshold_complete);
6516 mgmt_pending_remove(cmd);
6517 BT_ERR("Error in requesting hci_req_run");
6522 hci_dev_unlock(hdev);
/* Report successful RSSI-monitoring enable to user space: send the
 * command-complete reply plus an MGMT_EV_RSSI_ENABLED event, and mark
 * the connection's RSSI state as monitored.
 */
6526 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6527 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6529 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6530 struct mgmt_cp_set_enable_rssi *cp = data;
6531 struct mgmt_pending_cmd *cmd;
6536 mgmt_rp.status = rp->status;
6537 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6538 mgmt_rp.bt_address = cp->bdaddr;
6539 mgmt_rp.link_type = cp->link_type;
6541 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6542 MGMT_STATUS_SUCCESS, &mgmt_rp,
6543 sizeof(struct mgmt_cc_rsp_enable_rssi));
6545 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6546 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
/* Reset any stale per-link monitoring state, then flag this link. */
6548 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6549 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6550 &mgmt_rp.bt_address, true);
6554 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6556 mgmt_pending_remove(cmd);
6558 hci_dev_unlock(hdev);
/* Report successful RSSI-monitoring disable to user space: send the
 * command-complete reply plus an MGMT_EV_RSSI_DISABLED event, and
 * clear the connection's RSSI-monitored state.
 */
6561 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6562 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6564 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6565 struct mgmt_cp_disable_rssi *cp = data;
6566 struct mgmt_pending_cmd *cmd;
6571 mgmt_rp.status = rp->status;
6572 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6573 mgmt_rp.bt_address = cp->bdaddr;
6574 mgmt_rp.link_type = cp->link_type;
/* NOTE(review): mgmt_rp is a struct mgmt_cc_rp_disable_rssi, but
 * both sizes below use sizeof(struct mgmt_cc_rsp_enable_rssi) —
 * likely a copy/paste slip; harmless only if the two structs have
 * identical size.  Confirm and use sizeof(mgmt_rp).
 */
6576 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6577 MGMT_STATUS_SUCCESS, &mgmt_rp,
6578 sizeof(struct mgmt_cc_rsp_enable_rssi));
6580 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6581 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6583 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6584 &mgmt_rp.bt_address, false);
6588 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6590 mgmt_pending_remove(cmd);
6592 hci_dev_unlock(hdev);
/* Turn off the controller's vendor RSSI monitoring feature entirely
 * (HCI_OP_ENABLE_RSSI with the CS-features byte cleared).  Invoked
 * when the last monitored link is being disabled.
 */
6595 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6596 void *data, u16 len)
6598 struct mgmt_pending_cmd *cmd;
6599 struct hci_request req;
6600 struct hci_cp_set_enable_rssi cp_en = { 0, };
6603 BT_DBG("Set Disable RSSI.");
/* Sub-opcode 0x01 = enable/disable feature; 0x00 disables it. */
6605 cp_en.hci_le_ext_opcode = 0x01;
6606 cp_en.le_enable_cs_Features = 0x00;
6607 cp_en.data[0] = 0x00;
6608 cp_en.data[1] = 0x00;
6609 cp_en.data[2] = 0x00;
6613 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6615 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6616 MGMT_STATUS_FAILED);
6620 if (!lmp_le_capable(hdev)) {
6621 mgmt_pending_remove(cmd);
6622 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6623 MGMT_STATUS_NOT_SUPPORTED);
6627 if (!hdev_is_powered(hdev)) {
6628 BT_DBG("%s", hdev->name);
6629 mgmt_pending_remove(cmd);
6630 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6631 MGMT_STATUS_NOT_POWERED);
6635 hci_req_init(&req, hdev);
6637 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6638 sizeof(struct hci_cp_set_enable_rssi),
6639 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6640 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6642 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6643 err = hci_req_run(&req, set_rssi_disable_complete);
6646 mgmt_pending_remove(cmd);
6647 BT_ERR("Error in requesting hci_req_run");
6652 hci_dev_unlock(hdev);
/* Dispatch a vendor RSSI command-complete event: decide whether it
 * answers a pending SET_RSSI_ENABLE or SET_RSSI_DISABLE and drive the
 * two-step state machine (feature enable -> threshold set, and
 * threshold unset -> feature disable).
 */
6656 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6658 struct hci_cc_rsp_enable_rssi *rp = response;
6659 struct mgmt_pending_cmd *cmd_enable = NULL;
6660 struct mgmt_pending_cmd *cmd_disable = NULL;
6661 struct mgmt_cp_set_enable_rssi *cp_en;
6662 struct mgmt_cp_disable_rssi *cp_dis;
6665 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6666 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6667 hci_dev_unlock(hdev);
6670 BT_DBG("Enable Request");
6673 BT_DBG("Disable Request");
6676 cp_en = cmd_enable->param;
/* Enable flow: feature-enable CC triggers the threshold write;
 * threshold-set CC reports success to user space.
 */
6681 switch (rp->le_ext_opcode) {
6683 BT_DBG("RSSI enabled.. Setting Threshold...");
6684 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6685 cp_en, sizeof(*cp_en));
6689 BT_DBG("Sending RSSI enable success");
6690 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6691 cp_en, rp, rp->status);
6695 } else if (cmd_disable) {
6696 cp_dis = cmd_disable->param;
6701 switch (rp->le_ext_opcode) {
6703 BT_DBG("Sending RSSI disable success");
6704 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6705 cp_dis, rp, rp->status);
6710 * Only unset RSSI Threshold values for the Link if
6711 * RSSI is monitored for other BREDR or LE Links
6713 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6714 BT_DBG("Unset Threshold. Other links being monitored");
6715 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6716 cp_dis, rp, rp->status);
/* Last monitored link: fully disable the feature. */
6718 BT_DBG("Unset Threshold. Disabling...");
6719 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6720 cp_dis, sizeof(*cp_dis));
/* HCI request completion for the RSSI feature-enable vendor command:
 * answer the pending SET_RSSI_ENABLE mgmt command.
 */
6727 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
6730 struct mgmt_pending_cmd *cmd;
6732 BT_DBG("status 0x%02x", status);
6736 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6741 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6742 mgmt_status(status));
6744 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6747 mgmt_pending_remove(cmd);
6750 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_RSSI_ENABLE handler: switch on the controller's
 * vendor RSSI monitoring feature (HCI_OP_ENABLE_RSSI).  If monitoring
 * is already active on some link, skip straight to programming the
 * thresholds for the requested connection.
 */
6753 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
6754 void *data, u16 len)
6756 struct mgmt_pending_cmd *cmd;
6757 struct hci_request req;
6758 struct mgmt_cp_set_enable_rssi *cp = data;
6759 struct hci_cp_set_enable_rssi cp_en = { 0, };
6762 BT_DBG("Set Enable RSSI.");
/* Sub-opcode 0x01 = enable/disable feature; 0x04 enables it. */
6764 cp_en.hci_le_ext_opcode = 0x01;
6765 cp_en.le_enable_cs_Features = 0x04;
6766 cp_en.data[0] = 0x00;
6767 cp_en.data[1] = 0x00;
6768 cp_en.data[2] = 0x00;
6772 if (!lmp_le_capable(hdev)) {
6773 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6774 MGMT_STATUS_NOT_SUPPORTED);
6778 if (!hdev_is_powered(hdev)) {
6779 BT_DBG("%s", hdev->name);
6780 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6781 MGMT_STATUS_NOT_POWERED);
6785 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
6786 BT_DBG("%s", hdev->name);
6787 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6792 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
6795 BT_DBG("%s", hdev->name);
6800 /* If RSSI is already enabled directly set Threshold values */
6801 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
6802 hci_dev_unlock(hdev);
6803 BT_DBG("RSSI Enabled. Directly set Threshold");
6804 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
6808 hci_req_init(&req, hdev);
6810 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6811 sizeof(struct hci_cp_set_enable_rssi),
6812 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6813 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6815 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6816 err = hci_req_run(&req, set_rssi_enable_complete);
6819 mgmt_pending_remove(cmd);
6820 BT_ERR("Error in requesting hci_req_run");
6825 hci_dev_unlock(hdev);
/* HCI request completion for MGMT_OP_GET_RAW_RSSI: acknowledge the
 * pending command (the RSSI value itself arrives via
 * mgmt_raw_rssi_response()).
 */
6830 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6832 struct mgmt_pending_cmd *cmd;
6834 BT_DBG("status 0x%02x", status);
6838 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
6842 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6843 MGMT_STATUS_SUCCESS, &status, 1);
6845 mgmt_pending_remove(cmd);
6848 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_GET_RAW_RSSI handler: read the raw RSSI of an
 * existing ACL/LE connection via the vendor HCI_OP_GET_RAW_RSSI
 * command; the result is delivered as an MGMT_EV_RAW_RSSI event.
 */
6851 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
6854 struct mgmt_pending_cmd *cmd;
6855 struct hci_request req;
6856 struct mgmt_cp_get_raw_rssi *cp = data;
6857 struct hci_cp_get_raw_rssi hci_cp;
6859 struct hci_conn *conn;
6863 BT_DBG("Get Raw RSSI.");
6867 if (!lmp_le_capable(hdev)) {
6868 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6869 MGMT_STATUS_NOT_SUPPORTED);
/* link_type 0x01 selects LE, anything else BR/EDR ACL. */
6873 if (cp->link_type == 0x01)
6874 dest_type = LE_LINK;
6876 dest_type = ACL_LINK;
6878 /* Get LE/BREDR link handle info */
6879 conn = hci_conn_hash_lookup_ba(hdev,
6880 dest_type, &cp->bt_address);
6882 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6883 MGMT_STATUS_NOT_CONNECTED);
6886 hci_cp.conn_handle = conn->handle;
6888 if (!hdev_is_powered(hdev)) {
6889 BT_DBG("%s", hdev->name);
6890 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6891 MGMT_STATUS_NOT_POWERED);
/* Only one GET_RAW_RSSI may be in flight per controller. */
6895 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
6896 BT_DBG("%s", hdev->name);
6897 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6902 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
6904 BT_DBG("%s", hdev->name);
6909 hci_req_init(&req, hdev);
6911 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
6912 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
6913 err = hci_req_run(&req, get_raw_rssi_complete);
6916 mgmt_pending_remove(cmd);
6917 BT_ERR("Error in requesting hci_req_run");
6921 hci_dev_unlock(hdev);
/* Deliver a raw-RSSI controller response to user space as an
 * MGMT_EV_RAW_RSSI event, resolving the connection handle back to a
 * bdaddr + link type.
 */
6926 void mgmt_raw_rssi_response(struct hci_dev *hdev,
6927 struct hci_cc_rp_get_raw_rssi *rp, int success)
6929 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
6930 struct hci_conn *conn;
6932 mgmt_rp.status = rp->status;
6933 mgmt_rp.rssi_dbm = rp->rssi_dbm;
6935 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
6939 bacpy(&mgmt_rp.bt_address, &conn->dst);
/* Event link_type: 0x01 = LE, 0x00 = BR/EDR. */
6940 if (conn->type == LE_LINK)
6941 mgmt_rp.link_type = 0x01;
6943 mgmt_rp.link_type = 0x00;
6945 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
6946 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* HCI request completion for set_disable_threshold(): acknowledge the
 * pending SET_RSSI_DISABLE mgmt command with the raw status byte.
 */
6949 static void set_disable_threshold_complete(struct hci_dev *hdev,
6950 u8 status, u16 opcode)
6952 struct mgmt_pending_cmd *cmd;
6954 BT_DBG("status 0x%02x", status);
6958 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6962 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6963 MGMT_STATUS_SUCCESS, &status, 1);
6965 mgmt_pending_remove(cmd);
6968 hci_dev_unlock(hdev);
6971 /** Removes monitoring for a link*/
/* Tizen MGMT_OP_SET_RSSI_DISABLE handler: clear the per-connection
 * RSSI alert thresholds (vendor sub-opcode 0x0B with alert mask 0)
 * so the link is no longer monitored.
 */
6972 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
6973 void *data, u16 len)
6976 struct hci_cp_set_rssi_threshold th = { 0, };
6977 struct mgmt_cp_disable_rssi *cp = data;
6978 struct hci_conn *conn;
6979 struct mgmt_pending_cmd *cmd;
6980 struct hci_request req;
6983 BT_DBG("Set Disable RSSI.");
6987 if (!lmp_le_capable(hdev)) {
6988 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6989 MGMT_STATUS_NOT_SUPPORTED);
6993 /* Get LE/ACL link handle info*/
6994 if (cp->link_type == 0x01)
6995 dest_type = LE_LINK;
6997 dest_type = ACL_LINK;
6999 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
7001 err = mgmt_cmd_complete(sk, hdev->id,
7002 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
/* Mask 0x00 disables all RSSI alerts for this handle. */
7006 th.hci_le_ext_opcode = 0x0B;
7008 th.conn_handle = conn->handle;
7009 th.alert_mask = 0x00;
7011 th.in_range_th = 0x00;
7014 if (!hdev_is_powered(hdev)) {
7015 BT_DBG("%s", hdev->name);
7016 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7021 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
7022 BT_DBG("%s", hdev->name);
7023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7028 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
7031 BT_DBG("%s", hdev->name);
7036 hci_req_init(&req, hdev);
7038 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7039 err = hci_req_run(&req, set_disable_threshold_complete);
7041 mgmt_pending_remove(cmd);
7042 BT_ERR("Error in requesting hci_req_run");
7047 hci_dev_unlock(hdev);
/* Translate a vendor-specific RSSI alert HCI event into an
 * MGMT_EV_RSSI_ALERT event for user space, resolving the connection
 * handle to a bdaddr + link type.
 */
7052 void mgmt_rssi_alert_evt(struct hci_dev *hdev, struct sk_buff *skb)
7054 struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
7055 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
7056 struct hci_conn *conn;
7058 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
7059 ev->conn_handle, ev->alert_type, ev->rssi_dbm);
7061 conn = hci_conn_hash_lookup_handle(hdev, ev->conn_handle);
/* Link may already be gone; drop the alert in that case. */
7064 BT_ERR("RSSI alert Error: Device not found for handle");
7067 bacpy(&mgmt_ev.bdaddr, &conn->dst);
/* Event link_type: 0x01 = LE, 0x00 = BR/EDR. */
7069 if (conn->type == LE_LINK)
7070 mgmt_ev.link_type = 0x01;
7072 mgmt_ev.link_type = 0x00;
7074 mgmt_ev.alert_type = ev->alert_type;
7075 mgmt_ev.rssi_dbm = ev->rssi_dbm;
7077 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
7078 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
/* Abort a failed LE discovery start: reset discovery state to
 * STOPPED and complete the pending START_LE_DISCOVERY command with
 * the translated HCI status.
 */
7082 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
7084 struct mgmt_pending_cmd *cmd;
7088 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7090 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7094 type = hdev->le_discovery.type;
7096 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7097 mgmt_status(status), &type, sizeof(type));
7098 mgmt_pending_remove(cmd);
/* HCI request completion for LE discovery start: on success move to
 * DISCOVERY_FINDING and arm the delayed le_scan_disable work; on
 * failure unwind via mgmt_start_le_discovery_failed().
 */
7103 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
7106 unsigned long timeout = 0;
7108 BT_DBG("status %d", status);
7112 mgmt_start_le_discovery_failed(hdev, status);
7113 hci_dev_unlock(hdev);
7118 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
7119 hci_dev_unlock(hdev);
7121 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
7122 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
/* Schedule automatic scan stop after the discovery timeout. */
7127 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_LE_DISCOVERY handler (TIZEN separate LE discovery).
 * Validates power state, current discovery state and requested type,
 * then builds and runs an HCI request that (optionally) stops background
 * scanning, programs scan parameters and enables LE scanning.
 *
 * Fix: "&param_cp" had been mojibake-corrupted to "¶m_cp" (U+00B6);
 * restored the address-of operator so the memset compiles and zeroes the
 * scan-parameter command buffer as intended.
 */
7130 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
7131 void *data, u16 len)
7133 struct mgmt_cp_start_le_discovery *cp = data;
7134 struct mgmt_pending_cmd *cmd;
7135 struct hci_cp_le_set_scan_param param_cp;
7136 struct hci_cp_le_set_scan_enable enable_cp;
7137 struct hci_request req;
7138 u8 status, own_addr_type;
7141 BT_DBG("%s", hdev->name);
7145 if (!hdev_is_powered(hdev)) {
7146 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7147 MGMT_STATUS_NOT_POWERED);
/* Reject if an LE discovery is already starting/running. */
7151 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
7152 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7157 if (cp->type != DISCOV_TYPE_LE) {
7158 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7159 MGMT_STATUS_INVALID_PARAMS);
7163 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
7169 hdev->le_discovery.type = cp->type;
7171 hci_req_init(&req, hdev);
7173 status = mgmt_le_support(hdev);
7175 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7177 mgmt_pending_remove(cmd);
7181 /* If controller is scanning, it means the background scanning
7182 * is running. Thus, we should temporarily stop it in order to
7183 * set the discovery scanning parameters.
7185 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
7186 hci_req_add_le_scan_disable(&req, false);
7188 memset(&param_cp, 0, sizeof(param_cp));
7190 /* All active scans will be done with either a resolvable
7191 * private address (when privacy feature has been enabled)
7192 * or unresolvable private address.
7194 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
7196 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
7197 MGMT_STATUS_FAILED);
7198 mgmt_pending_remove(cmd);
7202 param_cp.type = hdev->le_scan_type;
7203 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
7204 param_cp.window = cpu_to_le16(hdev->le_scan_window);
7205 param_cp.own_address_type = own_addr_type;
7206 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
7209 memset(&enable_cp, 0, sizeof(enable_cp));
7210 enable_cp.enable = LE_SCAN_ENABLE;
7211 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
7213 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
7216 err = hci_req_run(&req, start_le_discovery_complete);
7218 mgmt_pending_remove(cmd);
7220 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
7223 hci_dev_unlock(hdev);
/* Fail a pending Stop LE Discovery command: complete it with the
 * translated HCI status, echoing the current discovery type back to the
 * caller.
 */
7227 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
7229 struct mgmt_pending_cmd *cmd;
7232 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7236 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
7237 mgmt_status(status), &hdev->le_discovery.type,
7238 sizeof(hdev->le_discovery.type));
7239 mgmt_pending_remove(cmd);
/* hci_request completion callback for stop_le_discovery(): on failure
 * fail the pending command, otherwise mark LE discovery STOPPED.
 */
7244 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
7247 BT_DBG("status %d", status);
7252 mgmt_stop_le_discovery_failed(hdev, status);
7256 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
7259 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_LE_DISCOVERY handler.  Rejects the request when no LE
 * discovery is active or when the requested type does not match the one
 * in progress; otherwise cancels the scheduled scan-disable work and
 * issues an HCI request to turn LE scanning off.
 */
7262 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
7263 void *data, u16 len)
7265 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
7266 struct mgmt_pending_cmd *cmd;
7267 struct hci_request req;
7270 BT_DBG("%s", hdev->name);
7274 if (!hci_le_discovery_active(hdev)) {
7275 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7276 MGMT_STATUS_REJECTED, &mgmt_cp->type,
7277 sizeof(mgmt_cp->type));
/* The type to stop must match the discovery type that was started. */
7281 if (hdev->le_discovery.type != mgmt_cp->type) {
7282 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7283 MGMT_STATUS_INVALID_PARAMS,
7284 &mgmt_cp->type, sizeof(mgmt_cp->type));
7288 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
7294 hci_req_init(&req, hdev);
/* Only the FINDING state can be stopped by this path. */
7296 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
7297 BT_DBG("unknown le discovery state %u",
7298 hdev->le_discovery.state);
7300 mgmt_pending_remove(cmd);
7301 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
7302 MGMT_STATUS_FAILED, &mgmt_cp->type,
7303 sizeof(mgmt_cp->type));
/* Stop the pending timed disable; we are disabling scan right now. */
7307 cancel_delayed_work(&hdev->le_scan_disable);
7308 hci_req_add_le_scan_disable(&req, false);
7310 err = hci_req_run(&req, stop_le_discovery_complete);
7312 mgmt_pending_remove(cmd);
7314 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
7317 hci_dev_unlock(hdev);
7321 /* Separate LE discovery */
/* Notify userspace that separate LE discovery started/stopped: complete
 * any pending start/stop command with the current discovery type, then
 * emit MGMT_EV_DISCOVERING with the new discovering flag.
 */
7322 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
7324 struct mgmt_ev_discovering ev;
7325 struct mgmt_pending_cmd *cmd;
7327 BT_DBG("%s le discovering %u", hdev->name, discovering);
/* Either a start or a stop command may be pending — try both. */
7330 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
7332 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
7335 u8 type = hdev->le_discovery.type;
7337 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
7339 mgmt_pending_remove(cmd);
7342 memset(&ev, 0, sizeof(ev));
7343 ev.type = hdev->le_discovery.type;
7344 ev.discovering = discovering;
7346 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* MGMT handler that cancels an in-progress LE connection attempt by
 * sending HCI LE Create Connection Cancel directly to the controller.
 * The send failure is only logged, not propagated as a command error.
 */
7349 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
7350 void *data, u16 len)
7354 BT_DBG("%s", hdev->name);
7358 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
7360 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
7362 hci_dev_unlock(hdev);
/* Validate LE connection-update parameters against spec-style limits:
 * interval 6..3200 with min <= max, supervision timeout 10..3200, and a
 * latency no greater than 499 or the maximum derivable from
 * (timeout * 8 / max_interval) - 1.  Return value conventions are in
 * elided lines.
 */
7367 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
7372 if (min > max || min < 6 || max > 3200)
7375 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout must exceed max interval enough for at least one latency period. */
7378 if (max >= to_multiplier * 8)
7381 max_latency = (to_multiplier * 8 / max) - 1;
7383 if (latency > 499 || latency > max_latency)
/* MGMT_OP_LE_CONN_UPDATE handler: decode the requested connection
 * parameters, validate them, look up the LE connection by peer address
 * and kick off an HCI connection-parameter update.
 */
7389 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
7392 struct mgmt_cp_le_conn_update *cp = data;
7394 struct hci_conn *conn;
7395 u16 min, max, latency, supervision_timeout;
7398 if (!hdev_is_powered(hdev))
7399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7400 MGMT_STATUS_NOT_POWERED);
/* Convert all little-endian wire fields to host order before checking. */
7402 min = __le16_to_cpu(cp->conn_interval_min);
7403 max = __le16_to_cpu(cp->conn_interval_max);
7404 latency = __le16_to_cpu(cp->conn_latency);
7405 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
7407 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
7408 min, max, latency, supervision_timeout);
7410 err = check_le_conn_update_param(min, max, latency,
7411 supervision_timeout);
7414 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7415 MGMT_STATUS_INVALID_PARAMS);
7419 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
7421 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
7422 MGMT_STATUS_NOT_CONNECTED);
7423 hci_dev_unlock(hdev);
7427 hci_dev_unlock(hdev);
7429 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
7431 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
/* hci_request completion callback for set_manufacturer_data(): complete
 * the pending mgmt command with either the translated HCI error status
 * or, on success, the original command parameters.
 */
7435 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
7438 struct mgmt_cp_set_manufacturer_data *cp;
7439 struct mgmt_pending_cmd *cmd;
7441 BT_DBG("status 0x%02x", status);
7445 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
7452 mgmt_cmd_status(cmd->sk, hdev->id,
7453 MGMT_OP_SET_MANUFACTURER_DATA,
7454 mgmt_status(status));
7456 mgmt_cmd_complete(cmd->sk, hdev->id,
7457 MGMT_OP_SET_MANUFACTURER_DATA, 0,
7460 mgmt_pending_remove(cmd);
7463 hci_dev_unlock(hdev);
/* MGMT_OP_SET_MANUFACTURER_DATA handler.  cp->data is EIR-formatted:
 * data[0] = field length (type byte + payload), data[1] = EIR type
 * (must be 0xFF, Manufacturer Specific Data), payload from data[2].
 * Saves the old data so it can be restored if the EIR update request
 * fails (restore path at the bottom).
 */
7466 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
7467 void *data, u16 len)
7469 struct mgmt_pending_cmd *cmd;
7470 struct hci_request req;
7471 struct mgmt_cp_set_manufacturer_data *cp = data;
7472 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
7476 BT_DBG("%s", hdev->name);
7478 if (!lmp_bredr_capable(hdev))
7479 return mgmt_cmd_status(sk, hdev->id,
7480 MGMT_OP_SET_MANUFACTURER_DATA,
7481 MGMT_STATUS_NOT_SUPPORTED);
/* Length byte must be non-zero and the payload must fit in storage. */
7483 if (cp->data[0] == 0 ||
7484 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
7485 return mgmt_cmd_status(sk, hdev->id,
7486 MGMT_OP_SET_MANUFACTURER_DATA,
7487 MGMT_STATUS_INVALID_PARAMS);
/* Only EIR type 0xFF (manufacturer specific) is accepted. */
7489 if (cp->data[1] != 0xFF)
7490 return mgmt_cmd_status(sk, hdev->id,
7491 MGMT_OP_SET_MANUFACTURER_DATA,
7492 MGMT_STATUS_NOT_SUPPORTED);
7496 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
7497 err = mgmt_cmd_status(sk, hdev->id,
7498 MGMT_OP_SET_MANUFACTURER_DATA,
7503 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
7510 hci_req_init(&req, hdev);
7512 /* if new data is same as previous data then return command
7515 if (hdev->manufacturer_len == cp->data[0] - 1 &&
7516 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
7517 mgmt_pending_remove(cmd);
7518 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
7519 0, cp, sizeof(*cp));
/* Remember current data for rollback if the HCI request fails. */
7524 old_len = hdev->manufacturer_len;
7526 memcpy(old_data, hdev->manufacturer_data, old_len);
7528 hdev->manufacturer_len = cp->data[0] - 1;
7529 if (hdev->manufacturer_len > 0)
7530 memcpy(hdev->manufacturer_data, cp->data + 2,
7531 hdev->manufacturer_len);
7533 __hci_req_update_eir(&req);
7535 err = hci_req_run(&req, set_manufacturer_data_complete);
7537 mgmt_pending_remove(cmd);
7542 hci_dev_unlock(hdev);
/* Failure path: restore the previous manufacturer data. */
7547 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
7548 hdev->manufacturer_len = old_len;
7549 if (hdev->manufacturer_len > 0)
7550 memcpy(hdev->manufacturer_data, old_data,
7551 hdev->manufacturer_len);
7552 hci_dev_unlock(hdev);
/* MGMT_OP_LE_SET_SCAN_PARAMS handler: validate and store LE scan type,
 * interval and window (each 0x0004..0x4000, window <= interval), then
 * restart the passive background scan so it picks up the new values —
 * but only when no discovery is currently using the scanner.
 */
7556 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
7557 void *data, u16 len)
7559 struct mgmt_cp_le_set_scan_params *cp = data;
7560 __u16 interval, window;
7563 BT_DBG("%s", hdev->name);
7565 if (!lmp_le_capable(hdev))
7566 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7567 MGMT_STATUS_NOT_SUPPORTED);
7569 interval = __le16_to_cpu(cp->interval);
7571 if (interval < 0x0004 || interval > 0x4000)
7572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7573 MGMT_STATUS_INVALID_PARAMS);
7575 window = __le16_to_cpu(cp->window);
7577 if (window < 0x0004 || window > 0x4000)
7578 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7579 MGMT_STATUS_INVALID_PARAMS);
/* Scan window may not exceed the scan interval. */
7581 if (window > interval)
7582 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
7583 MGMT_STATUS_INVALID_PARAMS);
7587 hdev->le_scan_type = cp->type;
7588 hdev->le_scan_interval = interval;
7589 hdev->le_scan_window = window;
7591 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
7594 /* If background scan is running, restart it so new parameters are
7597 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
7598 hdev->discovery.state == DISCOVERY_STOPPED) {
7599 struct hci_request req;
7601 hci_req_init(&req, hdev);
7603 hci_req_add_le_scan_disable(&req, false);
7604 hci_req_add_le_passive_scan(&req);
7606 hci_req_run(&req, NULL);
7609 hci_dev_unlock(hdev);
7613 #endif /* TIZEN_BT */
/* Sanity-check a long-term key entry from userspace: master flag must be
 * a strict boolean, and for LE random addresses the two most significant
 * bits must be set (static random address requirement).
 */
7615 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7617 if (key->master != 0x00 && key->master != 0x01)
7620 switch (key->addr.type) {
7621 case BDADDR_LE_PUBLIC:
7624 case BDADDR_LE_RANDOM:
7625 /* Two most significant bits shall be set */
7626 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: validate the key count against
 * the maximum that fits in a u16-sized payload and against the actual
 * message length, validate every entry, then atomically replace the
 * kernel's LTK store (clear + re-add), skipping blocked keys.
 */
7634 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7635 void *cp_data, u16 len)
7637 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7638 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7639 sizeof(struct mgmt_ltk_info));
7640 u16 key_count, expected_len;
7643 bt_dev_dbg(hdev, "sock %p", sk);
7645 if (!lmp_le_capable(hdev))
7646 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7647 MGMT_STATUS_NOT_SUPPORTED);
7649 key_count = __le16_to_cpu(cp->key_count);
7650 if (key_count > max_key_count) {
7651 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7653 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7654 MGMT_STATUS_INVALID_PARAMS);
/* The declared key count must exactly match the received length. */
7657 expected_len = struct_size(cp, keys, key_count);
7658 if (expected_len != len) {
7659 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7661 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7662 MGMT_STATUS_INVALID_PARAMS);
7665 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate everything first so the store is only cleared on good input. */
7667 for (i = 0; i < key_count; i++) {
7668 struct mgmt_ltk_info *key = &cp->keys[i];
7670 if (!ltk_is_valid(key))
7671 return mgmt_cmd_status(sk, hdev->id,
7672 MGMT_OP_LOAD_LONG_TERM_KEYS,
7673 MGMT_STATUS_INVALID_PARAMS);
7678 hci_smp_ltks_clear(hdev);
7680 for (i = 0; i < key_count; i++) {
7681 struct mgmt_ltk_info *key = &cp->keys[i];
7682 u8 type, authenticated;
7684 if (hci_is_blocked_key(hdev,
7685 HCI_BLOCKED_KEY_TYPE_LTK,
7687 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map the mgmt key type onto SMP key type + authentication level. */
7692 switch (key->type) {
7693 case MGMT_LTK_UNAUTHENTICATED:
7694 authenticated = 0x00;
7695 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
7697 case MGMT_LTK_AUTHENTICATED:
7698 authenticated = 0x01;
7699 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
7701 case MGMT_LTK_P256_UNAUTH:
7702 authenticated = 0x00;
7703 type = SMP_LTK_P256;
7705 case MGMT_LTK_P256_AUTH:
7706 authenticated = 0x01;
7707 type = SMP_LTK_P256;
7709 case MGMT_LTK_P256_DEBUG:
7710 authenticated = 0x00;
7711 type = SMP_LTK_P256_DEBUG;
7717 hci_add_ltk(hdev, &key->addr.bdaddr,
7718 le_addr_type(key->addr.type), type, authenticated,
7719 key->val, key->enc_size, key->ediv, key->rand);
7722 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7725 hci_dev_unlock(hdev);
/* Completion helper for Get Conn Info: build the reply from the cached
 * connection values on success, or invalid-marker values on failure,
 * send it to the requesting socket and drop the connection reference
 * taken when the command was queued.
 */
7730 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
7732 struct hci_conn *conn = cmd->user_data;
7733 struct mgmt_rp_get_conn_info rp;
7736 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
7738 if (status == MGMT_STATUS_SUCCESS) {
7739 rp.rssi = conn->rssi;
7740 rp.tx_power = conn->tx_power;
7741 rp.max_tx_power = conn->max_tx_power;
7743 rp.rssi = HCI_RSSI_INVALID;
7744 rp.tx_power = HCI_TX_POWER_INVALID;
7745 rp.max_tx_power = HCI_TX_POWER_INVALID;
7748 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
7749 status, &rp, sizeof(rp));
7751 hci_conn_drop(conn);
/* hci_request completion for the RSSI / TX-power refresh issued by
 * get_conn_info().  Recovers the connection handle from whichever
 * command was sent last, locates the matching pending mgmt command and
 * completes it.
 */
7757 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
7760 struct hci_cp_read_rssi *cp;
7761 struct mgmt_pending_cmd *cmd;
7762 struct hci_conn *conn;
7766 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
7770 /* Commands sent in request are either Read RSSI or Read Transmit Power
7771 * Level so we check which one was last sent to retrieve connection
7772 * handle. Both commands have handle as first parameter so it's safe to
7773 * cast data on the same command struct.
7775 * First command sent is always Read RSSI and we fail only if it fails.
7776 * In other case we simply override error to indicate success as we
7777 * already remembered if TX power value is actually valid.
7779 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
7781 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
7782 status = MGMT_STATUS_SUCCESS;
7784 status = mgmt_status(hci_status);
7788 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
7792 handle = __le16_to_cpu(cp->handle);
7793 conn = hci_conn_hash_lookup_handle(hdev, handle);
7795 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
7800 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
7804 cmd->cmd_complete(cmd, status);
7805 mgmt_pending_remove(cmd);
7808 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: report RSSI / TX power for an existing
 * connection.  If the cached values are fresh enough (randomized TTL
 * between hdev->conn_info_min_age and conn_info_max_age) they are
 * returned immediately; otherwise an HCI request refreshes them and the
 * reply is deferred to conn_info_refresh_complete().
 */
7811 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7814 struct mgmt_cp_get_conn_info *cp = data;
7815 struct mgmt_rp_get_conn_info rp;
7816 struct hci_conn *conn;
7817 unsigned long conn_info_age;
7820 bt_dev_dbg(hdev, "sock %p", sk);
7822 memset(&rp, 0, sizeof(rp));
7823 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7824 rp.addr.type = cp->addr.type;
7826 if (!bdaddr_type_is_valid(cp->addr.type))
7827 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7828 MGMT_STATUS_INVALID_PARAMS,
7833 if (!hdev_is_powered(hdev)) {
7834 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7835 MGMT_STATUS_NOT_POWERED, &rp,
/* Resolve the connection on the right transport for the address type. */
7840 if (cp->addr.type == BDADDR_BREDR)
7841 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7844 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7846 if (!conn || conn->state != BT_CONNECTED) {
7847 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7848 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one Get Conn Info per connection may be in flight. */
7853 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
7854 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7855 MGMT_STATUS_BUSY, &rp, sizeof(rp));
7859 /* To avoid client trying to guess when to poll again for information we
7860 * calculate conn info age as random value between min/max set in hdev.
7862 conn_info_age = hdev->conn_info_min_age +
7863 prandom_u32_max(hdev->conn_info_max_age -
7864 hdev->conn_info_min_age);
7866 /* Query controller to refresh cached values if they are too old or were
7869 if (time_after(jiffies, conn->conn_info_timestamp +
7870 msecs_to_jiffies(conn_info_age)) ||
7871 !conn->conn_info_timestamp) {
7872 struct hci_request req;
7873 struct hci_cp_read_tx_power req_txp_cp;
7874 struct hci_cp_read_rssi req_rssi_cp;
7875 struct mgmt_pending_cmd *cmd;
7877 hci_req_init(&req, hdev);
7878 req_rssi_cp.handle = cpu_to_le16(conn->handle);
7879 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
7882 /* For LE links TX power does not change thus we don't need to
7883 * query for it once value is known.
7885 if (!bdaddr_type_is_le(cp->addr.type) ||
7886 conn->tx_power == HCI_TX_POWER_INVALID) {
7887 req_txp_cp.handle = cpu_to_le16(conn->handle);
7888 req_txp_cp.type = 0x00;
7889 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7890 sizeof(req_txp_cp), &req_txp_cp);
7893 /* Max TX power needs to be read only once per connection */
7894 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
7895 req_txp_cp.handle = cpu_to_le16(conn->handle);
7896 req_txp_cp.type = 0x01;
7897 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7898 sizeof(req_txp_cp), &req_txp_cp);
7901 err = hci_req_run(&req, conn_info_refresh_complete);
7905 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until the deferred completion drops it. */
7912 hci_conn_hold(conn);
7913 cmd->user_data = hci_conn_get(conn);
7914 cmd->cmd_complete = conn_info_cmd_complete;
7916 conn->conn_info_timestamp = jiffies;
7918 /* Cache is valid, just reply with values cached in hci_conn */
7919 rp.rssi = conn->rssi;
7920 rp.tx_power = conn->tx_power;
7921 rp.max_tx_power = conn->max_tx_power;
7923 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7924 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7928 hci_dev_unlock(hdev);
/* Completion helper for Get Clock Info: fill in the local clock from the
 * hdev and, when a connection was involved, the piconet clock and its
 * accuracy, then send the reply and release the held references.
 */
7932 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
7934 struct hci_conn *conn = cmd->user_data;
7935 struct mgmt_rp_get_clock_info rp;
7936 struct hci_dev *hdev;
7939 memset(&rp, 0, sizeof(rp));
7940 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
7945 hdev = hci_dev_get(cmd->index);
7947 rp.local_clock = cpu_to_le32(hdev->clock);
7952 rp.piconet_clock = cpu_to_le32(conn->clock);
7953 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7957 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7961 hci_conn_drop(conn);
/* hci_request completion for get_clock_info(): if the last Read Clock
 * command targeted a piconet clock (which != 0), recover the connection
 * from its handle, then complete the matching pending mgmt command.
 */
7968 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7970 struct hci_cp_read_clock *hci_cp;
7971 struct mgmt_pending_cmd *cmd;
7972 struct hci_conn *conn;
7974 bt_dev_dbg(hdev, "status %u", status);
7978 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which == 0x01 means the piconet clock of a specific connection. */
7982 if (hci_cp->which) {
7983 u16 handle = __le16_to_cpu(hci_cp->handle);
7984 conn = hci_conn_hash_lookup_handle(hdev, handle);
7989 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
7993 cmd->cmd_complete(cmd, mgmt_status(status));
7994 mgmt_pending_remove(cmd);
7997 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only): read the local clock
 * and, when a peer address is given and connected, also the piconet
 * clock for that connection.  Reply is deferred to
 * get_clock_info_complete() via clock_info_cmd_complete().
 */
8000 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
8003 struct mgmt_cp_get_clock_info *cp = data;
8004 struct mgmt_rp_get_clock_info rp;
8005 struct hci_cp_read_clock hci_cp;
8006 struct mgmt_pending_cmd *cmd;
8007 struct hci_request req;
8008 struct hci_conn *conn;
8011 bt_dev_dbg(hdev, "sock %p", sk);
8013 memset(&rp, 0, sizeof(rp));
8014 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
8015 rp.addr.type = cp->addr.type;
8017 if (cp->addr.type != BDADDR_BREDR)
8018 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8019 MGMT_STATUS_INVALID_PARAMS,
8024 if (!hdev_is_powered(hdev)) {
8025 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
8026 MGMT_STATUS_NOT_POWERED, &rp,
/* Non-ANY address: the peer must have a live ACL connection. */
8031 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8032 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
8034 if (!conn || conn->state != BT_CONNECTED) {
8035 err = mgmt_cmd_complete(sk, hdev->id,
8036 MGMT_OP_GET_CLOCK_INFO,
8037 MGMT_STATUS_NOT_CONNECTED,
8045 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
8051 cmd->cmd_complete = clock_info_cmd_complete;
8053 hci_req_init(&req, hdev);
/* First Read Clock: which = 0x00 (local clock, zeroed struct). */
8055 memset(&hci_cp, 0, sizeof(hci_cp));
8056 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8059 hci_conn_hold(conn);
8060 cmd->user_data = hci_conn_get(conn);
8062 hci_cp.handle = cpu_to_le16(conn->handle);
8063 hci_cp.which = 0x01; /* Piconet clock */
8064 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
8067 err = hci_req_run(&req, get_clock_info_complete);
8069 mgmt_pending_remove(cmd);
8072 hci_dev_unlock(hdev);
/* Return whether there is an established LE connection to the given
 * address with a matching destination address type.
 */
8076 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
8078 struct hci_conn *conn;
8080 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
8084 if (conn->dst_type != type)
8087 if (conn->state != BT_CONNECTED)
8093 /* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for (addr, addr_type), creating the
 * connection parameters entry if needed, and move the entry onto the
 * matching action list (pend_le_conns / pend_le_reports).
 *
 * Fix: every "&params" in this function had been mojibake-corrupted to
 * "¶ms" (U+00B6 PILCROW + "ms"); restored the address-of operator so
 * the list_del/list_add calls compile and operate on params->action.
 */
8094 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
8095 u8 addr_type, u8 auto_connect)
8097 struct hci_conn_params *params;
8099 params = hci_conn_params_add(hdev, addr, addr_type);
8103 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry is currently on. */
8106 list_del_init(&params->action);
8108 switch (auto_connect) {
8109 case HCI_AUTO_CONN_DISABLED:
8110 case HCI_AUTO_CONN_LINK_LOSS:
8111 /* If auto connect is being disabled when we're trying to
8112 * connect to device, keep connecting.
8114 if (params->explicit_connect)
8115 list_add(&params->action, &hdev->pend_le_conns);
8117 case HCI_AUTO_CONN_REPORT:
8118 if (params->explicit_connect)
8119 list_add(&params->action, &hdev->pend_le_conns);
8121 list_add(&params->action, &hdev->pend_le_reports);
8123 case HCI_AUTO_CONN_DIRECT:
8124 case HCI_AUTO_CONN_ALWAYS:
8125 if (!is_connected(hdev, addr, addr_type))
8126 list_add(&params->action, &hdev->pend_le_conns);
8130 params->auto_connect = auto_connect;
8132 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
8133 addr, addr_type, auto_connect);
/* Emit MGMT_EV_DEVICE_ADDED for the given address/type to all mgmt
 * sockets except the originating one (sk).
 */
8138 static void device_added(struct sock *sk, struct hci_dev *hdev,
8139 bdaddr_t *bdaddr, u8 type, u8 action)
8141 struct mgmt_ev_device_added ev;
8143 bacpy(&ev.addr.bdaddr, bdaddr);
8144 ev.addr.type = type;
8147 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler.  For BR/EDR addresses only action 0x01
 * (allow incoming connections) is supported and the device goes on the
 * whitelist.  For LE addresses the action maps to an auto-connect
 * policy (0x02 = always, 0x01 = direct, else report) stored in the
 * connection parameters, after which background scanning is refreshed.
 */
8150 static int add_device(struct sock *sk, struct hci_dev *hdev,
8151 void *data, u16 len)
8153 struct mgmt_cp_add_device *cp = data;
8154 u8 auto_conn, addr_type;
8155 struct hci_conn_params *params;
8157 u32 current_flags = 0;
8159 bt_dev_dbg(hdev, "sock %p", sk);
8161 if (!bdaddr_type_is_valid(cp->addr.type) ||
8162 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
8163 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8164 MGMT_STATUS_INVALID_PARAMS,
8165 &cp->addr, sizeof(cp->addr));
8167 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
8168 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8169 MGMT_STATUS_INVALID_PARAMS,
8170 &cp->addr, sizeof(cp->addr));
8174 if (cp->addr.type == BDADDR_BREDR) {
8175 /* Only incoming connections action is supported for now */
8176 if (cp->action != 0x01) {
8177 err = mgmt_cmd_complete(sk, hdev->id,
8179 MGMT_STATUS_INVALID_PARAMS,
8180 &cp->addr, sizeof(cp->addr));
8184 err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
8190 hci_req_update_scan(hdev);
8195 addr_type = le_addr_type(cp->addr.type);
/* Map the mgmt action byte onto a kernel auto-connect policy. */
8197 if (cp->action == 0x02)
8198 auto_conn = HCI_AUTO_CONN_ALWAYS;
8199 else if (cp->action == 0x01)
8200 auto_conn = HCI_AUTO_CONN_DIRECT;
8202 auto_conn = HCI_AUTO_CONN_REPORT;
8204 /* Kernel internally uses conn_params with resolvable private
8205 * address, but Add Device allows only identity addresses.
8206 * Make sure it is enforced before calling
8207 * hci_conn_params_lookup.
8209 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8210 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8211 MGMT_STATUS_INVALID_PARAMS,
8212 &cp->addr, sizeof(cp->addr));
8216 /* If the connection parameters don't exist for this device,
8217 * they will be created and configured with defaults.
8219 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
8221 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8222 MGMT_STATUS_FAILED, &cp->addr,
8226 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8229 current_flags = params->current_flags;
8232 hci_update_background_scan(hdev);
8235 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
8236 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
8237 SUPPORTED_DEVICE_FLAGS(), current_flags);
8239 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
8240 MGMT_STATUS_SUCCESS, &cp->addr,
8244 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_REMOVED for the given address/type to all mgmt
 * sockets except the originating one (sk).
 */
8248 static void device_removed(struct sock *sk, struct hci_dev *hdev,
8249 bdaddr_t *bdaddr, u8 type)
8251 struct mgmt_ev_device_removed ev;
8253 bacpy(&ev.addr.bdaddr, bdaddr);
8254 ev.addr.type = type;
8256 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_REMOVE_DEVICE handler.  With a specific address, remove that
 * device from the whitelist (BR/EDR) or delete its LE connection
 * parameters; with BDADDR_ANY, flush the whole whitelist and all
 * non-disabled LE connection parameters, then refresh scanning.
 *
 * Fix: "&params" in the list_del calls had been mojibake-corrupted to
 * "¶ms" (U+00B6); restored the address-of operator so the entry is
 * properly unlinked from its action list and the params list.
 */
8259 static int remove_device(struct sock *sk, struct hci_dev *hdev,
8260 void *data, u16 len)
8262 struct mgmt_cp_remove_device *cp = data;
8265 bt_dev_dbg(hdev, "sock %p", sk);
8269 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8270 struct hci_conn_params *params;
8273 if (!bdaddr_type_is_valid(cp->addr.type)) {
8274 err = mgmt_cmd_complete(sk, hdev->id,
8275 MGMT_OP_REMOVE_DEVICE,
8276 MGMT_STATUS_INVALID_PARAMS,
8277 &cp->addr, sizeof(cp->addr));
8281 if (cp->addr.type == BDADDR_BREDR) {
8282 err = hci_bdaddr_list_del(&hdev->whitelist,
8286 err = mgmt_cmd_complete(sk, hdev->id,
8287 MGMT_OP_REMOVE_DEVICE,
8288 MGMT_STATUS_INVALID_PARAMS,
8294 hci_req_update_scan(hdev);
8296 device_removed(sk, hdev, &cp->addr.bdaddr,
8301 addr_type = le_addr_type(cp->addr.type);
8303 /* Kernel internally uses conn_params with resolvable private
8304 * address, but Remove Device allows only identity addresses.
8305 * Make sure it is enforced before calling
8306 * hci_conn_params_lookup.
8308 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
8309 err = mgmt_cmd_complete(sk, hdev->id,
8310 MGMT_OP_REMOVE_DEVICE,
8311 MGMT_STATUS_INVALID_PARAMS,
8312 &cp->addr, sizeof(cp->addr));
8316 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8319 err = mgmt_cmd_complete(sk, hdev->id,
8320 MGMT_OP_REMOVE_DEVICE,
8321 MGMT_STATUS_INVALID_PARAMS,
8322 &cp->addr, sizeof(cp->addr));
/* Disabled/explicit entries were never "added" via Add Device. */
8326 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
8327 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
8328 err = mgmt_cmd_complete(sk, hdev->id,
8329 MGMT_OP_REMOVE_DEVICE,
8330 MGMT_STATUS_INVALID_PARAMS,
8331 &cp->addr, sizeof(cp->addr));
8335 list_del(&params->action);
8336 list_del(&params->list);
8338 hci_update_background_scan(hdev);
8340 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
8342 struct hci_conn_params *p, *tmp;
8343 struct bdaddr_list *b, *btmp;
/* BDADDR_ANY wipe is only defined for addr.type == 0 (BR/EDR). */
8345 if (cp->addr.type) {
8346 err = mgmt_cmd_complete(sk, hdev->id,
8347 MGMT_OP_REMOVE_DEVICE,
8348 MGMT_STATUS_INVALID_PARAMS,
8349 &cp->addr, sizeof(cp->addr));
8353 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
8354 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
8359 hci_req_update_scan(hdev);
8361 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
8362 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
8364 device_removed(sk, hdev, &p->addr, p->addr_type);
8365 if (p->explicit_connect) {
8366 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
8369 list_del(&p->action);
8374 bt_dev_dbg(hdev, "All LE connection parameters were removed");
8376 hci_update_background_scan(hdev);
8380 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
8381 MGMT_STATUS_SUCCESS, &cp->addr,
8384 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: validate count/length of the
 * parameter list, clear disabled entries, and (re)load per-device LE
 * connection parameters, skipping invalid entries with a logged error.
 *
 * Fix: "&param" in the debug print and in hci_conn_params_add() had
 * been mojibake-corrupted to "¶m" (U+00B6); restored the address-of
 * operator so the peer address is passed correctly.
 */
8388 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
8391 struct mgmt_cp_load_conn_param *cp = data;
8392 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
8393 sizeof(struct mgmt_conn_param));
8394 u16 param_count, expected_len;
8397 if (!lmp_le_capable(hdev))
8398 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8399 MGMT_STATUS_NOT_SUPPORTED);
8401 param_count = __le16_to_cpu(cp->param_count);
8402 if (param_count > max_param_count) {
8403 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
8405 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8406 MGMT_STATUS_INVALID_PARAMS);
/* Declared parameter count must exactly match the received length. */
8409 expected_len = struct_size(cp, params, param_count);
8410 if (expected_len != len) {
8411 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
8413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8414 MGMT_STATUS_INVALID_PARAMS);
8417 bt_dev_dbg(hdev, "param_count %u", param_count);
8421 hci_conn_params_clear_disabled(hdev);
8423 for (i = 0; i < param_count; i++) {
8424 struct mgmt_conn_param *param = &cp->params[i];
8425 struct hci_conn_params *hci_param;
8426 u16 min, max, latency, timeout;
8429 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
8432 if (param->addr.type == BDADDR_LE_PUBLIC) {
8433 addr_type = ADDR_LE_DEV_PUBLIC;
8434 } else if (param->addr.type == BDADDR_LE_RANDOM) {
8435 addr_type = ADDR_LE_DEV_RANDOM;
8437 bt_dev_err(hdev, "ignoring invalid connection parameters");
8441 min = le16_to_cpu(param->min_interval);
8442 max = le16_to_cpu(param->max_interval);
8443 latency = le16_to_cpu(param->latency);
8444 timeout = le16_to_cpu(param->timeout);
8446 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
8447 min, max, latency, timeout);
8449 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
8450 bt_dev_err(hdev, "ignoring invalid connection parameters");
8454 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
8457 bt_dev_err(hdev, "failed to add connection parameters");
8461 hci_param->conn_min_interval = min;
8462 hci_param->conn_max_interval = max;
8463 hci_param->conn_latency = latency;
8464 hci_param->supervision_timeout = timeout;
8467 hci_dev_unlock(hdev);
8469 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggle the HCI_EXT_CONFIGURED
 * flag on a powered-down controller that declares the EXTERNAL_CONFIG
 * quirk, and migrate the controller between the configured and
 * unconfigured index lists when its effective configuration state
 * changes.
 */
8473 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8474 void *data, u16 len)
8476 struct mgmt_cp_set_external_config *cp = data;
8480 bt_dev_dbg(hdev, "sock %p", sk);
8482 if (hdev_is_powered(hdev))
8483 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8484 MGMT_STATUS_REJECTED);
8486 if (cp->config != 0x00 && cp->config != 0x01)
8487 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8488 MGMT_STATUS_INVALID_PARAMS);
8490 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8491 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8492 MGMT_STATUS_NOT_SUPPORTED);
/* Track whether the flag actually changed to avoid spurious events. */
8497 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8499 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8501 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8508 err = new_options(hdev, sk);
/* Flag state disagreeing with is_configured() means the index moves
 * between the configured and unconfigured lists. */
8510 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8511 mgmt_index_removed(hdev);
8513 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8514 hci_dev_set_flag(hdev, HCI_CONFIG);
8515 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8517 queue_work(hdev->req_workqueue, &hdev->power_on);
8519 set_bit(HCI_RAW, &hdev->flags);
8520 mgmt_index_added(hdev);
8525 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store a public address for a
 * powered-down controller whose driver provides set_bdaddr().  If the
 * address made an unconfigured controller fully configured, move it to
 * the configured index list and schedule power-on.
 */
8529 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8530 void *data, u16 len)
8532 struct mgmt_cp_set_public_address *cp = data;
8536 bt_dev_dbg(hdev, "sock %p", sk);
8538 if (hdev_is_powered(hdev))
8539 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8540 MGMT_STATUS_REJECTED);
8542 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8543 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8544 MGMT_STATUS_INVALID_PARAMS);
/* Only drivers that can program a BD_ADDR support this command. */
8546 if (!hdev->set_bdaddr)
8547 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8548 MGMT_STATUS_NOT_SUPPORTED);
8552 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8553 bacpy(&hdev->public_addr, &cp->bdaddr);
8555 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8562 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8563 err = new_options(hdev, sk);
8565 if (is_configured(hdev)) {
8566 mgmt_index_removed(hdev);
8568 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8570 hci_dev_set_flag(hdev, HCI_CONFIG);
8571 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8573 queue_work(hdev->req_workqueue, &hdev->power_on);
8577 hci_dev_unlock(hdev);
/* Tizen extension: emit MGMT_EV_DEVICE_NAME_UPDATE carrying the remote
 * device's name as an EIR_NAME_COMPLETE field.
 * NOTE(review): the signature's trailing parameter(s), the local buffer
 * declaration and several lines are missing from this extract.
 */
8582 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
8586 struct mgmt_ev_device_name_update *ev = (void *)buf;
8592 bacpy(&ev->addr.bdaddr, bdaddr);
8593 ev->addr.type = BDADDR_BREDR;
/* Name is packed as an EIR Complete Local Name TLV after the header. */
8595 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8598 ev->eir_len = cpu_to_le16(eir_len);
8600 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
8601 sizeof(*ev) + eir_len, NULL);
/* Tizen extension: emit MGMT_EV_CONN_UPDATE_FAILED for a failed LE
 * connection parameter update on the given address.
 * NOTE(review): a line using the 'status' parameter (presumably
 * ev.status = mgmt_status(status)) is missing from this extract.
 */
8604 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8605 u8 link_type, u8 addr_type, u8 status)
8607 struct mgmt_ev_conn_update_failed ev;
8609 bacpy(&ev.addr.bdaddr, bdaddr);
8610 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8613 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
8614 &ev, sizeof(ev), NULL);
8617 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
8618 u8 link_type, u8 addr_type, u16 conn_interval,
8619 u16 conn_latency, u16 supervision_timeout)
8621 struct mgmt_ev_conn_updated ev;
8623 bacpy(&ev.addr.bdaddr, bdaddr);
8624 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8625 ev.conn_interval = cpu_to_le16(conn_interval);
8626 ev.conn_latency = cpu_to_le16(conn_latency);
8627 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
8629 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
8630 &ev, sizeof(ev), NULL);
/* Completion callback for the Read Local OOB (Ext) Data HCI request
 * issued by read_local_ssp_oob_req(): validates the HCI reply, packs
 * the class of device plus P-192 (and, when available, P-256) hash and
 * randomizer into EIR fields, answers the pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command and broadcasts
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED to other listeners.
 * NOTE(review): this extract is missing lines (locals such as eir_len,
 * the h192/r192/h256/r256 assignments, goto labels and braces); see
 * complete upstream source before editing.
 */
8634 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
8635 u16 opcode, struct sk_buff *skb)
8637 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8638 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8639 u8 *h192, *r192, *h256, *r256;
8640 struct mgmt_pending_cmd *cmd;
8644 bt_dev_dbg(hdev, "status %u", status);
8646 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
8650 mgmt_cp = cmd->param;
8653 status = mgmt_status(status);
/* Legacy reply: P-192 hash/randomizer only. */
8660 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
8661 struct hci_rp_read_local_oob_data *rp;
8663 if (skb->len != sizeof(*rp)) {
8664 status = MGMT_STATUS_FAILED;
8667 status = MGMT_STATUS_SUCCESS;
8668 rp = (void *)skb->data;
/* 5 bytes class-of-dev TLV + 18-byte hash TLV + 18-byte rand TLV. */
8670 eir_len = 5 + 18 + 18;
/* Extended reply: carries both P-192 and P-256 values. */
8677 struct hci_rp_read_local_oob_ext_data *rp;
8679 if (skb->len != sizeof(*rp)) {
8680 status = MGMT_STATUS_FAILED;
8683 status = MGMT_STATUS_SUCCESS;
8684 rp = (void *)skb->data;
/* In SC-only mode the P-192 values are not exposed. */
8686 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8687 eir_len = 5 + 18 + 18;
8691 eir_len = 5 + 18 + 18 + 18 + 18;
8701 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8708 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8709 hdev->dev_class, 3);
8712 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8713 EIR_SSP_HASH_C192, h192, 16);
8714 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8715 EIR_SSP_RAND_R192, r192, 16);
8719 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8720 EIR_SSP_HASH_C256, h256, 16);
8721 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8722 EIR_SSP_RAND_R256, r256, 16);
8726 mgmt_rp->type = mgmt_cp->type;
8727 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8729 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8730 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8731 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8732 if (err < 0 || status)
/* Requester implicitly subscribes to further OOB data updates. */
8735 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8737 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8738 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8739 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8742 mgmt_pending_remove(cmd);
/* Queue an HCI Read Local OOB (Ext) Data request for the BR/EDR case
 * of MGMT_OP_READ_LOCAL_OOB_EXT_DATA. The pending command is answered
 * later in read_local_oob_ext_data_complete().
 * NOTE(review): missing lines include the mgmt_pending_add() trailing
 * arguments, the !cmd check and the final return; see upstream source.
 */
8745 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8746 struct mgmt_cp_read_local_oob_ext_data *cp)
8748 struct mgmt_pending_cmd *cmd;
8749 struct hci_request req;
8752 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8757 hci_req_init(&req, hdev);
/* Prefer the extended (P-256 capable) command when BR/EDR SC is on. */
8759 if (bredr_sc_enabled(hdev))
8760 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
8762 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
8764 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
8766 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler. For BR/EDR it defers to
 * read_local_ssp_oob_req(); for LE it synthesizes the OOB EIR blob
 * inline (address, role, SC confirm/random values, flags).
 * NOTE(review): this extract is missing lines (locals, goto labels,
 * unlock/return paths and braces); see complete upstream source.
 */
8773 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8774 void *data, u16 data_len)
8776 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8777 struct mgmt_rp_read_local_oob_ext_data *rp;
8780 u8 status, flags, role, addr[7], hash[16], rand[16];
8783 bt_dev_dbg(hdev, "sock %p", sk);
8785 if (hdev_is_powered(hdev)) {
8787 case BIT(BDADDR_BREDR):
8788 status = mgmt_bredr_support(hdev);
8794 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8795 status = mgmt_le_support(hdev);
/* LE blob: addr TLV + role TLV + two 18-byte SC TLVs + flags TLV. */
8799 eir_len = 9 + 3 + 18 + 18 + 3;
8802 status = MGMT_STATUS_INVALID_PARAMS;
8807 status = MGMT_STATUS_NOT_POWERED;
8811 rp_len = sizeof(*rp) + eir_len;
8812 rp = kmalloc(rp_len, GFP_ATOMIC);
8823 case BIT(BDADDR_BREDR):
8824 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
/* SSP enabled: go through the HCI request path instead. */
8825 err = read_local_ssp_oob_req(hdev, sk, cp);
8826 hci_dev_unlock(hdev);
8830 status = MGMT_STATUS_FAILED;
8833 eir_len = eir_append_data(rp->eir, eir_len,
8835 hdev->dev_class, 3);
8838 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8839 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8840 smp_generate_oob(hdev, hash, rand) < 0) {
8841 hci_dev_unlock(hdev);
8842 status = MGMT_STATUS_FAILED;
8846 /* This should return the active RPA, but since the RPA
8847 * is only programmed on demand, it is really hard to fill
8848 * this in at the moment. For now disallow retrieving
8849 * local out-of-band data when privacy is in use.
8851 * Returning the identity address will not help here since
8852 * pairing happens before the identity resolving key is
8853 * known and thus the connection establishment happens
8854 * based on the RPA and not the identity address.
8856 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8857 hci_dev_unlock(hdev);
8858 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address the same way advertising would. */
8862 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8863 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8864 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8865 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8866 memcpy(addr, &hdev->static_addr, 6);
8869 memcpy(addr, &hdev->bdaddr, 6);
8873 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8874 addr, sizeof(addr));
8876 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8881 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8882 &role, sizeof(role));
8884 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8885 eir_len = eir_append_data(rp->eir, eir_len,
8887 hash, sizeof(hash));
8889 eir_len = eir_append_data(rp->eir, eir_len,
8891 rand, sizeof(rand));
8894 flags = mgmt_get_adv_discov_flags(hdev);
8896 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8897 flags |= LE_AD_NO_BREDR;
8899 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8900 &flags, sizeof(flags));
8904 hci_dev_unlock(hdev);
8906 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8908 status = MGMT_STATUS_SUCCESS;
8911 rp->type = cp->type;
8912 rp->eir_len = cpu_to_le16(eir_len);
8914 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8915 status, rp, sizeof(*rp) + eir_len);
8916 if (err < 0 || status)
8919 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8920 rp, sizeof(*rp) + eir_len,
8921 HCI_MGMT_OOB_DATA_EVENTS, sk);
8929 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8933 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8934 flags |= MGMT_ADV_FLAG_DISCOV;
8935 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8936 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8937 flags |= MGMT_ADV_FLAG_APPEARANCE;
8938 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8940 /* In extended adv TX_POWER returned from Set Adv Param
8941 * will be always valid.
8943 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
8944 ext_adv_capable(hdev))
8945 flags |= MGMT_ADV_FLAG_TX_POWER;
8947 if (ext_adv_capable(hdev)) {
8948 flags |= MGMT_ADV_FLAG_SEC_1M;
8949 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8950 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8952 if (hdev->le_features[1] & HCI_LE_PHY_2M)
8953 flags |= MGMT_ADV_FLAG_SEC_2M;
8955 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8956 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler: report supported advertising
 * flags, data-length limits and the list of existing instance IDs.
 * NOTE(review): missing lines include locals (instance pointer, err),
 * the kmalloc failure return, the per-instance increment and the final
 * kfree/return; see complete upstream source.
 */
8962 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8963 void *data, u16 data_len)
8965 struct mgmt_rp_read_adv_features *rp;
8968 struct adv_info *adv_instance;
8969 u32 supported_flags;
8972 bt_dev_dbg(hdev, "sock %p", sk);
8974 if (!lmp_le_capable(hdev))
8975 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8976 MGMT_STATUS_REJECTED);
8978 /* Enabling the experimental LL Privay support disables support for
8981 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
8982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
8983 MGMT_STATUS_NOT_SUPPORTED);
/* One trailing byte per existing instance ID. */
8987 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8988 rp = kmalloc(rp_len, GFP_ATOMIC);
8990 hci_dev_unlock(hdev);
8994 supported_flags = get_supported_adv_flags(hdev);
8996 rp->supported_flags = cpu_to_le32(supported_flags);
8997 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
8998 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
8999 rp->max_instances = hdev->le_num_of_adv_sets;
9000 rp->num_instances = hdev->adv_instance_cnt;
9002 instance = rp->instance;
9003 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
9004 *instance = adv_instance->instance;
9008 hci_dev_unlock(hdev);
9010 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
9011 MGMT_STATUS_SUCCESS, rp, rp_len);
9018 static u8 calculate_name_len(struct hci_dev *hdev)
9020 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
9022 return append_local_name(hdev, buf, 0);
9025 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
9028 u8 max_len = HCI_MAX_AD_LENGTH;
9031 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
9032 MGMT_ADV_FLAG_LIMITED_DISCOV |
9033 MGMT_ADV_FLAG_MANAGED_FLAGS))
9036 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
9039 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
9040 max_len -= calculate_name_len(hdev);
9042 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
9049 static bool flags_managed(u32 adv_flags)
9051 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
9052 MGMT_ADV_FLAG_LIMITED_DISCOV |
9053 MGMT_ADV_FLAG_MANAGED_FLAGS);
9056 static bool tx_power_managed(u32 adv_flags)
9058 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
9061 static bool name_managed(u32 adv_flags)
9063 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
9066 static bool appearance_managed(u32 adv_flags)
9068 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising/scan-response TLV data: it must
 * fit within the computed maximum length, be well-formed TLVs, and
 * must not contain fields the kernel manages itself (flags, TX power,
 * name, appearance) according to adv_flags.
 * NOTE(review): missing lines include the local declarations, the
 * len-vs-max_len check, the cur_len assignment inside the loop and the
 * return statements; see complete upstream source.
 */
9071 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
9072 u8 len, bool is_adv_data)
9077 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
9082 /* Make sure that the data is correctly formatted. */
9083 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
9089 if (data[i + 1] == EIR_FLAGS &&
9090 (!is_adv_data || flags_managed(adv_flags)))
9093 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
9096 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
9099 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
9102 if (data[i + 1] == EIR_APPEARANCE &&
9103 appearance_managed(adv_flags))
9106 /* If the current field length would exceed the total data
9107 * length, then it's invalid.
9109 if (i + cur_len >= len)
/* HCI completion callback for MGMT_OP_ADD_ADVERTISING: on failure,
 * removes any instances still marked pending, then completes the
 * pending mgmt command with the resulting status.
 * NOTE(review): missing lines include locals, status checks, the
 * hci_dev_lock call, 'continue' inside the loop and some braces; see
 * complete upstream source.
 */
9116 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
9119 struct mgmt_pending_cmd *cmd;
9120 struct mgmt_cp_add_advertising *cp;
9121 struct mgmt_rp_add_advertising rp;
9122 struct adv_info *adv_instance, *n;
9125 bt_dev_dbg(hdev, "status %d", status);
9129 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
9131 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
9132 if (!adv_instance->pending)
9136 adv_instance->pending = false;
9140 instance = adv_instance->instance;
/* Stop the expiry timer before removing the active instance. */
9142 if (hdev->cur_adv_instance == instance)
9143 cancel_adv_timeout(hdev);
9145 hci_remove_adv_instance(hdev, instance);
9146 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
9153 rp.instance = cp->instance;
9156 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9157 mgmt_status(status));
9159 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9160 mgmt_status(status), &rp, sizeof(rp));
9162 mgmt_pending_remove(cmd);
9165 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_ADVERTISING handler: validates flags/TLV payloads,
 * registers (or replaces) an advertising instance and, if the
 * controller is powered and not in legacy HCI_ADVERTISING mode,
 * schedules it on the hardware via an HCI request.
 * NOTE(review): missing lines include locals (flags, status, err),
 * hci_dev_lock, goto labels/targets and several closing braces; see
 * complete upstream source.
 */
9168 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
9169 void *data, u16 data_len)
9171 struct mgmt_cp_add_advertising *cp = data;
9172 struct mgmt_rp_add_advertising rp;
9174 u32 supported_flags, phy_flags;
9176 u16 timeout, duration;
9177 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
9178 u8 schedule_instance = 0;
9179 struct adv_info *next_instance;
9181 struct mgmt_pending_cmd *cmd;
9182 struct hci_request req;
9184 bt_dev_dbg(hdev, "sock %p", sk);
9186 status = mgmt_le_support(hdev);
9188 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9191 /* Enabling the experimental LL Privay support disables support for
9194 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9195 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9196 MGMT_STATUS_NOT_SUPPORTED);
/* Instance IDs are 1..le_num_of_adv_sets. */
9198 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9199 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9200 MGMT_STATUS_INVALID_PARAMS);
/* Total length must match header + adv data + scan rsp data. */
9202 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
9203 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9204 MGMT_STATUS_INVALID_PARAMS);
9206 flags = __le32_to_cpu(cp->flags);
9207 timeout = __le16_to_cpu(cp->timeout);
9208 duration = __le16_to_cpu(cp->duration);
9210 /* The current implementation only supports a subset of the specified
9211 * flags. Also need to check mutual exclusiveness of sec flags.
9213 supported_flags = get_supported_adv_flags(hdev);
9214 phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more than one
 * SEC_* PHY bit is set, i.e. the PHY selection is not exclusive.
 */
9215 if (flags & ~supported_flags ||
9216 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
9217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9218 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs the power-off timer, so require a powered device. */
9222 if (timeout && !hdev_is_powered(hdev)) {
9223 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9224 MGMT_STATUS_REJECTED);
9228 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9229 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9230 pending_find(MGMT_OP_SET_LE, hdev)) {
9231 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9236 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
9237 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
9238 cp->scan_rsp_len, false)) {
9239 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9240 MGMT_STATUS_INVALID_PARAMS);
9244 err = hci_add_adv_instance(hdev, cp->instance, flags,
9245 cp->adv_data_len, cp->data,
9247 cp->data + cp->adv_data_len,
9250 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9251 MGMT_STATUS_FAILED);
9255 /* Only trigger an advertising added event if a new instance was
9258 if (hdev->adv_instance_cnt > prev_instance_cnt)
9259 mgmt_advertising_added(sk, hdev, cp->instance);
9261 if (hdev->cur_adv_instance == cp->instance) {
9262 /* If the currently advertised instance is being changed then
9263 * cancel the current advertising and schedule the next
9264 * instance. If there is only one instance then the overridden
9265 * advertising data will be visible right away.
9267 cancel_adv_timeout(hdev);
9269 next_instance = hci_get_next_instance(hdev, cp->instance);
9271 schedule_instance = next_instance->instance;
9272 } else if (!hdev->adv_instance_timeout) {
9273 /* Immediately advertise the new instance if no other
9274 * instance is currently being advertised.
9276 schedule_instance = cp->instance;
9279 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
9280 * there is no instance to be advertised then we have no HCI
9281 * communication to make. Simply return.
9283 if (!hdev_is_powered(hdev) ||
9284 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
9285 !schedule_instance) {
9286 rp.instance = cp->instance;
9287 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9288 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9292 /* We're good to go, update advertising data, parameters, and start
9295 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
9302 hci_req_init(&req, hdev);
9304 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
9307 err = hci_req_run(&req, add_advertising_complete);
9310 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
9311 MGMT_STATUS_FAILED);
9312 mgmt_pending_remove(cmd);
9316 hci_dev_unlock(hdev);
/* HCI completion callback for MGMT_OP_REMOVE_ADVERTISING: completes
 * the pending command. The instance was already removed, so success
 * is reported regardless of the disable status.
 * NOTE(review): missing lines include hci_dev_lock, the !cmd early
 * exit, the cp assignment and some braces; see complete upstream
 * source.
 */
9321 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
9324 struct mgmt_pending_cmd *cmd;
9325 struct mgmt_cp_remove_advertising *cp;
9326 struct mgmt_rp_remove_advertising rp;
9328 bt_dev_dbg(hdev, "status %d", status);
9332 /* A failure status here only means that we failed to disable
9333 * advertising. Otherwise, the advertising instance has been removed,
9334 * so report success.
9336 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
9341 rp.instance = cp->instance;
9343 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
9345 mgmt_pending_remove(cmd);
9348 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler: removes one advertising instance
 * (or all, when cp->instance is 0) and disables advertising on the
 * controller when required.
 * NOTE(review): missing lines include locals, hci_dev_lock, goto
 * labels/targets, the !cmd handling after mgmt_pending_add and closing
 * braces; see complete upstream source.
 */
9351 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9352 void *data, u16 data_len)
9354 struct mgmt_cp_remove_advertising *cp = data;
9355 struct mgmt_rp_remove_advertising rp;
9356 struct mgmt_pending_cmd *cmd;
9357 struct hci_request req;
9360 bt_dev_dbg(hdev, "sock %p", sk);
9362 /* Enabling the experimental LL Privay support disables support for
9365 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
9366 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
9367 MGMT_STATUS_NOT_SUPPORTED);
/* Non-zero instance must refer to an existing instance. */
9371 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9372 err = mgmt_cmd_status(sk, hdev->id,
9373 MGMT_OP_REMOVE_ADVERTISING,
9374 MGMT_STATUS_INVALID_PARAMS);
9378 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
9379 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
9380 pending_find(MGMT_OP_SET_LE, hdev)) {
9381 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9386 if (list_empty(&hdev->adv_instances)) {
9387 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9388 MGMT_STATUS_INVALID_PARAMS);
9392 hci_req_init(&req, hdev);
9394 /* If we use extended advertising, instance is disabled and removed */
9395 if (ext_adv_capable(hdev)) {
9396 __hci_req_disable_ext_adv_instance(&req, cp->instance);
9397 __hci_req_remove_ext_adv_instance(&req, cp->instance);
9400 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
9402 if (list_empty(&hdev->adv_instances))
9403 __hci_req_disable_advertising(&req);
9405 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
9406 * flag is set or the device isn't powered then we have no HCI
9407 * communication to make. Simply return.
9409 if (skb_queue_empty(&req.cmd_q) ||
9410 !hdev_is_powered(hdev) ||
9411 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
9412 hci_req_purge(&req);
9413 rp.instance = cp->instance;
9414 err = mgmt_cmd_complete(sk, hdev->id,
9415 MGMT_OP_REMOVE_ADVERTISING,
9416 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9420 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9427 err = hci_req_run(&req, remove_advertising_complete);
9429 mgmt_pending_remove(cmd);
9432 hci_dev_unlock(hdev);
9437 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9438 void *data, u16 data_len)
9440 struct mgmt_cp_get_adv_size_info *cp = data;
9441 struct mgmt_rp_get_adv_size_info rp;
9442 u32 flags, supported_flags;
9445 bt_dev_dbg(hdev, "sock %p", sk);
9447 if (!lmp_le_capable(hdev))
9448 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9449 MGMT_STATUS_REJECTED);
9451 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9452 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9453 MGMT_STATUS_INVALID_PARAMS);
9455 flags = __le32_to_cpu(cp->flags);
9457 /* The current implementation only supports a subset of the specified
9460 supported_flags = get_supported_adv_flags(hdev);
9461 if (flags & ~supported_flags)
9462 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9463 MGMT_STATUS_INVALID_PARAMS);
9465 rp.instance = cp->instance;
9466 rp.flags = cp->flags;
9467 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9468 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9470 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9471 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for core MGMT opcodes: array index equals the opcode.
 * Each entry gives the handler, the fixed (or minimum, with
 * HCI_MGMT_VAR_LEN) parameter size and HCI_MGMT_* permission flags.
 * NOTE(review): several flag continuation lines and the closing brace
 * are missing from this extract; see complete upstream source.
 */
9476 static const struct hci_mgmt_handler mgmt_handlers[] = {
9477 { NULL }, /* 0x0000 (no command) */
9478 { read_version, MGMT_READ_VERSION_SIZE,
9480 HCI_MGMT_UNTRUSTED },
9481 { read_commands, MGMT_READ_COMMANDS_SIZE,
9483 HCI_MGMT_UNTRUSTED },
9484 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9486 HCI_MGMT_UNTRUSTED },
9487 { read_controller_info, MGMT_READ_INFO_SIZE,
9488 HCI_MGMT_UNTRUSTED },
9489 { set_powered, MGMT_SETTING_SIZE },
9490 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9491 { set_connectable, MGMT_SETTING_SIZE },
9492 { set_fast_connectable, MGMT_SETTING_SIZE },
9493 { set_bondable, MGMT_SETTING_SIZE },
9494 { set_link_security, MGMT_SETTING_SIZE },
9495 { set_ssp, MGMT_SETTING_SIZE },
9496 { set_hs, MGMT_SETTING_SIZE },
9497 { set_le, MGMT_SETTING_SIZE },
9498 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9499 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9500 { add_uuid, MGMT_ADD_UUID_SIZE },
9501 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9502 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9504 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9506 { disconnect, MGMT_DISCONNECT_SIZE },
9507 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9508 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9509 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9510 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9511 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9512 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9513 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9514 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9515 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9516 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9517 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9518 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9519 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9521 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9522 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9523 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9524 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9525 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9526 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9527 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9528 { set_advertising, MGMT_SETTING_SIZE },
9529 { set_bredr, MGMT_SETTING_SIZE },
9530 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9531 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9532 { set_secure_conn, MGMT_SETTING_SIZE },
9533 { set_debug_keys, MGMT_SETTING_SIZE },
9534 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9535 { load_irks, MGMT_LOAD_IRKS_SIZE,
9537 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9538 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9539 { add_device, MGMT_ADD_DEVICE_SIZE },
9540 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9541 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9543 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9545 HCI_MGMT_UNTRUSTED },
9546 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9547 HCI_MGMT_UNCONFIGURED |
9548 HCI_MGMT_UNTRUSTED },
9549 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9550 HCI_MGMT_UNCONFIGURED },
9551 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9552 HCI_MGMT_UNCONFIGURED },
9553 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9555 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9556 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9558 HCI_MGMT_UNTRUSTED },
9559 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9560 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9562 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9563 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9564 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9565 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9566 HCI_MGMT_UNTRUSTED },
9567 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9568 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9569 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9570 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9572 { set_wideband_speech, MGMT_SETTING_SIZE },
9573 { read_security_info, MGMT_READ_SECURITY_INFO_SIZE,
9574 HCI_MGMT_UNTRUSTED },
9575 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9576 HCI_MGMT_UNTRUSTED |
9577 HCI_MGMT_HDEV_OPTIONAL },
9578 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9580 HCI_MGMT_HDEV_OPTIONAL },
9581 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9582 HCI_MGMT_UNTRUSTED },
9583 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9585 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9586 HCI_MGMT_UNTRUSTED },
9587 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9589 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9590 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9591 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9592 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9594 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
/* Dispatch table for Tizen vendor-specific MGMT opcodes (declared in
 * net/bluetooth/mgmt_tizen.h); same entry layout as mgmt_handlers.
 * NOTE(review): flag continuation lines and the closing brace are
 * missing from this extract.
 */
9598 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
9599 { NULL }, /* 0x0000 (no command) */
9600 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
9601 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
9603 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
9605 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
9606 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
9607 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
9608 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
9609 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
9610 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
9611 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
9612 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
9613 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
9614 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
9615 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
9616 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
/* Announce a newly registered controller index: legacy
 * (UNCONF_)INDEX_ADDED for primary controllers plus the extended
 * MGMT_EV_EXT_INDEX_ADDED for EXT_INDEX listeners.
 * NOTE(review): missing lines include the early return for raw
 * devices, the dev_type case labels, the ev field assignments and
 * braces; see complete upstream source.
 */
9620 void mgmt_index_added(struct hci_dev *hdev)
9622 struct mgmt_ev_ext_index ev;
/* Raw-only devices are never exposed over the management interface. */
9624 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9627 switch (hdev->dev_type) {
9629 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9630 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9631 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9634 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9635 HCI_MGMT_INDEX_EVENTS);
9648 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9649 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce removal of a controller index: fail all pending commands
 * with INVALID_INDEX, then send the legacy and extended removal
 * events.
 * NOTE(review): missing lines include the dev_type case labels, the
 * ev field assignments and braces; see complete upstream source.
 */
9652 void mgmt_index_removed(struct hci_dev *hdev)
9654 struct mgmt_ev_ext_index ev;
9655 u8 status = MGMT_STATUS_INVALID_INDEX;
9657 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9660 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command for this hdev. */
9662 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9664 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9665 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9666 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9669 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9670 HCI_MGMT_INDEX_EVENTS);
9683 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9684 HCI_MGMT_EXT_INDEX_EVENTS);
9687 /* This function requires the caller holds hdev->lock */
9688 static void restart_le_actions(struct hci_dev *hdev)
9690 struct hci_conn_params *p;
9692 list_for_each_entry(p, &hdev->le_conn_params, list) {
9693 /* Needed for AUTO_OFF case where might not "really"
9694 * have been powered off.
9696 list_del_init(&p->action);
9698 switch (p->auto_connect) {
9699 case HCI_AUTO_CONN_DIRECT:
9700 case HCI_AUTO_CONN_ALWAYS:
9701 list_add(&p->action, &hdev->pend_le_conns);
9703 case HCI_AUTO_CONN_REPORT:
9704 list_add(&p->action, &hdev->pend_le_reports);
9712 void mgmt_power_on(struct hci_dev *hdev, int err)
9714 struct cmd_lookup match = { NULL, hdev };
9716 bt_dev_dbg(hdev, "err %d", err);
9721 restart_le_actions(hdev);
9722 hci_update_background_scan(hdev);
9725 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9727 new_settings(hdev, match.sk);
9732 hci_dev_unlock(hdev);
9735 void __mgmt_power_off(struct hci_dev *hdev)
9737 struct cmd_lookup match = { NULL, hdev };
9738 u8 status, zero_cod[] = { 0, 0, 0 };
9740 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9742 /* If the power off is because of hdev unregistration let
9743 * use the appropriate INVALID_INDEX status. Otherwise use
9744 * NOT_POWERED. We cover both scenarios here since later in
9745 * mgmt_index_removed() any hci_conn callbacks will have already
9746 * been triggered, potentially causing misleading DISCONNECTED
9749 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9750 status = MGMT_STATUS_INVALID_INDEX;
9752 status = MGMT_STATUS_NOT_POWERED;
9754 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9756 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9757 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9758 zero_cod, sizeof(zero_cod),
9759 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9760 ext_info_changed(hdev, NULL);
9763 new_settings(hdev, match.sk);
9769 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9771 struct mgmt_pending_cmd *cmd;
9774 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9778 if (err == -ERFKILL)
9779 status = MGMT_STATUS_RFKILLED;
9781 status = MGMT_STATUS_FAILED;
9783 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9785 mgmt_pending_remove(cmd);
9788 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9791 struct mgmt_ev_new_link_key ev;
9793 memset(&ev, 0, sizeof(ev));
9795 ev.store_hint = persistent;
9796 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9797 ev.key.addr.type = BDADDR_BREDR;
9798 ev.key.type = key->type;
9799 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9800 ev.key.pin_len = key->pin_len;
9802 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9805 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9807 switch (ltk->type) {
9810 if (ltk->authenticated)
9811 return MGMT_LTK_AUTHENTICATED;
9812 return MGMT_LTK_UNAUTHENTICATED;
9814 if (ltk->authenticated)
9815 return MGMT_LTK_P256_AUTH;
9816 return MGMT_LTK_P256_UNAUTH;
9817 case SMP_LTK_P256_DEBUG:
9818 return MGMT_LTK_P256_DEBUG;
9821 return MGMT_LTK_UNAUTHENTICATED;
/* Notify userspace of a new LE long-term key via
 * MGMT_EV_NEW_LONG_TERM_KEY. Keys bound to non-identity random
 * addresses get store_hint 0 since they cannot be reused.
 * NOTE(review): missing lines include part of the comment block, the
 * else branch for store_hint, the statement under the SMP_LTK check
 * and the closing brace; see complete upstream source.
 */
9824 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9826 struct mgmt_ev_new_long_term_key ev;
9828 memset(&ev, 0, sizeof(ev));
9830 /* Devices using resolvable or non-resolvable random addresses
9831 * without providing an identity resolving key don't require
9832 * to store long term keys. Their addresses will change the
9835 * Only when a remote device provides an identity address
9836 * make sure the long term key is stored. If the remote
9837 * identity is known, the long term keys are internally
9838 * mapped to the identity address. So allow static random
9839 * and public addresses here.
/* Top two address bits != 11 means not a static random address. */
9841 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9842 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9843 ev.store_hint = 0x00;
9845 ev.store_hint = persistent;
9847 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9848 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9849 ev.key.type = mgmt_ltk_type(key);
9850 ev.key.enc_size = key->enc_size;
9851 ev.key.ediv = key->ediv;
9852 ev.key.rand = key->rand;
9854 if (key->type == SMP_LTK)
9857 /* Make sure we copy only the significant bytes based on the
9858 * encryption key size, and set the rest of the value to zeroes.
9860 memcpy(ev.key.val, key->val, key->enc_size);
9861 memset(ev.key.val + key->enc_size, 0,
9862 sizeof(ev.key.val) - key->enc_size);
9864 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* mgmt_new_irk() - emit MGMT_EV_NEW_IRK for a new identity resolving key.
 *
 * The event carries both the RPA currently in use and the identity
 * address/IRK value; store_hint follows 'persistent'.
 */
9867 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9869 struct mgmt_ev_new_irk ev;
9871 memset(&ev, 0, sizeof(ev));
9873 ev.store_hint = persistent;
9875 bacpy(&ev.rpa, &irk->rpa);
9876 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9877 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9878 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9880 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* mgmt_new_csrk() - emit MGMT_EV_NEW_CSRK for a new signature resolving
 * key.  Same identity-address rule as mgmt_new_ltk(): store_hint is
 * forced to 0 for private (non-identity) random addresses, otherwise it
 * follows 'persistent' (the third parameter is cut off in this extract).
 */
9883 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9886 struct mgmt_ev_new_csrk ev;
9888 memset(&ev, 0, sizeof(ev));
9890 /* Devices using resolvable or non-resolvable random addresses
9891 * without providing an identity resolving key don't require
9892 * to store signature resolving keys. Their addresses will change
9893 * the next time around.
9895 * Only when a remote device provides an identity address
9896 * make sure the signature resolving key is stored. So allow
9897 * static random and public addresses here.
9899 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9900 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9901 ev.store_hint = 0x00;
9903 ev.store_hint = persistent;
9905 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9906 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9907 ev.key.type = csrk->type;
9908 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9910 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* mgmt_new_conn_param() - emit MGMT_EV_NEW_CONN_PARAM with updated LE
 * connection parameters for an identity address.
 *
 * Non-identity addresses are silently ignored (the early-return body
 * after hci_is_identity_address() is not visible in this extract).  All
 * 16-bit fields are converted to little endian for the wire format.
 */
9913 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9914 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9915 u16 max_interval, u16 latency, u16 timeout)
9917 struct mgmt_ev_new_conn_param ev;
9919 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9922 memset(&ev, 0, sizeof(ev));
9923 bacpy(&ev.addr.bdaddr, bdaddr);
9924 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9925 ev.store_hint = store_hint;
9926 ev.min_interval = cpu_to_le16(min_interval);
9927 ev.max_interval = cpu_to_le16(max_interval);
9928 ev.latency = cpu_to_le16(latency);
9929 ev.timeout = cpu_to_le16(timeout);
9931 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* mgmt_device_connected() - emit MGMT_EV_DEVICE_CONNECTED for a new
 * connection.
 *
 * The event's variable-length EIR section is filled from either the LE
 * advertising data cached on the connection, or (for BR/EDR, in the
 * non-visible else branch) the remote name plus class of device.
 *
 * NOTE(review): sampled extract — the 'char buf[...]' declaration,
 * 'eir_len' initialization and several braces are not visible here.
 */
9934 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9935 u32 flags, u8 *name, u8 name_len)
9938 struct mgmt_ev_device_connected *ev = (void *) buf;
9941 bacpy(&ev->addr.bdaddr, &conn->dst);
9942 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9944 ev->flags = __cpu_to_le32(flags);
9946 /* We must ensure that the EIR Data fields are ordered and
9947 * unique. Keep it simple for now and avoid the problem by not
9948 * adding any BR/EDR data to the LE adv.
9950 if (conn->le_adv_data_len > 0) {
9951 memcpy(&ev->eir[eir_len],
9952 conn->le_adv_data, conn->le_adv_data_len);
9953 eir_len = conn->le_adv_data_len;
/* BR/EDR path: append complete name, then CoD if it is non-zero. */
9956 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
9959 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
9960 eir_len = eir_append_data(ev->eir, eir_len,
9962 conn->dev_class, 3);
9965 ev->eir_len = cpu_to_le16(eir_len);
9967 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
9968 sizeof(*ev) + eir_len, NULL);
/* disconnect_rsp() - mgmt_pending_foreach() callback: complete a pending
 * MGMT_OP_DISCONNECT with success and hand its socket back to the caller
 * through the 'struct sock **' cookie (assignment lines not visible in
 * this extract) before removing the pending entry.
 */
9971 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9973 struct sock **sk = data;
9975 cmd->cmd_complete(cmd, 0);
9980 mgmt_pending_remove(cmd);
/* unpair_device_rsp() - mgmt_pending_foreach() callback: notify
 * device-unpaired for the pending MGMT_OP_UNPAIR_DEVICE's address, then
 * complete and remove the command.
 */
9983 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9985 struct hci_dev *hdev = data;
9986 struct mgmt_cp_unpair_device *cp = cmd->param;
9988 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9990 cmd->cmd_complete(cmd, 0);
9991 mgmt_pending_remove(cmd);
/* mgmt_powering_down() - report whether a pending SET_POWERED(off)
 * command exists for @hdev.
 *
 * NOTE(review): sampled extract — the body after pending_find() (NULL
 * check, inspection of the mgmt_mode parameter, return) is not visible.
 */
9994 bool mgmt_powering_down(struct hci_dev *hdev)
9996 struct mgmt_pending_cmd *cmd;
9997 struct mgmt_mode *cp;
9999 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* mgmt_device_disconnected() - emit MGMT_EV_DEVICE_DISCONNECTED.
 *
 * If the controller is powering down and this was the last connection,
 * the delayed power_off work is flushed immediately.  Events are only
 * sent for management-initiated connections on ACL/LE links; a pending
 * MGMT_OP_DISCONNECT (if any) is completed first and its socket used as
 * the skip-socket for the event.  While suspended, the reason is
 * overridden with MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND.
 */
10010 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
10011 u8 link_type, u8 addr_type, u8 reason,
10012 bool mgmt_connected)
10014 struct mgmt_ev_device_disconnected ev;
10015 struct sock *sk = NULL;
10017 /* The connection is still in hci_conn_hash so test for 1
10018 * instead of 0 to know if this is the last one.
10020 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10021 cancel_delayed_work(&hdev->power_off);
10022 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10025 if (!mgmt_connected)
10028 if (link_type != ACL_LINK && link_type != LE_LINK)
10031 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
10033 bacpy(&ev.addr.bdaddr, bdaddr);
10034 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10035 ev.reason = reason;
10037 /* Report disconnects due to suspend */
10038 if (hdev->suspended)
10039 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
10041 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
/* NOTE(review): trailing cleanup (sock_put on sk, exit label) between
 * these lines is not visible in this extract.
 */
10046 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* mgmt_disconnect_failed() - complete a pending MGMT_OP_DISCONNECT with
 * the translated HCI failure status, but only if the pending command
 * matches the failing address and address type.  Pending unpair commands
 * are flushed first.
 *
 * NOTE(review): sampled extract — the NULL check after pending_find(),
 * 'cp = cmd->param', and the 'unlock'/early-return paths are not visible.
 */
10050 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10051 u8 link_type, u8 addr_type, u8 status)
10053 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
10054 struct mgmt_cp_disconnect *cp;
10055 struct mgmt_pending_cmd *cmd;
10057 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
10060 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
10066 if (bacmp(bdaddr, &cp->addr.bdaddr))
10069 if (cp->addr.type != bdaddr_type)
10072 cmd->cmd_complete(cmd, mgmt_status(status));
10073 mgmt_pending_remove(cmd);
/* mgmt_connect_failed() - emit MGMT_EV_CONNECT_FAILED with the mapped
 * status; like mgmt_device_disconnected(), flushes the power_off work
 * when this was the last connection during power-down.
 */
10076 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10077 u8 addr_type, u8 status)
10079 struct mgmt_ev_connect_failed ev;
10081 /* The connection is still in hci_conn_hash so test for 1
10082 * instead of 0 to know if this is the last one.
10084 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
10085 cancel_delayed_work(&hdev->power_off);
10086 queue_work(hdev->req_workqueue, &hdev->power_off.work);
10089 bacpy(&ev.addr.bdaddr, bdaddr);
10090 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10091 ev.status = mgmt_status(status);
10093 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* mgmt_pin_code_request() - emit MGMT_EV_PIN_CODE_REQUEST asking user
 * space for a PIN.  'secure' indicates a 16-digit secure PIN is required.
 * PIN pairing is BR/EDR only, hence the fixed address type.
 */
10096 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
10098 struct mgmt_ev_pin_code_request ev;
10100 bacpy(&ev.addr.bdaddr, bdaddr);
10101 ev.addr.type = BDADDR_BREDR;
10102 ev.secure = secure;
10104 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* mgmt_pin_code_reply_complete() - finish a pending MGMT_OP_PIN_CODE_REPLY
 * with the translated HCI status (NULL check after pending_find() not
 * visible in this extract).
 */
10107 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10110 struct mgmt_pending_cmd *cmd;
10112 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
10116 cmd->cmd_complete(cmd, mgmt_status(status));
10117 mgmt_pending_remove(cmd);
/* mgmt_pin_code_neg_reply_complete() - same as above for the negative
 * reply opcode.
 */
10120 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10123 struct mgmt_pending_cmd *cmd;
10125 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
10129 cmd->cmd_complete(cmd, mgmt_status(status));
10130 mgmt_pending_remove(cmd);
/* mgmt_user_confirm_request() - emit MGMT_EV_USER_CONFIRM_REQUEST asking
 * user space to confirm a numeric comparison 'value'; confirm_hint tells
 * it whether to show the value.  Returns mgmt_event()'s result.
 */
10133 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10134 u8 link_type, u8 addr_type, u32 value,
10137 struct mgmt_ev_user_confirm_request ev;
10139 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10141 bacpy(&ev.addr.bdaddr, bdaddr);
10142 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10143 ev.confirm_hint = confirm_hint;
/* Wire format is little endian. */
10144 ev.value = cpu_to_le32(value);
10146 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* mgmt_user_passkey_request() - emit MGMT_EV_USER_PASSKEY_REQUEST asking
 * user space to enter a passkey for the given address.
 */
10150 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
10151 u8 link_type, u8 addr_type)
10153 struct mgmt_ev_user_passkey_request ev;
10155 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10157 bacpy(&ev.addr.bdaddr, bdaddr);
10158 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10160 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* user_pairing_resp_complete() - shared completion for the four user
 * confirm/passkey (neg-)reply opcodes: find the pending command for
 * 'opcode', complete it with the translated status and remove it.
 * NOTE(review): sampled extract — the NULL check / return statements
 * after pending_find() are not visible here.
 */
10164 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10165 u8 link_type, u8 addr_type, u8 status,
10168 struct mgmt_pending_cmd *cmd;
10170 cmd = pending_find(opcode, hdev);
10174 cmd->cmd_complete(cmd, mgmt_status(status));
10175 mgmt_pending_remove(cmd);
/* Thin per-opcode wrappers around user_pairing_resp_complete(). */
10180 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10181 u8 link_type, u8 addr_type, u8 status)
10183 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10184 status, MGMT_OP_USER_CONFIRM_REPLY);
10187 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10188 u8 link_type, u8 addr_type, u8 status)
10190 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10192 MGMT_OP_USER_CONFIRM_NEG_REPLY);
10195 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10196 u8 link_type, u8 addr_type, u8 status)
10198 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10199 status, MGMT_OP_USER_PASSKEY_REPLY);
10202 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
10203 u8 link_type, u8 addr_type, u8 status)
10205 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
10207 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* mgmt_user_passkey_notify() - emit MGMT_EV_PASSKEY_NOTIFY so user space
 * can display 'passkey'; 'entered' is the count of digits typed so far
 * on the remote side.
 */
10210 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
10211 u8 link_type, u8 addr_type, u32 passkey,
10214 struct mgmt_ev_passkey_notify ev;
10216 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10218 bacpy(&ev.addr.bdaddr, bdaddr);
10219 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10220 ev.passkey = __cpu_to_le32(passkey);
10221 ev.entered = entered;
10223 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* mgmt_auth_failed() - emit MGMT_EV_AUTH_FAILED for a failed
 * authentication.  If a pairing command is pending for this connection,
 * its socket is used as the skip-socket and the command is completed
 * with the same status and removed (NULL-check branching not visible in
 * this extract).
 */
10226 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
10228 struct mgmt_ev_auth_failed ev;
10229 struct mgmt_pending_cmd *cmd;
10230 u8 status = mgmt_status(hci_status);
10232 bacpy(&ev.addr.bdaddr, &conn->dst);
10233 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10234 ev.status = status;
10236 cmd = find_pairing(conn);
10238 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
10239 cmd ? cmd->sk : NULL);
10242 cmd->cmd_complete(cmd, status);
10243 mgmt_pending_remove(cmd);
/* mgmt_auth_enable_complete() - handle completion of the HCI
 * authentication-enable command triggered by MGMT_OP_SET_LINK_SECURITY.
 * On failure, all pending SET_LINK_SECURITY commands get the mapped
 * error; on success the HCI_LINK_SECURITY flag is synced with the
 * HCI_AUTH controller flag, pending commands are answered with the new
 * settings, and New Settings is broadcast if the flag actually changed.
 */
10247 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
10249 struct cmd_lookup match = { NULL, hdev };
10253 u8 mgmt_err = mgmt_status(status);
10254 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
10255 cmd_status_rsp, &mgmt_err);
10259 if (test_bit(HCI_AUTH, &hdev->flags))
10260 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10262 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10264 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10268 new_settings(hdev, match.sk);
10271 sock_put(match.sk);
/* clear_eir() - queue an HCI Write EIR command with an all-zero payload
 * (and clear the cached hdev->eir) if the controller supports extended
 * inquiry response; no-op otherwise.
 */
10274 static void clear_eir(struct hci_request *req)
10276 struct hci_dev *hdev = req->hdev;
10277 struct hci_cp_write_eir cp;
10279 if (!lmp_ext_inq_capable(hdev))
10282 memset(hdev->eir, 0, sizeof(hdev->eir));
10284 memset(&cp, 0, sizeof(cp));
10286 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* mgmt_ssp_enable_complete() - handle completion of the HCI SSP-mode
 * write triggered by MGMT_OP_SET_SSP.
 *
 * On failure, HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) are rolled
 * back and pending commands get the error status.  On success the flags
 * are synced to the requested 'enable' state, pending commands are
 * answered, New Settings broadcast on change, and a follow-up request
 * updates the EIR (or clears it when SSP ended up disabled — the
 * clear_eir() else-branch is not visible in this extract).
 */
10289 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
10291 struct cmd_lookup match = { NULL, hdev };
10292 struct hci_request req;
10293 bool changed = false;
10296 u8 mgmt_err = mgmt_status(status);
10298 if (enable && hci_dev_test_and_clear_flag(hdev,
10299 HCI_SSP_ENABLED)) {
10300 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
10301 new_settings(hdev, NULL);
10304 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
10310 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
10312 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
10314 changed = hci_dev_test_and_clear_flag(hdev,
10317 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
10320 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
10323 new_settings(hdev, match.sk);
10326 sock_put(match.sk);
10328 hci_req_init(&req, hdev);
10330 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10331 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
10332 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
10333 sizeof(enable), &enable);
10334 __hci_req_update_eir(&req);
10339 hci_req_run(&req, NULL);
/* sk_lookup() - mgmt_pending_foreach() callback: capture (and hold a
 * reference on) the first pending command's socket in the cmd_lookup
 * cookie, so later events can skip echoing to the initiator.
 */
10342 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10344 struct cmd_lookup *match = data;
10346 if (match->sk == NULL) {
10347 match->sk = cmd->sk;
10348 sock_hold(match->sk);
/* mgmt_set_class_of_dev_complete() - after a class-of-device write,
 * locate the initiating socket among pending SET_DEV_CLASS / ADD_UUID /
 * REMOVE_UUID commands, broadcast Class Of Device Changed (on success —
 * the status check guarding it is not visible in this extract) and drop
 * the held socket reference.
 */
10352 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10355 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10357 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
10358 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
10359 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10362 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10363 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10364 ext_info_changed(hdev, NULL);
10368 sock_put(match.sk);
/* mgmt_set_local_name_complete() - handle completion of a local-name
 * update: cache the new name in hdev->dev_name, and broadcast Local Name
 * Changed (skipping the initiator's socket) unless the write was part of
 * the power-on sequence, in which case no signal is sent.
 */
10371 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10373 struct mgmt_cp_set_local_name ev;
10374 struct mgmt_pending_cmd *cmd;
10379 memset(&ev, 0, sizeof(ev));
10380 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10381 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10383 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10385 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10387 /* If this is a HCI command related to powering on the
10388 * HCI dev don't send any mgmt signals.
10390 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10394 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10395 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10396 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* has_uuid() - linear search of a 128-bit UUID in the uuids[] filter
 * list; the true/false return lines are not visible in this extract.
 */
10399 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10403 for (i = 0; i < uuid_count; i++) {
10404 if (!memcmp(uuid, uuids[i], 16))
/* eir_has_uuids() - walk EIR/advertising data and report whether any
 * advertised 16-, 32- or 128-bit service UUID matches the filter list.
 * 16/32-bit UUIDs are expanded against the Bluetooth base UUID before
 * comparison.  Malformed fields (zero length or overrunning the buffer)
 * terminate the scan.
 */
10411 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10415 while (parsed < eir_len) {
10416 u8 field_len = eir[0];
10420 if (field_len == 0)
10423 if (eir_len - parsed < field_len + 1)
10427 case EIR_UUID16_ALL:
10428 case EIR_UUID16_SOME:
/* Each entry is 2 bytes little endian, placed into the base UUID. */
10429 for (i = 0; i + 3 <= field_len; i += 2) {
10430 memcpy(uuid, bluetooth_base_uuid, 16);
10431 uuid[13] = eir[i + 3];
10432 uuid[12] = eir[i + 2];
10433 if (has_uuid(uuid, uuid_count, uuids))
10437 case EIR_UUID32_ALL:
10438 case EIR_UUID32_SOME:
10439 for (i = 0; i + 5 <= field_len; i += 4) {
10440 memcpy(uuid, bluetooth_base_uuid, 16);
10441 uuid[15] = eir[i + 5];
10442 uuid[14] = eir[i + 4];
10443 uuid[13] = eir[i + 3];
10444 uuid[12] = eir[i + 2];
10445 if (has_uuid(uuid, uuid_count, uuids))
10449 case EIR_UUID128_ALL:
10450 case EIR_UUID128_SOME:
10451 for (i = 0; i + 17 <= field_len; i += 16) {
10452 memcpy(uuid, eir + i + 2, 16);
10453 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length byte + payload). */
10459 parsed += field_len + 1;
10460 eir += field_len + 1;
/* restart_le_scan() - schedule a delayed LE scan restart so duplicate
 * filtering gets refreshed, but only while scanning is active and the
 * current scan window has enough time left.
 */
10466 static void restart_le_scan(struct hci_dev *hdev)
10468 /* If controller is not scanning we are done. */
10469 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
10472 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
10473 hdev->discovery.scan_start +
10474 hdev->discovery.scan_duration))
10477 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
10478 DISCOV_LE_RESTART_DELAY);
/* is_filter_match() - apply the service-discovery result filter: RSSI
 * threshold and UUID list, with special handling for controllers that
 * have HCI_QUIRK_STRICT_DUPLICATE_FILTER (a scan restart is requested
 * and the RSSI re-checked).  The true/false return lines are not
 * visible in this extract.
 */
10481 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10482 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10484 /* If a RSSI threshold has been specified, and
10485 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10486 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10487 * is set, let it through for further processing, as we might need to
10488 * restart the scan.
10490 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10491 * the results are also dropped.
10493 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10494 (rssi == HCI_RSSI_INVALID ||
10495 (rssi < hdev->discovery.rssi &&
10496 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10499 if (hdev->discovery.uuid_count != 0) {
10500 /* If a list of UUIDs is provided in filter, results with no
10501 * matching UUID should be dropped.
10503 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10504 hdev->discovery.uuids) &&
10505 !eir_has_uuids(scan_rsp, scan_rsp_len,
10506 hdev->discovery.uuid_count,
10507 hdev->discovery.uuids))
10511 /* If duplicate filtering does not report RSSI changes, then restart
10512 * scanning to ensure updated result with updated RSSI values.
10514 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10515 restart_le_scan(hdev);
10517 /* Validate RSSI value against the RSSI threshold once more. */
10518 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10519 rssi < hdev->discovery.rssi)
/* mgmt_device_found() - emit MGMT_EV_DEVICE_FOUND for an inquiry or
 * advertising result.
 *
 * Results are suppressed unless a kernel-initiated discovery is active
 * (with the LE passive-scan / advertisement-monitor exception), then run
 * through the service-discovery filter and the limited-discoverable
 * check.  The event packs EIR/adv data, an appended CoD field when one
 * is not already present, and any scan response.
 *
 * NOTE(review): sampled extract — the 'char buf[...]' declaration,
 * several early 'return's, the 'rssi = 0' fixup and 'ev->rssi' store
 * are not visible here.
 */
10526 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10527 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10528 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10531 struct mgmt_ev_device_found *ev = (void *)buf;
10534 /* Don't send events for a non-kernel initiated discovery. With
10535 * LE one exception is if we have pend_le_reports > 0 in which
10536 * case we're doing passive scanning and want these events.
10538 if (!hci_discovery_active(hdev)) {
10539 if (link_type == ACL_LINK)
10541 if (link_type == LE_LINK &&
10542 list_empty(&hdev->pend_le_reports) &&
10543 !hci_is_adv_monitoring(hdev)) {
10548 if (hdev->discovery.result_filtering) {
10549 /* We are using service discovery */
10550 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10555 if (hdev->discovery.limited) {
10556 /* Check for limited discoverable bit */
10558 if (!(dev_class[1] & 0x20))
10561 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10562 if (!flags || !(flags[0] & LE_AD_LIMITED))
10567 /* Make sure that the buffer is big enough. The 5 extra bytes
10568 * are for the potential CoD field.
10570 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
10573 memset(buf, 0, sizeof(buf));
10575 /* In case of device discovery with BR/EDR devices (pre 1.2), the
10576 * RSSI value was reported as 0 when not available. This behavior
10577 * is kept when using device discovery. This is required for full
10578 * backwards compatibility with the API.
10580 * However when using service discovery, the value 127 will be
10581 * returned when the RSSI is not available.
10583 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10584 link_type == ACL_LINK)
10587 bacpy(&ev->addr.bdaddr, bdaddr);
10588 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10590 ev->flags = cpu_to_le32(flags);
10593 /* Copy EIR or advertising data into event */
10594 memcpy(ev->eir, eir, eir_len);
10596 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
10598 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
10601 if (scan_rsp_len > 0)
10602 /* Append scan response data to event */
10603 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
10605 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10606 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
10608 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* mgmt_remote_name() - emit MGMT_EV_DEVICE_FOUND carrying only the
 * resolved remote name (as an EIR_NAME_COMPLETE field) after a remote
 * name request; the 2 extra buffer bytes cover the EIR length/type
 * header.
 */
10611 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10612 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10614 struct mgmt_ev_device_found *ev;
10615 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
10618 ev = (struct mgmt_ev_device_found *) buf;
10620 memset(buf, 0, sizeof(buf));
10622 bacpy(&ev->addr.bdaddr, bdaddr);
10623 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10626 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
10629 ev->eir_len = cpu_to_le16(eir_len);
10631 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* mgmt_discovering() - emit MGMT_EV_DISCOVERING announcing that
 * discovery of the current hdev->discovery.type started (1) or stopped
 * (0).
 */
10634 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10636 struct mgmt_ev_discovering ev;
10638 bt_dev_dbg(hdev, "discovering %u", discovering);
10640 memset(&ev, 0, sizeof(ev));
10641 ev.type = hdev->discovery.type;
10642 ev.discovering = discovering;
10644 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* mgmt_suspending() - emit MGMT_EV_CONTROLLER_SUSPEND with the suspend
 * state the controller entered.
 */
10647 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10649 struct mgmt_ev_controller_suspend ev;
10651 ev.suspend_state = state;
10652 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
/* mgmt_resuming() - emit MGMT_EV_CONTROLLER_RESUME with the wake reason
 * and, when a remote device caused the wake, its address; otherwise the
 * address field is zeroed (the if/else around the bacpy vs memset is
 * not visible in this extract).
 */
10655 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10658 struct mgmt_ev_controller_resume ev;
10660 ev.wake_reason = reason;
10662 bacpy(&ev.addr.bdaddr, bdaddr);
10663 ev.addr.type = addr_type;
10665 memset(&ev.addr, 0, sizeof(ev.addr));
10668 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Management channel descriptor for HCI_CHANNEL_CONTROL, including the
 * Tizen vendor handler table (the surrounding #ifdef guards are not
 * visible in this extract).
 */
10671 static struct hci_mgmt_chan chan = {
10672 .channel = HCI_CHANNEL_CONTROL,
10673 .handler_count = ARRAY_SIZE(mgmt_handlers),
10674 .handlers = mgmt_handlers,
10676 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
10677 .tizen_handlers = tizen_mgmt_handlers,
10679 .hdev_init = mgmt_init_hdev,
/* mgmt_init() - register the management control channel with the HCI
 * socket layer; returns hci_mgmt_chan_register()'s result.
 */
10682 int mgmt_init(void)
10684 return hci_mgmt_chan_register(&chan);
/* mgmt_exit() - unregister the management control channel on shutdown. */
10687 void mgmt_exit(void)
10689 hci_mgmt_chan_unregister(&chan);