/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */
#include <linux/module.h>

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/mgmt_tizen.h>

#include "hci_request.h"
#include "mgmt_util.h"
/* Management interface protocol version/revision reported to userspace. */
#define MGMT_VERSION	1
#define MGMT_REVISION	14
46 static const u16 mgmt_commands[] = {
47 MGMT_OP_READ_INDEX_LIST,
50 MGMT_OP_SET_DISCOVERABLE,
51 MGMT_OP_SET_CONNECTABLE,
52 MGMT_OP_SET_FAST_CONNECTABLE,
54 MGMT_OP_SET_LINK_SECURITY,
58 MGMT_OP_SET_DEV_CLASS,
59 MGMT_OP_SET_LOCAL_NAME,
62 MGMT_OP_LOAD_LINK_KEYS,
63 MGMT_OP_LOAD_LONG_TERM_KEYS,
65 MGMT_OP_GET_CONNECTIONS,
66 MGMT_OP_PIN_CODE_REPLY,
67 MGMT_OP_PIN_CODE_NEG_REPLY,
68 MGMT_OP_SET_IO_CAPABILITY,
70 MGMT_OP_CANCEL_PAIR_DEVICE,
71 MGMT_OP_UNPAIR_DEVICE,
72 MGMT_OP_USER_CONFIRM_REPLY,
73 MGMT_OP_USER_CONFIRM_NEG_REPLY,
74 MGMT_OP_USER_PASSKEY_REPLY,
75 MGMT_OP_USER_PASSKEY_NEG_REPLY,
76 MGMT_OP_READ_LOCAL_OOB_DATA,
77 MGMT_OP_ADD_REMOTE_OOB_DATA,
78 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
79 MGMT_OP_START_DISCOVERY,
80 MGMT_OP_STOP_DISCOVERY,
83 MGMT_OP_UNBLOCK_DEVICE,
84 MGMT_OP_SET_DEVICE_ID,
85 MGMT_OP_SET_ADVERTISING,
87 MGMT_OP_SET_STATIC_ADDRESS,
88 MGMT_OP_SET_SCAN_PARAMS,
89 MGMT_OP_SET_SECURE_CONN,
90 MGMT_OP_SET_DEBUG_KEYS,
93 MGMT_OP_GET_CONN_INFO,
94 MGMT_OP_GET_CLOCK_INFO,
96 MGMT_OP_REMOVE_DEVICE,
97 MGMT_OP_LOAD_CONN_PARAM,
98 MGMT_OP_READ_UNCONF_INDEX_LIST,
99 MGMT_OP_READ_CONFIG_INFO,
100 MGMT_OP_SET_EXTERNAL_CONFIG,
101 MGMT_OP_SET_PUBLIC_ADDRESS,
102 MGMT_OP_START_SERVICE_DISCOVERY,
103 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
104 MGMT_OP_READ_EXT_INDEX_LIST,
105 MGMT_OP_READ_ADV_FEATURES,
106 MGMT_OP_ADD_ADVERTISING,
107 MGMT_OP_REMOVE_ADVERTISING,
108 MGMT_OP_GET_ADV_SIZE_INFO,
109 MGMT_OP_START_LIMITED_DISCOVERY,
110 MGMT_OP_READ_EXT_INFO,
111 MGMT_OP_SET_APPEARANCE,
114 static const u16 mgmt_events[] = {
115 MGMT_EV_CONTROLLER_ERROR,
117 MGMT_EV_INDEX_REMOVED,
118 MGMT_EV_NEW_SETTINGS,
119 MGMT_EV_CLASS_OF_DEV_CHANGED,
120 MGMT_EV_LOCAL_NAME_CHANGED,
121 MGMT_EV_NEW_LINK_KEY,
122 MGMT_EV_NEW_LONG_TERM_KEY,
123 MGMT_EV_DEVICE_CONNECTED,
124 MGMT_EV_DEVICE_DISCONNECTED,
125 MGMT_EV_CONNECT_FAILED,
126 MGMT_EV_PIN_CODE_REQUEST,
127 MGMT_EV_USER_CONFIRM_REQUEST,
128 MGMT_EV_USER_PASSKEY_REQUEST,
130 MGMT_EV_DEVICE_FOUND,
132 MGMT_EV_DEVICE_BLOCKED,
133 MGMT_EV_DEVICE_UNBLOCKED,
134 MGMT_EV_DEVICE_UNPAIRED,
135 MGMT_EV_PASSKEY_NOTIFY,
138 MGMT_EV_DEVICE_ADDED,
139 MGMT_EV_DEVICE_REMOVED,
140 MGMT_EV_NEW_CONN_PARAM,
141 MGMT_EV_UNCONF_INDEX_ADDED,
142 MGMT_EV_UNCONF_INDEX_REMOVED,
143 MGMT_EV_NEW_CONFIG_OPTIONS,
144 MGMT_EV_EXT_INDEX_ADDED,
145 MGMT_EV_EXT_INDEX_REMOVED,
146 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
147 MGMT_EV_ADVERTISING_ADDED,
148 MGMT_EV_ADVERTISING_REMOVED,
149 MGMT_EV_EXT_INFO_CHANGED,
152 static const u16 mgmt_untrusted_commands[] = {
153 MGMT_OP_READ_INDEX_LIST,
155 MGMT_OP_READ_UNCONF_INDEX_LIST,
156 MGMT_OP_READ_CONFIG_INFO,
157 MGMT_OP_READ_EXT_INDEX_LIST,
158 MGMT_OP_READ_EXT_INFO,
161 static const u16 mgmt_untrusted_events[] = {
163 MGMT_EV_INDEX_REMOVED,
164 MGMT_EV_NEW_SETTINGS,
165 MGMT_EV_CLASS_OF_DEV_CHANGED,
166 MGMT_EV_LOCAL_NAME_CHANGED,
167 MGMT_EV_UNCONF_INDEX_ADDED,
168 MGMT_EV_UNCONF_INDEX_REMOVED,
169 MGMT_EV_NEW_CONFIG_OPTIONS,
170 MGMT_EV_EXT_INDEX_ADDED,
171 MGMT_EV_EXT_INDEX_REMOVED,
172 MGMT_EV_EXT_INFO_CHANGED,
/* How long the UUID/class service cache stays valid after changes. */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 128-bit all-zero key used to detect blank link/LTK values. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
180 /* HCI to MGMT error code conversion table */
181 static u8 mgmt_status_table[] = {
183 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
184 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
185 MGMT_STATUS_FAILED, /* Hardware Failure */
186 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
187 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
188 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
189 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
190 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
191 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
192 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
193 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
194 MGMT_STATUS_BUSY, /* Command Disallowed */
195 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
196 MGMT_STATUS_REJECTED, /* Rejected Security */
197 MGMT_STATUS_REJECTED, /* Rejected Personal */
198 MGMT_STATUS_TIMEOUT, /* Host Timeout */
199 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
200 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
201 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
202 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
203 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
204 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
205 MGMT_STATUS_BUSY, /* Repeated Attempts */
206 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
207 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
208 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
209 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
210 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
211 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
212 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
213 MGMT_STATUS_FAILED, /* Unspecified Error */
214 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
215 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
216 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
217 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
218 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
219 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
220 MGMT_STATUS_FAILED, /* Unit Link Key Used */
221 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
222 MGMT_STATUS_TIMEOUT, /* Instant Passed */
223 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
224 MGMT_STATUS_FAILED, /* Transaction Collision */
225 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
226 MGMT_STATUS_REJECTED, /* QoS Rejected */
227 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
228 MGMT_STATUS_REJECTED, /* Insufficient Security */
229 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
230 MGMT_STATUS_BUSY, /* Role Switch Pending */
231 MGMT_STATUS_FAILED, /* Slot Violation */
232 MGMT_STATUS_FAILED, /* Role Switch Failed */
233 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
234 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
235 MGMT_STATUS_BUSY, /* Host Busy Pairing */
236 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
237 MGMT_STATUS_BUSY, /* Controller Busy */
238 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
239 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
240 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
241 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
242 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
245 static u8 mgmt_status(u8 hci_status)
247 if (hci_status < ARRAY_SIZE(mgmt_status_table))
248 return mgmt_status_table[hci_status];
250 return MGMT_STATUS_FAILED;
253 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
256 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
260 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
261 u16 len, int flag, struct sock *skip_sk)
263 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
267 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
268 struct sock *skip_sk)
270 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
271 HCI_SOCK_TRUSTED, skip_sk);
274 static u8 le_addr_type(u8 mgmt_addr_type)
276 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
277 return ADDR_LE_DEV_PUBLIC;
279 return ADDR_LE_DEV_RANDOM;
282 void mgmt_fill_version_info(void *ver)
284 struct mgmt_rp_read_version *rp = ver;
286 rp->version = MGMT_VERSION;
287 rp->revision = cpu_to_le16(MGMT_REVISION);
290 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
293 struct mgmt_rp_read_version rp;
295 BT_DBG("sock %p", sk);
297 mgmt_fill_version_info(&rp);
299 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
303 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
306 struct mgmt_rp_read_commands *rp;
307 u16 num_commands, num_events;
311 BT_DBG("sock %p", sk);
313 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
314 num_commands = ARRAY_SIZE(mgmt_commands);
315 num_events = ARRAY_SIZE(mgmt_events);
317 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
318 num_events = ARRAY_SIZE(mgmt_untrusted_events);
321 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
323 rp = kmalloc(rp_size, GFP_KERNEL);
327 rp->num_commands = cpu_to_le16(num_commands);
328 rp->num_events = cpu_to_le16(num_events);
330 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
331 __le16 *opcode = rp->opcodes;
333 for (i = 0; i < num_commands; i++, opcode++)
334 put_unaligned_le16(mgmt_commands[i], opcode);
336 for (i = 0; i < num_events; i++, opcode++)
337 put_unaligned_le16(mgmt_events[i], opcode);
339 __le16 *opcode = rp->opcodes;
341 for (i = 0; i < num_commands; i++, opcode++)
342 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
344 for (i = 0; i < num_events; i++, opcode++)
345 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
348 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
355 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
358 struct mgmt_rp_read_index_list *rp;
364 BT_DBG("sock %p", sk);
366 read_lock(&hci_dev_list_lock);
369 list_for_each_entry(d, &hci_dev_list, list) {
370 if (d->dev_type == HCI_PRIMARY &&
371 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
375 rp_len = sizeof(*rp) + (2 * count);
376 rp = kmalloc(rp_len, GFP_ATOMIC);
378 read_unlock(&hci_dev_list_lock);
383 list_for_each_entry(d, &hci_dev_list, list) {
384 if (hci_dev_test_flag(d, HCI_SETUP) ||
385 hci_dev_test_flag(d, HCI_CONFIG) ||
386 hci_dev_test_flag(d, HCI_USER_CHANNEL))
389 /* Devices marked as raw-only are neither configured
390 * nor unconfigured controllers.
392 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
395 if (d->dev_type == HCI_PRIMARY &&
396 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
397 rp->index[count++] = cpu_to_le16(d->id);
398 BT_DBG("Added hci%u", d->id);
402 rp->num_controllers = cpu_to_le16(count);
403 rp_len = sizeof(*rp) + (2 * count);
405 read_unlock(&hci_dev_list_lock);
407 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
415 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
416 void *data, u16 data_len)
418 struct mgmt_rp_read_unconf_index_list *rp;
424 BT_DBG("sock %p", sk);
426 read_lock(&hci_dev_list_lock);
429 list_for_each_entry(d, &hci_dev_list, list) {
430 if (d->dev_type == HCI_PRIMARY &&
431 hci_dev_test_flag(d, HCI_UNCONFIGURED))
435 rp_len = sizeof(*rp) + (2 * count);
436 rp = kmalloc(rp_len, GFP_ATOMIC);
438 read_unlock(&hci_dev_list_lock);
443 list_for_each_entry(d, &hci_dev_list, list) {
444 if (hci_dev_test_flag(d, HCI_SETUP) ||
445 hci_dev_test_flag(d, HCI_CONFIG) ||
446 hci_dev_test_flag(d, HCI_USER_CHANNEL))
449 /* Devices marked as raw-only are neither configured
450 * nor unconfigured controllers.
452 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
455 if (d->dev_type == HCI_PRIMARY &&
456 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
457 rp->index[count++] = cpu_to_le16(d->id);
458 BT_DBG("Added hci%u", d->id);
462 rp->num_controllers = cpu_to_le16(count);
463 rp_len = sizeof(*rp) + (2 * count);
465 read_unlock(&hci_dev_list_lock);
467 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
468 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
475 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
476 void *data, u16 data_len)
478 struct mgmt_rp_read_ext_index_list *rp;
483 BT_DBG("sock %p", sk);
485 read_lock(&hci_dev_list_lock);
488 list_for_each_entry(d, &hci_dev_list, list) {
489 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
493 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
495 read_unlock(&hci_dev_list_lock);
500 list_for_each_entry(d, &hci_dev_list, list) {
501 if (hci_dev_test_flag(d, HCI_SETUP) ||
502 hci_dev_test_flag(d, HCI_CONFIG) ||
503 hci_dev_test_flag(d, HCI_USER_CHANNEL))
506 /* Devices marked as raw-only are neither configured
507 * nor unconfigured controllers.
509 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
512 if (d->dev_type == HCI_PRIMARY) {
513 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
514 rp->entry[count].type = 0x01;
516 rp->entry[count].type = 0x00;
517 } else if (d->dev_type == HCI_AMP) {
518 rp->entry[count].type = 0x02;
523 rp->entry[count].bus = d->bus;
524 rp->entry[count++].index = cpu_to_le16(d->id);
525 BT_DBG("Added hci%u", d->id);
528 rp->num_controllers = cpu_to_le16(count);
530 read_unlock(&hci_dev_list_lock);
532 /* If this command is called at least once, then all the
533 * default index and unconfigured index events are disabled
534 * and from now on only extended index events are used.
536 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
537 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
538 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
540 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
541 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
542 struct_size(rp, entry, count));
549 static bool is_configured(struct hci_dev *hdev)
551 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
552 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
555 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
556 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
557 !bacmp(&hdev->public_addr, BDADDR_ANY))
563 static __le32 get_missing_options(struct hci_dev *hdev)
567 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
568 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
569 options |= MGMT_OPTION_EXTERNAL_CONFIG;
571 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
572 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
573 !bacmp(&hdev->public_addr, BDADDR_ANY))
574 options |= MGMT_OPTION_PUBLIC_ADDRESS;
576 return cpu_to_le32(options);
579 static int new_options(struct hci_dev *hdev, struct sock *skip)
581 __le32 options = get_missing_options(hdev);
583 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
584 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
587 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
589 __le32 options = get_missing_options(hdev);
591 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
595 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
596 void *data, u16 data_len)
598 struct mgmt_rp_read_config_info rp;
601 BT_DBG("sock %p %s", sk, hdev->name);
605 memset(&rp, 0, sizeof(rp));
606 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
608 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
609 options |= MGMT_OPTION_EXTERNAL_CONFIG;
611 if (hdev->set_bdaddr)
612 options |= MGMT_OPTION_PUBLIC_ADDRESS;
614 rp.supported_options = cpu_to_le32(options);
615 rp.missing_options = get_missing_options(hdev);
617 hci_dev_unlock(hdev);
619 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
623 static u32 get_supported_phys(struct hci_dev *hdev)
625 u32 supported_phys = 0;
627 if (lmp_bredr_capable(hdev)) {
628 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
630 if (hdev->features[0][0] & LMP_3SLOT)
631 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
633 if (hdev->features[0][0] & LMP_5SLOT)
634 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
636 if (lmp_edr_2m_capable(hdev)) {
637 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
639 if (lmp_edr_3slot_capable(hdev))
640 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
642 if (lmp_edr_5slot_capable(hdev))
643 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
645 if (lmp_edr_3m_capable(hdev)) {
646 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
648 if (lmp_edr_3slot_capable(hdev))
649 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
651 if (lmp_edr_5slot_capable(hdev))
652 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
657 if (lmp_le_capable(hdev)) {
658 supported_phys |= MGMT_PHY_LE_1M_TX;
659 supported_phys |= MGMT_PHY_LE_1M_RX;
661 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
662 supported_phys |= MGMT_PHY_LE_2M_TX;
663 supported_phys |= MGMT_PHY_LE_2M_RX;
666 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
667 supported_phys |= MGMT_PHY_LE_CODED_TX;
668 supported_phys |= MGMT_PHY_LE_CODED_RX;
672 return supported_phys;
675 static u32 get_selected_phys(struct hci_dev *hdev)
677 u32 selected_phys = 0;
679 if (lmp_bredr_capable(hdev)) {
680 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
682 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
683 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
685 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
686 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
688 if (lmp_edr_2m_capable(hdev)) {
689 if (!(hdev->pkt_type & HCI_2DH1))
690 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
692 if (lmp_edr_3slot_capable(hdev) &&
693 !(hdev->pkt_type & HCI_2DH3))
694 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
696 if (lmp_edr_5slot_capable(hdev) &&
697 !(hdev->pkt_type & HCI_2DH5))
698 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
700 if (lmp_edr_3m_capable(hdev)) {
701 if (!(hdev->pkt_type & HCI_3DH1))
702 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
704 if (lmp_edr_3slot_capable(hdev) &&
705 !(hdev->pkt_type & HCI_3DH3))
706 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
708 if (lmp_edr_5slot_capable(hdev) &&
709 !(hdev->pkt_type & HCI_3DH5))
710 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
715 if (lmp_le_capable(hdev)) {
716 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
717 selected_phys |= MGMT_PHY_LE_1M_TX;
719 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
720 selected_phys |= MGMT_PHY_LE_1M_RX;
722 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
723 selected_phys |= MGMT_PHY_LE_2M_TX;
725 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
726 selected_phys |= MGMT_PHY_LE_2M_RX;
728 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
729 selected_phys |= MGMT_PHY_LE_CODED_TX;
731 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
732 selected_phys |= MGMT_PHY_LE_CODED_RX;
735 return selected_phys;
738 static u32 get_configurable_phys(struct hci_dev *hdev)
740 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
741 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
744 static u32 get_supported_settings(struct hci_dev *hdev)
748 settings |= MGMT_SETTING_POWERED;
749 settings |= MGMT_SETTING_BONDABLE;
750 settings |= MGMT_SETTING_DEBUG_KEYS;
751 settings |= MGMT_SETTING_CONNECTABLE;
752 settings |= MGMT_SETTING_DISCOVERABLE;
754 if (lmp_bredr_capable(hdev)) {
755 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
756 settings |= MGMT_SETTING_FAST_CONNECTABLE;
757 settings |= MGMT_SETTING_BREDR;
758 settings |= MGMT_SETTING_LINK_SECURITY;
760 if (lmp_ssp_capable(hdev)) {
761 settings |= MGMT_SETTING_SSP;
762 settings |= MGMT_SETTING_HS;
765 if (lmp_sc_capable(hdev))
766 settings |= MGMT_SETTING_SECURE_CONN;
769 if (lmp_le_capable(hdev)) {
770 settings |= MGMT_SETTING_LE;
771 settings |= MGMT_SETTING_ADVERTISING;
772 settings |= MGMT_SETTING_SECURE_CONN;
773 settings |= MGMT_SETTING_PRIVACY;
774 settings |= MGMT_SETTING_STATIC_ADDRESS;
777 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
779 settings |= MGMT_SETTING_CONFIGURATION;
781 settings |= MGMT_SETTING_PHY_CONFIGURATION;
786 static u32 get_current_settings(struct hci_dev *hdev)
790 if (hdev_is_powered(hdev))
791 settings |= MGMT_SETTING_POWERED;
793 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
794 settings |= MGMT_SETTING_CONNECTABLE;
796 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
797 settings |= MGMT_SETTING_FAST_CONNECTABLE;
799 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
800 settings |= MGMT_SETTING_DISCOVERABLE;
802 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
803 settings |= MGMT_SETTING_BONDABLE;
805 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
806 settings |= MGMT_SETTING_BREDR;
808 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
809 settings |= MGMT_SETTING_LE;
811 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
812 settings |= MGMT_SETTING_LINK_SECURITY;
814 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
815 settings |= MGMT_SETTING_SSP;
817 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
818 settings |= MGMT_SETTING_HS;
820 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
821 settings |= MGMT_SETTING_ADVERTISING;
823 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
824 settings |= MGMT_SETTING_SECURE_CONN;
826 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
827 settings |= MGMT_SETTING_DEBUG_KEYS;
829 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
830 settings |= MGMT_SETTING_PRIVACY;
832 /* The current setting for static address has two purposes. The
833 * first is to indicate if the static address will be used and
834 * the second is to indicate if it is actually set.
836 * This means if the static address is not configured, this flag
837 * will never be set. If the address is configured, then if the
838 * address is actually used decides if the flag is set or not.
840 * For single mode LE only controllers and dual-mode controllers
841 * with BR/EDR disabled, the existence of the static address will
844 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
845 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
846 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
847 if (bacmp(&hdev->static_addr, BDADDR_ANY))
848 settings |= MGMT_SETTING_STATIC_ADDRESS;
854 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
856 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
859 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
860 struct hci_dev *hdev,
863 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
866 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
868 struct mgmt_pending_cmd *cmd;
870 /* If there's a pending mgmt command the flags will not yet have
871 * their final values, so check for this first.
873 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
875 struct mgmt_mode *cp = cmd->param;
877 return LE_AD_GENERAL;
878 else if (cp->val == 0x02)
879 return LE_AD_LIMITED;
881 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
882 return LE_AD_LIMITED;
883 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
884 return LE_AD_GENERAL;
890 bool mgmt_get_connectable(struct hci_dev *hdev)
892 struct mgmt_pending_cmd *cmd;
894 /* If there's a pending mgmt command the flag will not yet have
895 * it's final value, so check for this first.
897 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
899 struct mgmt_mode *cp = cmd->param;
904 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
907 static void service_cache_off(struct work_struct *work)
909 struct hci_dev *hdev = container_of(work, struct hci_dev,
911 struct hci_request req;
913 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
916 hci_req_init(&req, hdev);
920 __hci_req_update_eir(&req);
921 __hci_req_update_class(&req);
923 hci_dev_unlock(hdev);
925 hci_req_run(&req, NULL);
928 static void rpa_expired(struct work_struct *work)
930 struct hci_dev *hdev = container_of(work, struct hci_dev,
932 struct hci_request req;
936 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
938 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
941 /* The generation of a new RPA and programming it into the
942 * controller happens in the hci_req_enable_advertising()
945 hci_req_init(&req, hdev);
946 if (ext_adv_capable(hdev))
947 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
949 __hci_req_enable_advertising(&req);
950 hci_req_run(&req, NULL);
953 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
955 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
958 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
959 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
961 /* Non-mgmt controlled devices get this bit set
962 * implicitly so that pairing works for them, however
963 * for mgmt we require user-space to explicitly enable
966 hci_dev_clear_flag(hdev, HCI_BONDABLE);
969 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
970 void *data, u16 data_len)
972 struct mgmt_rp_read_info rp;
974 BT_DBG("sock %p %s", sk, hdev->name);
978 memset(&rp, 0, sizeof(rp));
980 bacpy(&rp.bdaddr, &hdev->bdaddr);
982 rp.version = hdev->hci_ver;
983 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
985 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
986 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
988 memcpy(rp.dev_class, hdev->dev_class, 3);
990 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
991 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
993 hci_dev_unlock(hdev);
995 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
999 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1004 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1005 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1006 hdev->dev_class, 3);
1008 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1009 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1012 name_len = strlen(hdev->dev_name);
1013 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1014 hdev->dev_name, name_len);
1016 name_len = strlen(hdev->short_name);
1017 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1018 hdev->short_name, name_len);
1023 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1024 void *data, u16 data_len)
1027 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1030 BT_DBG("sock %p %s", sk, hdev->name);
1032 memset(&buf, 0, sizeof(buf));
1036 bacpy(&rp->bdaddr, &hdev->bdaddr);
1038 rp->version = hdev->hci_ver;
1039 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1041 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1042 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1045 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1046 rp->eir_len = cpu_to_le16(eir_len);
1048 hci_dev_unlock(hdev);
1050 /* If this command is called at least once, then the events
1051 * for class of device and local name changes are disabled
1052 * and only the new extended controller information event
1055 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1056 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1057 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1059 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1060 sizeof(*rp) + eir_len);
1063 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1066 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1069 memset(buf, 0, sizeof(buf));
1071 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1072 ev->eir_len = cpu_to_le16(eir_len);
1074 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1075 sizeof(*ev) + eir_len,
1076 HCI_MGMT_EXT_INFO_EVENTS, skip);
1079 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1081 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1083 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1087 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1089 BT_DBG("%s status 0x%02x", hdev->name, status);
1091 if (hci_conn_count(hdev) == 0) {
1092 cancel_delayed_work(&hdev->power_off);
1093 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1097 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1099 struct mgmt_ev_advertising_added ev;
1101 ev.instance = instance;
1103 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1106 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1109 struct mgmt_ev_advertising_removed ev;
1111 ev.instance = instance;
1113 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1116 static void cancel_adv_timeout(struct hci_dev *hdev)
1118 if (hdev->adv_instance_timeout) {
1119 hdev->adv_instance_timeout = 0;
1120 cancel_delayed_work(&hdev->adv_instance_expire);
1124 static int clean_up_hci_state(struct hci_dev *hdev)
1126 struct hci_request req;
1127 struct hci_conn *conn;
1128 bool discov_stopped;
1131 hci_req_init(&req, hdev);
1133 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1134 test_bit(HCI_PSCAN, &hdev->flags)) {
1136 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1139 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1141 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1142 __hci_req_disable_advertising(&req);
1144 discov_stopped = hci_req_stop_discovery(&req);
1146 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1147 /* 0x15 == Terminated due to Power Off */
1148 __hci_abort_conn(&req, conn, 0x15);
1151 err = hci_req_run(&req, clean_up_hci_complete);
1152 if (!err && discov_stopped)
1153 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1158 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1161 struct mgmt_mode *cp = data;
1162 struct mgmt_pending_cmd *cmd;
1165 BT_DBG("request for %s", hdev->name);
1167 if (cp->val != 0x00 && cp->val != 0x01)
1168 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1169 MGMT_STATUS_INVALID_PARAMS);
1173 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1174 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1179 if (!!cp->val == hdev_is_powered(hdev)) {
1180 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1184 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1191 queue_work(hdev->req_workqueue, &hdev->power_on);
1194 /* Disconnect connections, stop scans, etc */
1195 err = clean_up_hci_state(hdev);
1197 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1198 HCI_POWER_OFF_TIMEOUT);
1200 /* ENODATA means there were no HCI commands queued */
1201 if (err == -ENODATA) {
1202 cancel_delayed_work(&hdev->power_off);
1203 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1209 hci_dev_unlock(hdev);
1213 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1215 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1217 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1218 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1221 int mgmt_new_settings(struct hci_dev *hdev)
1223 return new_settings(hdev, NULL);
1228 struct hci_dev *hdev;
1232 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1234 struct cmd_lookup *match = data;
1236 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1238 list_del(&cmd->list);
1240 if (match->sk == NULL) {
1241 match->sk = cmd->sk;
1242 sock_hold(match->sk);
1245 mgmt_pending_free(cmd);
1248 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1252 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1253 mgmt_pending_remove(cmd);
1256 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1258 if (cmd->cmd_complete) {
1261 cmd->cmd_complete(cmd, *status);
1262 mgmt_pending_remove(cmd);
1267 cmd_status_rsp(cmd, data);
1270 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1272 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1273 cmd->param, cmd->param_len);
1276 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1278 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1279 cmd->param, sizeof(struct mgmt_addr_info));
1282 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1284 if (!lmp_bredr_capable(hdev))
1285 return MGMT_STATUS_NOT_SUPPORTED;
1286 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1287 return MGMT_STATUS_REJECTED;
1289 return MGMT_STATUS_SUCCESS;
1292 static u8 mgmt_le_support(struct hci_dev *hdev)
1294 if (!lmp_le_capable(hdev))
1295 return MGMT_STATUS_NOT_SUPPORTED;
1296 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1297 return MGMT_STATUS_REJECTED;
1299 return MGMT_STATUS_SUCCESS;
/* HCI completion handler for a Set Discoverable request.
 * On error: fail the pending mgmt command and drop the limited-
 * discoverable flag. On success: arm the discoverable timeout (if any),
 * answer the originating socket and broadcast New Settings to the rest.
 * NOTE(review): this extract elides some original lines (locking,
 * goto labels/braces); comments below cover only the visible statements.
 */
1302 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1304 struct mgmt_pending_cmd *cmd;
1306 BT_DBG("status 0x%02x", status);
/* Look up the pending Set Discoverable command this event answers. */
1310 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* Error path: translate HCI status and fail the command. */
1315 u8 mgmt_err = mgmt_status(status);
1316 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1317 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Success with a timeout configured: schedule the delayed
 * discov_off work to turn discoverable mode back off.
 */
1321 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1322 hdev->discov_timeout > 0) {
1323 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1324 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
/* Respond to the requester, then notify everyone else. */
1327 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1328 new_settings(hdev, cmd->sk);
1331 mgmt_pending_remove(cmd);
1334 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. cp->val: 0x00 off, 0x01 general
 * discoverable, 0x02 limited discoverable; cp->timeout is the
 * auto-disable period in seconds (little-endian on the wire).
 * NOTE(review): extract elides some original lines (err/timeout
 * declarations, braces, goto labels); comments cover visible code only.
 */
1337 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1340 struct mgmt_cp_set_discoverable *cp = data;
1341 struct mgmt_pending_cmd *cmd;
1345 BT_DBG("request for %s", hdev->name);
/* Reject outright when neither transport is enabled. */
1347 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1348 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1349 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1350 MGMT_STATUS_REJECTED);
1352 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1353 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1354 MGMT_STATUS_INVALID_PARAMS);
1356 timeout = __le16_to_cpu(cp->timeout);
1358 /* Disabling discoverable requires that no timeout is set,
1359 * and enabling limited discoverable requires a timeout.
1361 if ((cp->val == 0x00 && timeout > 0) ||
1362 (cp->val == 0x02 && timeout == 0))
1363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1364 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while the controller is powered off. */
1368 if (!hdev_is_powered(hdev) && timeout > 0) {
1369 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1370 MGMT_STATUS_NOT_POWERED);
/* Only one discoverable/connectable change may be in flight. */
1374 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1375 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1376 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable; refuse otherwise. */
1381 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1382 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1383 MGMT_STATUS_REJECTED);
/* Powered off: just flip the stored flag, no HCI traffic needed. */
1387 if (!hdev_is_powered(hdev)) {
1388 bool changed = false;
1390 /* Setting limited discoverable when powered off is
1391 * not a valid operation since it requires a timeout
1392 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1394 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1395 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1399 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1404 err = new_settings(hdev, sk);
1409 /* If the current mode is the same, then just update the timeout
1410 * value with the new value. And if only the timeout gets updated,
1411 * then no need for any HCI transactions.
1413 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1414 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1415 HCI_LIMITED_DISCOVERABLE)) {
1416 cancel_delayed_work(&hdev->discov_off);
1417 hdev->discov_timeout = timeout;
/* Re-arm the auto-off timer with the refreshed timeout. */
1419 if (cp->val && hdev->discov_timeout > 0) {
1420 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1421 queue_delayed_work(hdev->req_workqueue,
1422 &hdev->discov_off, to);
1425 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
/* Mode actually changes: queue a pending command and defer the
 * HCI work to the discoverable_update worker.
 */
1429 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1435 /* Cancel any potential discoverable timeout that might be
1436 * still active and store new timeout value. The arming of
1437 * the timeout happens in the complete handler.
1439 cancel_delayed_work(&hdev->discov_off);
1440 hdev->discov_timeout = timeout;
1443 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1445 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1447 /* Limited discoverable mode */
1448 if (cp->val == 0x02)
1449 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1451 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1453 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1457 hci_dev_unlock(hdev);
/* HCI completion handler for a Set Connectable request: fail the
 * pending command on error, otherwise answer the requester and
 * broadcast New Settings.
 * NOTE(review): extract elides locking/goto lines around the visible
 * statements.
 */
1461 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1463 struct mgmt_pending_cmd *cmd;
1465 BT_DBG("status 0x%02x", status);
1469 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
/* Error path: translate HCI status into an mgmt status reply. */
1474 u8 mgmt_err = mgmt_status(status);
1475 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1479 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1480 new_settings(hdev, cmd->sk);
1483 mgmt_pending_remove(cmd);
1486 hci_dev_unlock(hdev);
/* Flag-only path of Set Connectable (used when no HCI transaction is
 * needed): update HCI_CONNECTABLE, reply with current settings, and on
 * an actual change refresh scan state and broadcast New Settings.
 * Disabling connectable also clears discoverable.
 * NOTE(review): extract elides some lines (changed = true, err checks,
 * braces); comments cover visible code only.
 */
1489 static int set_connectable_update_settings(struct hci_dev *hdev,
1490 struct sock *sk, u8 val)
1492 bool changed = false;
1495 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1499 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off also drops discoverable. */
1501 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1502 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1505 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
/* Flags changed: update page/inquiry scan and background LE scan. */
1510 hci_req_update_scan(hdev);
1511 hci_update_background_scan(hdev);
1512 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler. cp->val: 0x00 off, 0x01 on.
 * Powered-off devices take the flag-only path; powered devices queue
 * the connectable_update worker to issue the HCI commands.
 * NOTE(review): extract elides some lines (locking, goto labels,
 * braces); comments cover visible code only.
 */
1518 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1521 struct mgmt_mode *cp = data;
1522 struct mgmt_pending_cmd *cmd;
1525 BT_DBG("request for %s", hdev->name);
/* Need at least one enabled transport. */
1527 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1528 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1529 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1530 MGMT_STATUS_REJECTED);
1532 if (cp->val != 0x00 && cp->val != 0x01)
1533 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1534 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flags only, no HCI traffic. */
1538 if (!hdev_is_powered(hdev)) {
1539 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Only one discoverable/connectable change may be in flight. */
1543 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1544 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1545 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1550 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1557 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Disabling: cancel any armed discoverable timeout and clear the
 * dependent discoverable flags along with connectable.
 */
1559 if (hdev->discov_timeout > 0)
1560 cancel_delayed_work(&hdev->discov_off);
1562 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1563 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1564 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1567 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1571 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. No HCI
 * commands are needed, but in limited-privacy mode a bondable change
 * can alter the advertising address, so the discoverable worker is
 * re-queued in that case.
 * NOTE(review): extract elides some lines (err/changed declarations,
 * locking, braces); comments cover visible code only.
 */
1575 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1578 struct mgmt_mode *cp = data;
1582 BT_DBG("request for %s", hdev->name);
1584 if (cp->val != 0x00 && cp->val != 0x01)
1585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1586 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear returns the old value, so 'changed' is true
 * only when the flag actually flipped.
 */
1591 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1593 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1595 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1600 /* In limited privacy mode the change of bondable mode
1601 * may affect the local advertising address.
1603 if (hdev_is_powered(hdev) &&
1604 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1605 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1606 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1607 queue_work(hdev->req_workqueue,
1608 &hdev->discoverable_update);
1610 err = new_settings(hdev, sk);
1614 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler (BR/EDR authentication). Powered
 * off: flip HCI_LINK_SECURITY flag only. Powered on: send
 * HCI_OP_WRITE_AUTH_ENABLE unless the controller already matches.
 * NOTE(review): extract elides some lines (val assignment, locking,
 * goto labels); comments cover visible code only.
 */
1618 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1621 struct mgmt_mode *cp = data;
1622 struct mgmt_pending_cmd *cmd;
1626 BT_DBG("request for %s", hdev->name);
1628 status = mgmt_bredr_support(hdev);
1630 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1633 if (cp->val != 0x00 && cp->val != 0x01)
1634 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1635 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag change only, no HCI traffic. */
1639 if (!hdev_is_powered(hdev)) {
1640 bool changed = false;
1642 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1643 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1647 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1652 err = new_settings(hdev, sk);
1657 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: reply directly. */
1665 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1666 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1670 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1676 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
/* Sending failed: drop the pending command we just queued. */
1678 mgmt_pending_remove(cmd);
1683 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler (Secure Simple Pairing). Powered off: adjust
 * the HCI_SSP_ENABLED flag (disabling also drops HS). Powered on: send
 * HCI_OP_WRITE_SSP_MODE, first turning off SSP debug mode when SSP is
 * being disabled while debug keys are in use.
 * NOTE(review): extract elides some lines (flag names in the
 * test_and_* calls, locking, goto labels); comments cover visible code.
 */
1687 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1689 struct mgmt_mode *cp = data;
1690 struct mgmt_pending_cmd *cmd;
1694 BT_DBG("request for %s", hdev->name);
1696 status = mgmt_bredr_support(hdev);
1698 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1700 if (!lmp_ssp_capable(hdev))
1701 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1702 MGMT_STATUS_NOT_SUPPORTED);
1704 if (cp->val != 0x00 && cp->val != 0x01)
1705 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1706 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag changes only. */
1710 if (!hdev_is_powered(hdev)) {
1714 changed = !hci_dev_test_and_set_flag(hdev,
1717 changed = hci_dev_test_and_clear_flag(hdev,
1720 changed = hci_dev_test_and_clear_flag(hdev,
/* Disabling SSP also disables High Speed support. */
1723 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1726 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1731 err = new_settings(hdev, sk);
1736 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1737 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
/* Already in the requested mode: reply without HCI traffic. */
1742 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1743 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1747 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turn off SSP debug mode before disabling SSP itself. */
1753 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1754 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1755 sizeof(cp->val), &cp->val);
1757 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1759 mgmt_pending_remove(cmd);
1764 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler (High Speed / AMP). Purely a host-side flag:
 * requires SSP support and SSP enabled; disabling HS while powered on
 * is rejected.
 * NOTE(review): extract elides some lines (declarations, locking,
 * braces); comments cover visible code only.
 */
1768 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1770 struct mgmt_mode *cp = data;
1775 BT_DBG("request for %s", hdev->name);
1777 status = mgmt_bredr_support(hdev);
1779 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1781 if (!lmp_ssp_capable(hdev))
1782 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1783 MGMT_STATUS_NOT_SUPPORTED);
/* HS depends on SSP being enabled. */
1785 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1786 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1787 MGMT_STATUS_REJECTED);
1789 if (cp->val != 0x00 && cp->val != 0x01)
1790 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1791 MGMT_STATUS_INVALID_PARAMS);
/* Don't race with an in-flight SSP change. */
1795 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1796 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1802 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS while powered is rejected. */
1804 if (hdev_is_powered(hdev)) {
1805 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1806 MGMT_STATUS_REJECTED);
1810 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1813 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1818 err = new_settings(hdev, sk);
1821 hci_dev_unlock(hdev);
/* HCI completion handler for a Set LE change: fail or answer all
 * pending SET_LE commands, broadcast New Settings, and when LE ended up
 * enabled refresh the advertising/scan-response data and background
 * scan.
 * NOTE(review): extract elides some lines (status check, locking,
 * braces); comments cover visible code only.
 */
1825 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1827 struct cmd_lookup match = { NULL, hdev };
/* Error path: fail every pending SET_LE command with the HCI error. */
1832 u8 mgmt_err = mgmt_status(status);
1834 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1839 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1841 new_settings(hdev, match.sk);
1846 /* Make sure the controller has a good default for
1847 * advertising data. Restrict the update to when LE
1848 * has actually been enabled. During power on, the
1849 * update in powered_update_hci will take care of it.
1851 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1852 struct hci_request req;
1853 hci_req_init(&req, hdev);
/* Extended advertising controllers use adv set 0x00. */
1854 if (ext_adv_capable(hdev)) {
1857 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1859 __hci_req_update_scan_rsp_data(&req, 0x00);
/* Legacy advertising path. */
1861 __hci_req_update_adv_data(&req, 0x00);
1862 __hci_req_update_scan_rsp_data(&req, 0x00);
1864 hci_req_run(&req, NULL);
1865 hci_update_background_scan(hdev);
1869 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler. LE-only controllers cannot switch LE off.
 * Powered off (or no actual change): flag updates only. Otherwise send
 * HCI_OP_WRITE_LE_HOST_SUPPORTED, disabling any active advertising
 * first when LE is being turned off.
 * NOTE(review): extract elides some lines (val/enabled assignments,
 * locking, goto labels, hci_cp.le); comments cover visible code only.
 */
1872 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1874 struct mgmt_mode *cp = data;
1875 struct hci_cp_write_le_host_supported hci_cp;
1876 struct mgmt_pending_cmd *cmd;
1877 struct hci_request req;
1881 BT_DBG("request for %s", hdev->name);
1883 if (!lmp_le_capable(hdev))
1884 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1885 MGMT_STATUS_NOT_SUPPORTED);
1887 if (cp->val != 0x00 && cp->val != 0x01)
1888 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1889 MGMT_STATUS_INVALID_PARAMS);
1891 /* Bluetooth single mode LE only controllers or dual-mode
1892 * controllers configured as LE only devices, do not allow
1893 * switching LE off. These have either LE enabled explicitly
1894 * or BR/EDR has been previously switched off.
1896 * When trying to enable an already enabled LE, then gracefully
1897 * send a positive response. Trying to disable it however will
1898 * result into rejection.
1900 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1901 if (cp->val == 0x01)
1902 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1904 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1905 MGMT_STATUS_REJECTED);
1911 enabled = lmp_host_le_capable(hdev);
/* Turning LE off: remove all advertising instances first. */
1914 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
/* Powered off, or host LE support already matches: flags only. */
1916 if (!hdev_is_powered(hdev) || val == enabled) {
1917 bool changed = false;
1919 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1920 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1924 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1925 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1929 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1934 err = new_settings(hdev, sk);
/* Don't race with an in-flight LE or advertising change. */
1939 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1940 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1941 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1946 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1952 hci_req_init(&req, hdev);
1954 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE+BR/EDR is never advertised to the controller. */
1958 hci_cp.simul = 0x00;
/* Disabling LE: stop advertising and tear down ext adv sets. */
1960 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1961 __hci_req_disable_advertising(&req);
1963 if (ext_adv_capable(hdev))
1964 __hci_req_clear_ext_adv_sets(&req);
1967 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1970 err = hci_req_run(&req, le_enable_complete);
1972 mgmt_pending_remove(cmd);
1975 hci_dev_unlock(hdev);
1979 /* This is a helper function to test for pending mgmt commands that can
1980 * cause CoD or EIR HCI commands. We can only allow one such pending
1981 * mgmt command at a time since otherwise we cannot easily track what
1982 * the current values are, will be, and based on that calculate if a new
1983 * HCI command needs to be sent and if yes with what value.
1985 static bool pending_eir_or_class(struct hci_dev *hdev)
1987 struct mgmt_pending_cmd *cmd;
1989 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1990 switch (cmd->opcode) {
1991 case MGMT_OP_ADD_UUID:
1992 case MGMT_OP_REMOVE_UUID:
1993 case MGMT_OP_SET_DEV_CLASS:
1994 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used by get_uuid_size() to detect 16/32-bit
 * short-form UUIDs.
 */
2002 static const u8 bluetooth_base_uuid[] = {
2003 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2004 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2007 static u8 get_uuid_size(const u8 *uuid)
2011 if (memcmp(uuid, bluetooth_base_uuid, 12))
2014 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for the class/EIR-changing commands
 * (Add/Remove UUID, Set Device Class): answer the pending @mgmt_op with
 * the current device class and drop the pending entry.
 * NOTE(review): extract elides locking and the !cmd check around the
 * visible statements.
 */
2021 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2023 struct mgmt_pending_cmd *cmd;
2027 cmd = pending_find(mgmt_op, hdev);
/* Reply always carries the 3-byte Class of Device. */
2031 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2032 mgmt_status(status), hdev->dev_class, 3);
2034 mgmt_pending_remove(cmd);
2037 hci_dev_unlock(hdev);
2040 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2042 BT_DBG("status 0x%02x", status);
2044 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: record the UUID in hdev->uuids and refresh
 * the Class of Device and EIR data. If no HCI commands end up queued
 * (-ENODATA) the request is completed immediately.
 * NOTE(review): extract elides some lines (locking, allocation-failure
 * and goto paths); comments cover visible code only.
 */
2047 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2049 struct mgmt_cp_add_uuid *cp = data;
2050 struct mgmt_pending_cmd *cmd;
2051 struct hci_request req;
2052 struct bt_uuid *uuid;
2055 BT_DBG("request for %s", hdev->name);
/* Only one class/EIR-changing command may be pending at a time. */
2059 if (pending_eir_or_class(hdev)) {
2060 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2065 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2071 memcpy(uuid->uuid, cp->uuid, 16);
2072 uuid->svc_hint = cp->svc_hint;
2073 uuid->size = get_uuid_size(cp->uuid);
2075 list_add_tail(&uuid->list, &hdev->uuids);
2077 hci_req_init(&req, hdev);
2079 __hci_req_update_class(&req);
2080 __hci_req_update_eir(&req);
2082 err = hci_req_run(&req, add_uuid_complete);
/* -ENODATA means nothing needed sending; complete right away. */
2084 if (err != -ENODATA)
2087 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2088 hdev->dev_class, 3);
/* HCI commands queued: park the command until add_uuid_complete. */
2092 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2101 hci_dev_unlock(hdev);
/* Arm the service cache when the controller is powered: sets
 * HCI_SERVICE_CACHE (if not already set) and schedules the
 * service_cache delayed work. Returns whether caching was (re)armed;
 * powered-off devices take the early-return path.
 * NOTE(review): extract elides the return statements and braces.
 */
2105 static bool enable_service_cache(struct hci_dev *hdev)
2107 if (!hdev_is_powered(hdev))
2110 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2111 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2119 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2121 BT_DBG("status 0x%02x", status);
2123 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or, for the all-zero
 * wildcard, every UUID) from hdev->uuids, then refresh Class of Device
 * and EIR. The wildcard path may instead just arm the service cache.
 * NOTE(review): extract elides some lines (found counter, locking,
 * goto paths); comments cover visible code only.
 */
2126 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2129 struct mgmt_cp_remove_uuid *cp = data;
2130 struct mgmt_pending_cmd *cmd;
2131 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as a wildcard meaning "remove everything". */
2132 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2133 struct hci_request req;
2136 BT_DBG("request for %s", hdev->name);
/* Only one class/EIR-changing command may be pending at a time. */
2140 if (pending_eir_or_class(hdev)) {
2141 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2146 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2147 hci_uuids_clear(hdev);
/* If the service cache got armed, defer the HCI update. */
2149 if (enable_service_cache(hdev)) {
2150 err = mgmt_cmd_complete(sk, hdev->id,
2151 MGMT_OP_REMOVE_UUID,
2152 0, hdev->dev_class, 3);
/* Specific UUID: unlink every matching entry. */
2161 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2162 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2165 list_del(&match->list);
/* Nothing matched: the UUID was never registered. */
2171 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2172 MGMT_STATUS_INVALID_PARAMS);
2177 hci_req_init(&req, hdev);
2179 __hci_req_update_class(&req);
2180 __hci_req_update_eir(&req);
2182 err = hci_req_run(&req, remove_uuid_complete);
/* -ENODATA means nothing needed sending; complete right away. */
2184 if (err != -ENODATA)
2187 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2188 hdev->dev_class, 3);
2192 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2201 hci_dev_unlock(hdev);
2205 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2207 BT_DBG("status 0x%02x", status);
2209 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: store the major/minor class and, when
 * powered, push the new Class of Device (flushing the service cache and
 * refreshing EIR if the cache was armed).
 * NOTE(review): extract elides some lines (locking, goto paths,
 * re-lock after cancel_delayed_work_sync); comments cover visible code.
 */
2212 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2215 struct mgmt_cp_set_dev_class *cp = data;
2216 struct mgmt_pending_cmd *cmd;
2217 struct hci_request req;
2220 BT_DBG("request for %s", hdev->name);
2222 if (!lmp_bredr_capable(hdev))
2223 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2224 MGMT_STATUS_NOT_SUPPORTED);
/* Only one class/EIR-changing command may be pending at a time. */
2228 if (pending_eir_or_class(hdev)) {
2229 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2234 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2235 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2236 MGMT_STATUS_INVALID_PARAMS);
2240 hdev->major_class = cp->major;
2241 hdev->minor_class = cp->minor;
/* Powered off: just store; reply with the current class. */
2243 if (!hdev_is_powered(hdev)) {
2244 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2245 hdev->dev_class, 3);
2249 hci_req_init(&req, hdev);
/* Service cache armed: flush it (lock dropped around the sync
 * cancel) and refresh EIR along with the class.
 */
2251 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2252 hci_dev_unlock(hdev);
2253 cancel_delayed_work_sync(&hdev->service_cache);
2255 __hci_req_update_eir(&req);
2258 __hci_req_update_class(&req);
2260 err = hci_req_run(&req, set_class_complete);
/* -ENODATA means nothing needed sending; complete right away. */
2262 if (err != -ENODATA)
2265 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2266 hdev->dev_class, 3);
2270 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2279 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validate and replace the whole set of
 * stored BR/EDR link keys, update the keep-debug-keys policy, and
 * always reply with success once validation passed. Debug combination
 * keys are never loaded.
 * NOTE(review): extract elides some lines (declarations, locking,
 * braces); comments cover visible code only.
 */
2283 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2286 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound on key_count so expected_len below cannot overflow u16. */
2287 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2288 sizeof(struct mgmt_link_key_info));
2289 u16 key_count, expected_len;
2293 BT_DBG("request for %s", hdev->name);
2295 if (!lmp_bredr_capable(hdev))
2296 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2297 MGMT_STATUS_NOT_SUPPORTED);
2299 key_count = __le16_to_cpu(cp->key_count);
2300 if (key_count > max_key_count) {
2301 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2303 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2304 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared key count exactly. */
2307 expected_len = struct_size(cp, keys, key_count);
2308 if (expected_len != len) {
2309 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2311 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2312 MGMT_STATUS_INVALID_PARAMS);
2315 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2317 MGMT_STATUS_INVALID_PARAMS);
2319 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the stored key set. */
2322 for (i = 0; i < key_count; i++) {
2323 struct mgmt_link_key_info *key = &cp->keys[i];
2325 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2326 return mgmt_cmd_status(sk, hdev->id,
2327 MGMT_OP_LOAD_LINK_KEYS,
2328 MGMT_STATUS_INVALID_PARAMS);
/* Replace, don't merge: drop all previously stored keys. */
2333 hci_link_keys_clear(hdev);
2336 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2338 changed = hci_dev_test_and_clear_flag(hdev,
2339 HCI_KEEP_DEBUG_KEYS);
2342 new_settings(hdev, NULL);
2344 for (i = 0; i < key_count; i++) {
2345 struct mgmt_link_key_info *key = &cp->keys[i];
2347 /* Always ignore debug keys and require a new pairing if
2348 * the user wants to use them.
2350 if (key->type == HCI_LK_DEBUG_COMBINATION)
2353 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2354 key->type, key->pin_len, NULL);
2357 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2359 hci_dev_unlock(hdev);
2364 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2365 u8 addr_type, struct sock *skip_sk)
2367 struct mgmt_ev_device_unpaired ev;
2369 bacpy(&ev.addr.bdaddr, bdaddr);
2370 ev.addr.type = addr_type;
2372 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete stored keys (link key for
 * BR/EDR; SMP LTK/IRK for LE), disable auto-connect parameters, and
 * optionally terminate an existing connection when cp->disconnect is
 * set. Completion is deferred to the disconnect path in that case.
 * NOTE(review): extract elides some lines (declarations, locking, the
 * BR/EDR vs LE else, goto labels); comments cover visible code only.
 */
2376 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2379 struct mgmt_cp_unpair_device *cp = data;
2380 struct mgmt_rp_unpair_device rp;
2381 struct hci_conn_params *params;
2382 struct mgmt_pending_cmd *cmd;
2383 struct hci_conn *conn;
/* Response always echoes the target address. */
2387 memset(&rp, 0, sizeof(rp));
2388 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2389 rp.addr.type = cp->addr.type;
2391 if (!bdaddr_type_is_valid(cp->addr.type))
2392 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2393 MGMT_STATUS_INVALID_PARAMS,
2396 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2397 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2398 MGMT_STATUS_INVALID_PARAMS,
2403 if (!hdev_is_powered(hdev)) {
2404 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2405 MGMT_STATUS_NOT_POWERED, &rp,
2410 if (cp->addr.type == BDADDR_BREDR) {
2411 /* If disconnection is requested, then look up the
2412 * connection. If the remote device is connected, it
2413 * will be later used to terminate the link.
2415 * Setting it to NULL explicitly will cause no
2416 * termination of the link.
2419 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2424 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* No stored link key means the device was never paired. */
2426 err = mgmt_cmd_complete(sk, hdev->id,
2427 MGMT_OP_UNPAIR_DEVICE,
2428 MGMT_STATUS_NOT_PAIRED, &rp,
2436 /* LE address type */
2437 addr_type = le_addr_type(cp->addr.type);
2439 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2440 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2442 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2443 MGMT_STATUS_NOT_PAIRED, &rp,
2448 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
/* Not connected: connection parameters can be dropped right away. */
2450 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2455 /* Defer clearing up the connection parameters until closing to
2456 * give a chance of keeping them if a repairing happens.
2458 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2460 /* Disable auto-connection parameters if present */
2461 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2463 if (params->explicit_connect)
2464 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2466 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2469 /* If disconnection is not requested, then clear the connection
2470 * variable so that the link is not terminated.
2472 if (!cp->disconnect)
2476 /* If the connection variable is set, then termination of the
2477 * link is requested.
/* No link to tear down: complete now and notify other sockets. */
2480 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2482 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Otherwise park the command and abort the connection; the reply
 * is sent from the disconnect completion via addr_cmd_complete.
 */
2486 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2493 cmd->cmd_complete = addr_cmd_complete;
2495 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2497 mgmt_pending_remove(cmd);
2500 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: locate the BR/EDR or LE connection for
 * the given address and request its termination; the mgmt reply is
 * deferred to the disconnect completion (generic_cmd_complete).
 * NOTE(review): extract elides some lines (declarations, locking,
 * goto labels); comments cover visible code only.
 */
2504 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2507 struct mgmt_cp_disconnect *cp = data;
2508 struct mgmt_rp_disconnect rp;
2509 struct mgmt_pending_cmd *cmd;
2510 struct hci_conn *conn;
/* Response always echoes the target address. */
2515 memset(&rp, 0, sizeof(rp));
2516 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2517 rp.addr.type = cp->addr.type;
2519 if (!bdaddr_type_is_valid(cp->addr.type))
2520 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2521 MGMT_STATUS_INVALID_PARAMS,
2526 if (!test_bit(HCI_UP, &hdev->flags)) {
2527 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2528 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one Disconnect may be in flight per controller. */
2533 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2534 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2535 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* Address type selects the connection hash to search. */
2539 if (cp->addr.type == BDADDR_BREDR)
2540 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2543 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2544 le_addr_type(cp->addr.type));
2546 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2548 MGMT_STATUS_NOT_CONNECTED, &rp,
2553 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2559 cmd->cmd_complete = generic_cmd_complete;
2561 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
/* Sending failed: drop the pending command we just queued. */
2563 mgmt_pending_remove(cmd);
2566 hci_dev_unlock(hdev);
2570 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2572 switch (link_type) {
2574 switch (addr_type) {
2575 case ADDR_LE_DEV_PUBLIC:
2576 return BDADDR_LE_PUBLIC;
2579 /* Fallback to LE Random address type */
2580 return BDADDR_LE_RANDOM;
2584 /* Fallback to BR/EDR type */
2585 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections, skipping SCO/eSCO links (userspace only
 * sees the underlying ACL).
 * NOTE(review): extract elides some lines (counter declarations,
 * locking, allocation-failure path, kfree); comments cover visible
 * code only.
 */
2589 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2592 struct mgmt_rp_get_connections *rp;
2601 if (!hdev_is_powered(hdev)) {
2602 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2603 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the response buffer. */
2608 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2609 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2613 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in the address entries. */
2620 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2621 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2623 bacpy(&rp->addr[i].bdaddr, &c->dst);
2624 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
/* SCO/eSCO links are not reported to userspace. */
2625 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2630 rp->conn_count = cpu_to_le16(i);
2632 /* Recalculate length in case of filtered SCO connections, etc */
2633 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2634 struct_size(rp, addr, i));
2639 hci_dev_unlock(hdev);
2643 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2644 struct mgmt_cp_pin_code_neg_reply *cp)
2646 struct mgmt_pending_cmd *cmd;
2649 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2654 cmd->cmd_complete = addr_cmd_complete;
2656 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2657 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2659 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward the user-supplied PIN to the
 * controller. A high-security pairing that didn't get a full 16-byte
 * PIN is converted into a negative reply instead.
 * NOTE(review): extract elides some lines (declarations, locking,
 * goto labels); comments cover visible code only.
 */
2664 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2667 struct hci_conn *conn;
2668 struct mgmt_cp_pin_code_reply *cp = data;
2669 struct hci_cp_pin_code_reply reply;
2670 struct mgmt_pending_cmd *cmd;
2677 if (!hdev_is_powered(hdev)) {
2678 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2679 MGMT_STATUS_NOT_POWERED);
2683 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2685 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2686 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a 16-digit PIN; otherwise reject the
 * pairing with a negative reply and fail this command.
 */
2690 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2691 struct mgmt_cp_pin_code_neg_reply ncp;
2693 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2695 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2697 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2699 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2700 MGMT_STATUS_INVALID_PARAMS);
2705 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2711 cmd->cmd_complete = addr_cmd_complete;
2713 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2714 reply.pin_len = cp->pin_len;
2715 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2717 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
/* Sending failed: drop the pending command we just queued. */
2719 mgmt_pending_remove(cmd);
2722 hci_dev_unlock(hdev);
2726 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2729 struct mgmt_cp_set_io_capability *cp = data;
2733 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2734 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2735 MGMT_STATUS_INVALID_PARAMS);
2739 hdev->io_capability = cp->io_capability;
2741 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2742 hdev->io_capability);
2744 hci_dev_unlock(hdev);
2746 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2750 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2752 struct hci_dev *hdev = conn->hdev;
2753 struct mgmt_pending_cmd *cmd;
2755 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2756 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2759 if (cmd->user_data != conn)
/* cmd_complete handler for Pair Device: send the final response with
 * the peer address, detach all pairing callbacks from the connection,
 * and drop the reference taken for the pairing.
 * NOTE(review): extract elides some lines (braces, return err);
 * comments cover visible code only.
 */
2768 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2770 struct mgmt_rp_pair_device rp;
2771 struct hci_conn *conn = cmd->user_data;
2774 bacpy(&rp.addr.bdaddr, &conn->dst);
2775 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2777 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2778 status, &rp, sizeof(rp));
2780 /* So we don't get further callbacks for this connection */
2781 conn->connect_cfm_cb = NULL;
2782 conn->security_cfm_cb = NULL;
2783 conn->disconn_cfm_cb = NULL;
/* Release the reference held while the pairing was pending. */
2785 hci_conn_drop(conn);
2787 /* The device is paired so there is no need to remove
2788 * its connection parameters anymore.
2790 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over LE finishes. Maps the boolean
 * result to an mgmt status and completes/removes any pending Pair Device
 * command for this connection.
 */
2797 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2799 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2800 	struct mgmt_pending_cmd *cmd;
2802 	cmd = find_pairing(conn);
2804 		cmd->cmd_complete(cmd, status);
2805 		mgmt_pending_remove(cmd);
/* BR/EDR connection callback (connect/security/disconnect) used while a
 * Pair Device command is outstanding: complete the pending command with
 * the translated HCI status.
 */
2809 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2811 	struct mgmt_pending_cmd *cmd;
2813 	BT_DBG("status %u", status);
2815 	cmd = find_pairing(conn);
/* No matching pending command; nothing to complete. */
2817 		BT_DBG("Unable to find a pending command");
2821 	cmd->cmd_complete(cmd, mgmt_status(status));
2822 	mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb. For LE, a successful connection
 * alone does not mean pairing finished (SMP reports that separately), so
 * presumably success statuses are filtered out by lines dropped from this
 * extraction — confirm against full source.
 */
2825 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2827 	struct mgmt_pending_cmd *cmd;
2829 	BT_DBG("status %u", status);
2834 	cmd = find_pairing(conn);
2836 		BT_DBG("Unable to find a pending command");
2840 	cmd->cmd_complete(cmd, mgmt_status(status));
2841 	mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate the address and IO capability,
 * establish a BR/EDR or LE connection to the target, register pairing
 * callbacks, and queue a pending command completed later via
 * pairing_complete()/mgmt_smp_complete().
 */
2844 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2847 	struct mgmt_cp_pair_device *cp = data;
2848 	struct mgmt_rp_pair_device rp;
2849 	struct mgmt_pending_cmd *cmd;
2850 	u8 sec_level, auth_type;
2851 	struct hci_conn *conn;
/* The reply always echoes the target address, even on error. */
2856 	memset(&rp, 0, sizeof(rp));
2857 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2858 	rp.addr.type = cp->addr.type;
2860 	if (!bdaddr_type_is_valid(cp->addr.type))
2861 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2862 					 MGMT_STATUS_INVALID_PARAMS,
2865 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2866 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2867 					 MGMT_STATUS_INVALID_PARAMS,
2872 	if (!hdev_is_powered(hdev)) {
2873 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2874 					MGMT_STATUS_NOT_POWERED, &rp,
/* Refuse to re-pair an already-paired device. */
2879 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2880 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2881 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2886 	sec_level = BT_SECURITY_MEDIUM;
2887 	auth_type = HCI_AT_DEDICATED_BONDING;
2889 	if (cp->addr.type == BDADDR_BREDR) {
2890 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2893 		u8 addr_type = le_addr_type(cp->addr.type);
2894 		struct hci_conn_params *p;
2896 		/* When pairing a new device, it is expected to remember
2897 		 * this device for future connections. Adding the connection
2898 		 * parameter information ahead of time allows tracking
2899 		 * of the slave preferred values and will speed up any
2900 		 * further connection establishment.
2902 		 * If connection parameters already exist, then they
2903 		 * will be kept and this function does nothing.
2905 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
/* Don't leave an explicit-connect entry around once pairing takes over. */
2907 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2908 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2910 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2911 					   addr_type, sec_level,
2912 					   HCI_LE_CONN_TIMEOUT);
/* Translate connect errors into the closest mgmt status code. */
2918 		if (PTR_ERR(conn) == -EBUSY)
2919 			status = MGMT_STATUS_BUSY;
2920 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2921 			status = MGMT_STATUS_NOT_SUPPORTED;
2922 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2923 			status = MGMT_STATUS_REJECTED;
2925 			status = MGMT_STATUS_CONNECT_FAILED;
2927 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2928 					status, &rp, sizeof(rp));
/* A connection that already has callbacks means another pairing owns it. */
2932 	if (conn->connect_cfm_cb) {
2933 		hci_conn_drop(conn);
2934 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2935 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2939 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2942 		hci_conn_drop(conn);
2946 	cmd->cmd_complete = pairing_complete;
2948 	/* For LE, just connecting isn't a proof that the pairing finished */
2949 	if (cp->addr.type == BDADDR_BREDR) {
2950 		conn->connect_cfm_cb = pairing_complete_cb;
2951 		conn->security_cfm_cb = pairing_complete_cb;
2952 		conn->disconn_cfm_cb = pairing_complete_cb;
2954 		conn->connect_cfm_cb = le_pairing_complete_cb;
2955 		conn->security_cfm_cb = le_pairing_complete_cb;
2956 		conn->disconn_cfm_cb = le_pairing_complete_cb;
2959 	conn->io_capability = cp->io_cap;
/* Hold a reference for the pending command; released in pairing_complete. */
2960 	cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: finish the pairing immediately. */
2962 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2963 	    hci_conn_security(conn, sec_level, auth_type, true)) {
2964 		cmd->cmd_complete(cmd, 0);
2965 		mgmt_pending_remove(cmd);
2971 	hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: locate the pending Pair Device
 * command, verify the address matches the connection being paired, and
 * complete it with MGMT_STATUS_CANCELLED.
 */
2975 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2978 	struct mgmt_addr_info *addr = data;
2979 	struct mgmt_pending_cmd *cmd;
2980 	struct hci_conn *conn;
2987 	if (!hdev_is_powered(hdev)) {
2988 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2989 				      MGMT_STATUS_NOT_POWERED);
/* There must be an outstanding Pair Device command to cancel. */
2993 	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2995 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2996 				      MGMT_STATUS_INVALID_PARAMS);
3000 	conn = cmd->user_data;
/* The cancel request must name the same peer the pairing targets. */
3002 	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3003 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3004 				      MGMT_STATUS_INVALID_PARAMS);
3008 	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3009 	mgmt_pending_remove(cmd);
3011 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3012 				addr, sizeof(*addr));
3014 	hci_dev_unlock(hdev);
/* Common backend for the user confirm / passkey / PIN reply commands.
 * For LE addresses the reply is forwarded to SMP; for BR/EDR a pending
 * command is queued and the corresponding HCI reply command is sent.
 * @mgmt_op: mgmt opcode to respond to; @hci_op: HCI command to issue;
 * @passkey: only meaningful for HCI_OP_USER_PASSKEY_REPLY.
 */
3018 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3019 			     struct mgmt_addr_info *addr, u16 mgmt_op,
3020 			     u16 hci_op, __le32 passkey)
3022 	struct mgmt_pending_cmd *cmd;
3023 	struct hci_conn *conn;
3028 	if (!hdev_is_powered(hdev)) {
3029 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3030 					MGMT_STATUS_NOT_POWERED, addr,
/* Look up the live connection for the peer on the right transport. */
3035 	if (addr->type == BDADDR_BREDR)
3036 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3038 		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3039 					       le_addr_type(addr->type));
3042 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3043 					MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing replies go straight to the SMP layer, no HCI command. */
3048 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3049 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3051 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3052 						MGMT_STATUS_SUCCESS, addr,
3055 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3056 						MGMT_STATUS_FAILED, addr,
3062 	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3068 	cmd->cmd_complete = addr_cmd_complete;
3070 	/* Continue with pairing via HCI */
3071 	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3072 		struct hci_cp_user_passkey_reply cp;
3074 		bacpy(&cp.bdaddr, &addr->bdaddr);
3075 		cp.passkey = passkey;
3076 		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* All other replies carry only the peer address as the parameter. */
3078 		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* Presumably reached only on hci_send_cmd failure — confirm in full source. */
3082 		mgmt_pending_remove(cmd);
3085 	hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over user_pairing_resp()
 * issuing HCI_OP_PIN_CODE_NEG_REPLY (no passkey).
 */
3089 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3090 			      void *data, u16 len)
3092 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3096 	return user_pairing_resp(sk, hdev, &cp->addr,
3097 				 MGMT_OP_PIN_CODE_NEG_REPLY,
3098 				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validates the exact payload size
 * (this opcode takes no trailing data) and forwards to user_pairing_resp().
 */
3101 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3104 	struct mgmt_cp_user_confirm_reply *cp = data;
3108 	if (len != sizeof(*cp))
3109 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3110 				       MGMT_STATUS_INVALID_PARAMS);
3112 	return user_pairing_resp(sk, hdev, &cp->addr,
3113 				 MGMT_OP_USER_CONFIRM_REPLY,
3114 				 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: wrapper over user_pairing_resp()
 * issuing HCI_OP_USER_CONFIRM_NEG_REPLY.
 */
3117 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3118 				  void *data, u16 len)
3120 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3124 	return user_pairing_resp(sk, hdev, &cp->addr,
3125 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3126 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user-entered passkey
 * to user_pairing_resp() / HCI_OP_USER_PASSKEY_REPLY.
 */
3129 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3132 	struct mgmt_cp_user_passkey_reply *cp = data;
3136 	return user_pairing_resp(sk, hdev, &cp->addr,
3137 				 MGMT_OP_USER_PASSKEY_REPLY,
3138 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: wrapper over user_pairing_resp()
 * issuing HCI_OP_USER_PASSKEY_NEG_REPLY (no passkey).
 */
3141 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3142 				  void *data, u16 len)
3144 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3148 	return user_pairing_resp(sk, hdev, &cp->addr,
3149 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3150 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Restart advertising when a property referenced by the current advertising
 * instance (e.g. local name or appearance, selected via @flags) has changed:
 * cancel the running instance's timeout and reschedule the next instance so
 * the controller picks up the new data.
 */
3153 static void adv_expire(struct hci_dev *hdev, u32 flags)
3155 	struct adv_info *adv_instance;
3156 	struct hci_request req;
3159 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3163 	/* stop if current instance doesn't need to be changed */
3164 	if (!(adv_instance->flags & flags))
3167 	cancel_adv_timeout(hdev);
3169 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3173 	hci_req_init(&req, hdev);
3174 	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
/* Fire-and-forget: no completion callback needed for the reschedule. */
3179 	hci_req_run(&req, NULL);
/* HCI request completion handler for Set Local Name: report the result to
 * the waiting mgmt socket and, on success while LE advertising is active,
 * expire advertising instances that embed the local name.
 */
3182 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3184 	struct mgmt_cp_set_local_name *cp;
3185 	struct mgmt_pending_cmd *cmd;
3187 	BT_DBG("status 0x%02x", status);
3191 	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* Failure path: propagate the translated HCI status. */
3198 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3199 				mgmt_status(status));
3201 		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The local name is carried in adv/scan-rsp data; refresh it. */
3204 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3205 			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3208 	mgmt_pending_remove(cmd);
3211 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: store the new short/complete local name
 * and, when powered, push it to the controller (name, EIR, and scan
 * response data where applicable).
 */
3214 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3217 	struct mgmt_cp_set_local_name *cp = data;
3218 	struct mgmt_pending_cmd *cmd;
3219 	struct hci_request req;
3226 	/* If the old values are the same as the new ones just return a
3227 	 * direct command complete event.
3229 	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3230 	    !memcmp(hdev->short_name, cp->short_name,
3231 		    sizeof(hdev->short_name))) {
3232 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name is host-side only; store it unconditionally. */
3237 	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: just remember the name and notify listeners, no HCI I/O. */
3239 	if (!hdev_is_powered(hdev)) {
3240 		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3242 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3247 		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3248 					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3249 		ext_info_changed(hdev, sk);
3254 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3260 	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3262 	hci_req_init(&req, hdev);
3264 	if (lmp_bredr_capable(hdev)) {
3265 		__hci_req_update_name(&req);
3266 		__hci_req_update_eir(&req);
3269 	/* The name is stored in the scan response data and so
3270 	 * no need to update the advertising data here.
3272 	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3273 		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3275 	err = hci_req_run(&req, set_name_complete);
3277 		mgmt_pending_remove(cmd);
3280 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_APPEARANCE handler (LE only): store the GAP appearance value
 * and expire advertising instances that embed it.
 * NOTE(review): local variable "apperance" is a long-standing upstream
 * misspelling of "appearance"; harmless, kept as-is here.
 */
3284 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3287 	struct mgmt_cp_set_appearance *cp = data;
3293 	if (!lmp_le_capable(hdev))
3294 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3295 				       MGMT_STATUS_NOT_SUPPORTED);
3297 	apperance = le16_to_cpu(cp->appearance);
/* Only act when the value actually changes. */
3301 	if (hdev->appearance != apperance) {
3302 		hdev->appearance = apperance;
3304 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3305 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3307 		ext_info_changed(hdev, sk);
3310 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3313 	hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report supported, selected and
 * configurable PHYs for this controller.
 * NOTE(review): struct tag "mgmt_rp_get_phy_confguration" is a misspelling
 * declared in mgmt.h upstream; it must stay to match that header.
 */
3318 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3319 				 void *data, u16 len)
3321 	struct mgmt_rp_get_phy_confguration rp;
3323 	BT_DBG("sock %p %s", sk, hdev->name);
3327 	memset(&rp, 0, sizeof(rp));
3329 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3330 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3331 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3333 	hci_dev_unlock(hdev);
3335 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast MGMT_EV_PHY_CONFIGURATION_CHANGED with the currently selected
 * PHYs to all mgmt sockets except @skip.
 */
3339 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3341 	struct mgmt_ev_phy_configuration_changed ev;
3343 	memset(&ev, 0, sizeof(ev));
3345 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3347 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion handler for the LE Set Default PHY request issued by
 * set_phy_configuration(): answer the pending mgmt command and, on
 * success, broadcast the PHY-configuration-changed event.
 */
3351 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3352 				     u16 opcode, struct sk_buff *skb)
3354 	struct mgmt_pending_cmd *cmd;
3356 	BT_DBG("status 0x%02x", status);
3360 	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3365 		mgmt_cmd_status(cmd->sk, hdev->id,
3366 				MGMT_OP_SET_PHY_CONFIGURATION,
3367 				mgmt_status(status));
3369 		mgmt_cmd_complete(cmd->sk, hdev->id,
3370 				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
/* Skip the requester — it already got the command-complete above. */
3373 		mgmt_phy_configuration_changed(hdev, cmd->sk);
3376 	mgmt_pending_remove(cmd);
3379 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_PHY_CONFIGURATION handler: validate the requested PHY set
 * against supported/configurable PHYs, translate the BR/EDR PHY bits into
 * the ACL packet-type mask, and issue HCI LE Set Default PHY for the LE
 * bits (completed asynchronously in set_default_phy_complete()).
 * NOTE(review): struct tag "mgmt_cp_set_phy_confguration" is an upstream
 * misspelling declared in mgmt.h; it must stay to match that header.
 */
3382 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3383 				 void *data, u16 len)
3385 	struct mgmt_cp_set_phy_confguration *cp = data;
3386 	struct hci_cp_le_set_default_phy cp_phy;
3387 	struct mgmt_pending_cmd *cmd;
3388 	struct hci_request req;
3389 	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
/* DH1/DM1 are always allowed; the other packet types are toggled below. */
3390 	u16 pkt_type = (HCI_DH1 | HCI_DM1);
3391 	bool changed = false;
3394 	BT_DBG("sock %p %s", sk, hdev->name);
3396 	configurable_phys = get_configurable_phys(hdev);
3397 	supported_phys = get_supported_phys(hdev);
3398 	selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject any PHY the controller does not support at all. */
3400 	if (selected_phys & ~supported_phys)
3401 		return mgmt_cmd_status(sk, hdev->id,
3402 				       MGMT_OP_SET_PHY_CONFIGURATION,
3403 				       MGMT_STATUS_INVALID_PARAMS);
3405 	unconfigure_phys = supported_phys & ~configurable_phys;
/* Non-configurable PHYs must always remain selected. */
3407 	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3408 		return mgmt_cmd_status(sk, hdev->id,
3409 				       MGMT_OP_SET_PHY_CONFIGURATION,
3410 				       MGMT_STATUS_INVALID_PARAMS);
/* No change requested: answer immediately without touching the HW. */
3412 	if (selected_phys == get_selected_phys(hdev))
3413 		return mgmt_cmd_complete(sk, hdev->id,
3414 					 MGMT_OP_SET_PHY_CONFIGURATION,
3419 	if (!hdev_is_powered(hdev)) {
3420 		err = mgmt_cmd_status(sk, hdev->id,
3421 				      MGMT_OP_SET_PHY_CONFIGURATION,
3422 				      MGMT_STATUS_REJECTED);
3426 	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3427 		err = mgmt_cmd_status(sk, hdev->id,
3428 				      MGMT_OP_SET_PHY_CONFIGURATION,
/* Map BR/EDR 1M multi-slot PHY bits onto DH/DM packet types
 * (set-to-enable semantics for basic rate).
 */
3433 	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3434 		pkt_type |= (HCI_DH3 | HCI_DM3);
3436 		pkt_type &= ~(HCI_DH3 | HCI_DM3);
3438 	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3439 		pkt_type |= (HCI_DH5 | HCI_DM5);
3441 		pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* EDR packet-type bits are inverted: a set bit DISABLES that packet type,
 * so selecting an EDR PHY means clearing its "shall not be used" bit.
 */
3443 	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3444 		pkt_type &= ~HCI_2DH1;
3446 		pkt_type |= HCI_2DH1;
3448 	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3449 		pkt_type &= ~HCI_2DH3;
3451 		pkt_type |= HCI_2DH3;
3453 	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3454 		pkt_type &= ~HCI_2DH5;
3456 		pkt_type |= HCI_2DH5;
3458 	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3459 		pkt_type &= ~HCI_3DH1;
3461 		pkt_type |= HCI_3DH1;
3463 	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3464 		pkt_type &= ~HCI_3DH3;
3466 		pkt_type |= HCI_3DH3;
3468 	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3469 		pkt_type &= ~HCI_3DH5;
3471 		pkt_type |= HCI_3DH5;
3473 	if (pkt_type != hdev->pkt_type) {
3474 		hdev->pkt_type = pkt_type;
/* If only BR/EDR bits changed there is no LE HCI command to send;
 * broadcast the change (if any) and complete synchronously.
 */
3478 	if ((selected_phys & MGMT_PHY_LE_MASK) ==
3479 	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3481 			mgmt_phy_configuration_changed(hdev, sk);
3483 		err = mgmt_cmd_complete(sk, hdev->id,
3484 					MGMT_OP_SET_PHY_CONFIGURATION,
3490 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3497 	hci_req_init(&req, hdev);
3499 	memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction. */
3501 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3502 		cp_phy.all_phys |= 0x01;
3504 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3505 		cp_phy.all_phys |= 0x02;
3507 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3508 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3510 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3511 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3513 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3514 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3516 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3517 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3519 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3520 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3522 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3523 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3525 	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3527 	err = hci_req_run_skb(&req, set_default_phy_complete);
3529 		mgmt_pending_remove(cmd);
3532 	hci_dev_unlock(hdev);
/* Completion handler for Read Local OOB Data: copy the controller's
 * P-192 (and, for the extended variant, P-256) hash/randomizer values
 * into the mgmt reply. For the legacy command the reply is truncated to
 * exclude the 256-bit fields.
 */
3537 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3538 					 u16 opcode, struct sk_buff *skb)
3540 	struct mgmt_rp_read_local_oob_data mgmt_rp;
3541 	size_t rp_size = sizeof(mgmt_rp);
3542 	struct mgmt_pending_cmd *cmd;
3544 	BT_DBG("%s status %u", hdev->name, status);
3546 	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3550 	if (status || !skb) {
3551 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3552 				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3556 	memset(&mgmt_rp, 0, sizeof(mgmt_rp));
/* Legacy (P-192 only) response. */
3558 	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3559 		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short/malformed controller event. */
3561 		if (skb->len < sizeof(*rp)) {
3562 			mgmt_cmd_status(cmd->sk, hdev->id,
3563 					MGMT_OP_READ_LOCAL_OOB_DATA,
3564 					MGMT_STATUS_FAILED);
3568 		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3569 		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Shrink the reply so userspace doesn't see bogus 256-bit fields. */
3571 		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
3573 		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3575 		if (skb->len < sizeof(*rp)) {
3576 			mgmt_cmd_status(cmd->sk, hdev->id,
3577 					MGMT_OP_READ_LOCAL_OOB_DATA,
3578 					MGMT_STATUS_FAILED);
3582 		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3583 		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3585 		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3586 		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3589 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3590 			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3593 	mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: require a powered, SSP-capable
 * controller, then issue either the extended (Secure Connections) or
 * legacy Read Local OOB Data HCI command; the reply is delivered via
 * read_local_oob_data_complete().
 */
3596 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3597 			       void *data, u16 data_len)
3599 	struct mgmt_pending_cmd *cmd;
3600 	struct hci_request req;
3603 	BT_DBG("%s", hdev->name);
3607 	if (!hdev_is_powered(hdev)) {
3608 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3609 				      MGMT_STATUS_NOT_POWERED);
3613 	if (!lmp_ssp_capable(hdev)) {
3614 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3615 				      MGMT_STATUS_NOT_SUPPORTED);
/* Only one OOB read may be in flight at a time. */
3619 	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3620 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3625 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3631 	hci_req_init(&req, hdev);
/* Prefer the extended command when BR/EDR Secure Connections is enabled. */
3633 	if (bredr_sc_enabled(hdev))
3634 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3636 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3638 	err = hci_req_run_skb(&req, read_local_oob_data_complete);
3640 		mgmt_pending_remove(cmd);
3643 	hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler. Two payload sizes are accepted:
 * the legacy form with only P-192 hash/randomizer (BR/EDR only), and the
 * extended form that additionally carries P-256 values; any other length
 * is rejected. Zero-valued key pairs disable OOB data for that curve.
 */
3647 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3648 			       void *data, u16 len)
3650 	struct mgmt_addr_info *addr = data;
3653 	BT_DBG("%s ", hdev->name);
3655 	if (!bdaddr_type_is_valid(addr->type))
3656 		return mgmt_cmd_complete(sk, hdev->id,
3657 					 MGMT_OP_ADD_REMOTE_OOB_DATA,
3658 					 MGMT_STATUS_INVALID_PARAMS,
3659 					 addr, sizeof(*addr));
/* Legacy P-192-only payload. */
3663 	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3664 		struct mgmt_cp_add_remote_oob_data *cp = data;
/* The short form is meaningless for LE peers. */
3667 		if (cp->addr.type != BDADDR_BREDR) {
3668 			err = mgmt_cmd_complete(sk, hdev->id,
3669 						MGMT_OP_ADD_REMOTE_OOB_DATA,
3670 						MGMT_STATUS_INVALID_PARAMS,
3671 						&cp->addr, sizeof(cp->addr));
3675 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3676 					      cp->addr.type, cp->hash,
3677 					      cp->rand, NULL, NULL);
3679 			status = MGMT_STATUS_FAILED;
3681 			status = MGMT_STATUS_SUCCESS;
3683 		err = mgmt_cmd_complete(sk, hdev->id,
3684 					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3685 					&cp->addr, sizeof(cp->addr));
3686 	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3687 		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3688 		u8 *rand192, *hash192, *rand256, *hash256;
3691 		if (bdaddr_type_is_le(cp->addr.type)) {
3692 			/* Enforce zero-valued 192-bit parameters as
3693 			 * long as legacy SMP OOB isn't implemented.
3695 			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3696 			    memcmp(cp->hash192, ZERO_KEY, 16)) {
3697 				err = mgmt_cmd_complete(sk, hdev->id,
3698 						MGMT_OP_ADD_REMOTE_OOB_DATA,
3699 						MGMT_STATUS_INVALID_PARAMS,
3700 						addr, sizeof(*addr));
3707 			/* In case one of the P-192 values is set to zero,
3708 			 * then just disable OOB data for P-192.
3710 			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3711 			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
3715 				rand192 = cp->rand192;
3716 				hash192 = cp->hash192;
3720 		/* In case one of the P-256 values is set to zero, then just
3721 		 * disable OOB data for P-256.
3723 		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3724 		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
3728 			rand256 = cp->rand256;
3729 			hash256 = cp->hash256;
3732 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3733 					      cp->addr.type, hash192, rand192,
3736 			status = MGMT_STATUS_FAILED;
3738 			status = MGMT_STATUS_SUCCESS;
3740 		err = mgmt_cmd_complete(sk, hdev->id,
3741 					MGMT_OP_ADD_REMOTE_OOB_DATA,
3742 					status, &cp->addr, sizeof(cp->addr));
/* Any other payload length is a malformed command. */
3744 		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
3746 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3747 				      MGMT_STATUS_INVALID_PARAMS);
3751 	hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler (BR/EDR only): remove stored OOB
 * data for one peer, or clear all entries when BDADDR_ANY is given.
 */
3755 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3756 				  void *data, u16 len)
3758 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3762 	BT_DBG("%s", hdev->name);
3764 	if (cp->addr.type != BDADDR_BREDR)
3765 		return mgmt_cmd_complete(sk, hdev->id,
3766 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3767 					 MGMT_STATUS_INVALID_PARAMS,
3768 					 &cp->addr, sizeof(cp->addr));
/* Wildcard address: wipe the whole remote OOB store. */
3772 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3773 		hci_remote_oob_data_clear(hdev);
3774 		status = MGMT_STATUS_SUCCESS;
3778 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
/* Unknown entry maps to invalid-params rather than failure. */
3780 		status = MGMT_STATUS_INVALID_PARAMS;
3782 		status = MGMT_STATUS_SUCCESS;
3785 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3786 				status, &cp->addr, sizeof(cp->addr));
3788 	hci_dev_unlock(hdev);
/* Complete whichever Start Discovery variant (plain, service, or limited)
 * is pending once the discovery state machine reports its startup status.
 */
3792 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3794 	struct mgmt_pending_cmd *cmd;
3796 	BT_DBG("status %d", status);
/* Only one of the three variants can be pending; try each in turn. */
3800 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3802 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3805 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3808 		cmd->cmd_complete(cmd, mgmt_status(status));
3809 		mgmt_pending_remove(cmd);
3812 	hci_dev_unlock(hdev);
/* Validate a requested discovery type against this controller's transport
 * support. On any problem *mgmt_status is set to the error to report
 * (LE/BR-EDR not supported, or invalid params for unknown types).
 */
3815 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3816 				    uint8_t *mgmt_status)
3819 	case DISCOV_TYPE_LE:
3820 		*mgmt_status = mgmt_le_support(hdev);
/* Interleaved needs LE and then falls through to the BR/EDR check. */
3824 	case DISCOV_TYPE_INTERLEAVED:
3825 		*mgmt_status = mgmt_le_support(hdev);
3828 		/* Intentional fall-through */
3829 	case DISCOV_TYPE_BREDR:
3830 		*mgmt_status = mgmt_bredr_support(hdev);
3835 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which): validate state and
 * discovery type, reset the discovery filter, and kick the discovery
 * update worker; completion is reported via
 * mgmt_start_discovery_complete().
 */
3842 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
3843 				    u16 op, void *data, u16 len)
3845 	struct mgmt_cp_start_discovery *cp = data;
3846 	struct mgmt_pending_cmd *cmd;
3850 	BT_DBG("%s", hdev->name);
3854 	if (!hdev_is_powered(hdev)) {
3855 		err = mgmt_cmd_complete(sk, hdev->id, op,
3856 					MGMT_STATUS_NOT_POWERED,
3857 					&cp->type, sizeof(cp->type));
/* Busy if discovery or periodic inquiry is already running. */
3861 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
3862 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3863 		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
3864 					&cp->type, sizeof(cp->type));
3868 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3869 		err = mgmt_cmd_complete(sk, hdev->id, op, status,
3870 					&cp->type, sizeof(cp->type));
3874 	/* Clear the discovery filter first to free any previously
3875 	 * allocated memory for the UUID list.
3877 	hci_discovery_filter_clear(hdev);
3879 	hdev->discovery.type = cp->type;
3880 	hdev->discovery.report_invalid_rssi = false;
3881 	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
3882 		hdev->discovery.limited = true;
3884 		hdev->discovery.limited = false;
3886 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
3892 	cmd->cmd_complete = generic_cmd_complete;
3894 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
/* The actual HCI work happens asynchronously in discov_update. */
3895 	queue_work(hdev->req_workqueue, &hdev->discov_update);
3899 	hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: delegates to start_discovery_internal(). */
3903 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3904 			   void *data, u16 len)
3906 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY handler: delegates to
 * start_discovery_internal() with the limited-discovery opcode.
 */
3910 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
3911 				   void *data, u16 len)
3913 	return start_discovery_internal(sk, hdev,
3914 					MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete hook for Start Service Discovery: echo back the stored
 * command parameters with the given status.
 */
3918 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
3921 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering by RSSI threshold and an optional list of 128-bit
 * service UUIDs appended to the command payload. The variable-length
 * payload is validated (uuid_count bounded and consistent with len)
 * before any state is touched.
 */
3925 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3926 				   void *data, u16 len)
3928 	struct mgmt_cp_start_service_discovery *cp = data;
3929 	struct mgmt_pending_cmd *cmd;
/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16. */
3930 	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3931 	u16 uuid_count, expected_len;
3935 	BT_DBG("%s", hdev->name);
3939 	if (!hdev_is_powered(hdev)) {
3940 		err = mgmt_cmd_complete(sk, hdev->id,
3941 					MGMT_OP_START_SERVICE_DISCOVERY,
3942 					MGMT_STATUS_NOT_POWERED,
3943 					&cp->type, sizeof(cp->type));
3947 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
3948 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3949 		err = mgmt_cmd_complete(sk, hdev->id,
3950 					MGMT_OP_START_SERVICE_DISCOVERY,
3951 					MGMT_STATUS_BUSY, &cp->type,
3956 	uuid_count = __le16_to_cpu(cp->uuid_count);
3957 	if (uuid_count > max_uuid_count) {
3958 		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
3960 		err = mgmt_cmd_complete(sk, hdev->id,
3961 					MGMT_OP_START_SERVICE_DISCOVERY,
3962 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The payload length must exactly match the declared UUID count. */
3967 	expected_len = sizeof(*cp) + uuid_count * 16;
3968 	if (expected_len != len) {
3969 		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
3971 		err = mgmt_cmd_complete(sk, hdev->id,
3972 					MGMT_OP_START_SERVICE_DISCOVERY,
3973 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
3978 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3979 		err = mgmt_cmd_complete(sk, hdev->id,
3980 					MGMT_OP_START_SERVICE_DISCOVERY,
3981 					status, &cp->type, sizeof(cp->type));
3985 	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
3992 	cmd->cmd_complete = service_discovery_cmd_complete;
3994 	/* Clear the discovery filter first to free any previously
3995 	 * allocated memory for the UUID list.
3997 	hci_discovery_filter_clear(hdev);
3999 	hdev->discovery.result_filtering = true;
4000 	hdev->discovery.type = cp->type;
4001 	hdev->discovery.rssi = cp->rssi;
4002 	hdev->discovery.uuid_count = uuid_count;
4004 	if (uuid_count > 0) {
/* Copy the UUID list out of the command buffer; freed via
 * hci_discovery_filter_clear() on the next discovery.
 */
4005 		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4007 		if (!hdev->discovery.uuids) {
4008 			err = mgmt_cmd_complete(sk, hdev->id,
4009 						MGMT_OP_START_SERVICE_DISCOVERY,
4011 						&cp->type, sizeof(cp->type));
4012 			mgmt_pending_remove(cmd);
4017 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4018 	queue_work(hdev->req_workqueue, &hdev->discov_update);
4022 	hci_dev_unlock(hdev);
/* Complete a pending Stop Discovery command once the discovery state
 * machine has finished shutting down.
 */
4026 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4028 	struct mgmt_pending_cmd *cmd;
4030 	BT_DBG("status %d", status);
4034 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4036 		cmd->cmd_complete(cmd, mgmt_status(status));
4037 		mgmt_pending_remove(cmd);
4040 	hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: validate that discovery is active and
 * of the requested type, then move to DISCOVERY_STOPPING and let the
 * discov_update worker issue the HCI commands.
 */
4043 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4046 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4047 	struct mgmt_pending_cmd *cmd;
4050 	BT_DBG("%s", hdev->name);
4054 	if (!hci_discovery_active(hdev)) {
4055 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4056 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
4057 					sizeof(mgmt_cp->type));
/* The stop request must match the type discovery was started with. */
4061 	if (hdev->discovery.type != mgmt_cp->type) {
4062 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4063 					MGMT_STATUS_INVALID_PARAMS,
4064 					&mgmt_cp->type, sizeof(mgmt_cp->type));
4068 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4074 	cmd->cmd_complete = generic_cmd_complete;
4076 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4077 	queue_work(hdev->req_workqueue, &hdev->discov_update);
4081 	hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace confirms whether it already
 * knows the remote name for an inquiry-cache entry; unknown names are
 * marked NAME_NEEDED so name resolution is scheduled during discovery.
 */
4085 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4088 	struct mgmt_cp_confirm_name *cp = data;
4089 	struct inquiry_entry *e;
4092 	BT_DBG("%s", hdev->name);
/* Only meaningful while a discovery session is running. */
4096 	if (!hci_discovery_active(hdev)) {
4097 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4098 					MGMT_STATUS_FAILED, &cp->addr,
4103 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4105 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4106 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4111 	if (cp->name_known) {
4112 		e->name_state = NAME_KNOWN;
4115 		e->name_state = NAME_NEEDED;
4116 		hci_inquiry_cache_update_resolve(hdev, e);
4119 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4120 				&cp->addr, sizeof(cp->addr));
4123 	hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the blacklist and
 * broadcast MGMT_EV_DEVICE_BLOCKED on success.
 */
4127 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4130 	struct mgmt_cp_block_device *cp = data;
4134 	BT_DBG("%s", hdev->name);
4136 	if (!bdaddr_type_is_valid(cp->addr.type))
4137 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4138 					 MGMT_STATUS_INVALID_PARAMS,
4139 					 &cp->addr, sizeof(cp->addr));
4143 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
/* List insertion failed (e.g. already blocked or out of memory). */
4146 		status = MGMT_STATUS_FAILED;
/* Notify all listeners except the requesting socket. */
4150 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4152 	status = MGMT_STATUS_SUCCESS;
4155 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4156 				&cp->addr, sizeof(cp->addr));
4158 	hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the blacklist
 * and broadcast MGMT_EV_DEVICE_UNBLOCKED on success.
 */
4163 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4166 	struct mgmt_cp_unblock_device *cp = data;
4170 	BT_DBG("%s", hdev->name);
4172 	if (!bdaddr_type_is_valid(cp->addr.type))
4173 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4174 					 MGMT_STATUS_INVALID_PARAMS,
4175 					 &cp->addr, sizeof(cp->addr));
4179 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
/* Address was not on the blacklist. */
4182 		status = MGMT_STATUS_INVALID_PARAMS;
4186 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4188 	status = MGMT_STATUS_SUCCESS;
4191 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4192 				&cp->addr, sizeof(cp->addr));
4194 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID (DI) record
 * (source, vendor, product, version) and refresh the EIR data so the
 * record is advertised over BR/EDR.
 */
4199 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4202 	struct mgmt_cp_set_device_id *cp = data;
4203 	struct hci_request req;
4207 	BT_DBG("%s", hdev->name);
4209 	source = __le16_to_cpu(cp->source);
/* Valid DI sources: 0x0000 (disabled), 0x0001 (SIG), 0x0002 (USB-IF). */
4211 	if (source > 0x0002)
4212 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4213 				       MGMT_STATUS_INVALID_PARAMS);
4217 	hdev->devid_source = source;
4218 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4219 	hdev->devid_product = __le16_to_cpu(cp->product);
4220 	hdev->devid_version = __le16_to_cpu(cp->version);
4222 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4225 	hci_req_init(&req, hdev);
4226 	__hci_req_update_eir(&req);
4227 	hci_req_run(&req, NULL);
4229 	hci_dev_unlock(hdev);
/* Completion callback for re-enabling instance advertising; debug-only,
 * no error handling is needed here.
 */
4234 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4237 	BT_DBG("status %d", status);
/* Completion handler for Set Advertising: answer all pending
 * MGMT_OP_SET_ADVERTISING commands, sync the HCI_ADVERTISING flag with
 * the controller state, emit New Settings, and — if "Set Advertising"
 * was just turned off while advertising instances exist — restart
 * multi-instance advertising.
 */
4240 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4243 	struct cmd_lookup match = { NULL, hdev };
4244 	struct hci_request req;
4246 	struct adv_info *adv_instance;
/* Failure: fail every pending Set Advertising command with the HCI error. */
4252 		u8 mgmt_err = mgmt_status(status);
4254 		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4255 				     cmd_status_rsp, &mgmt_err);
/* Mirror the controller's LE advertising state into the mgmt flag. */
4259 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4260 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
4262 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4264 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4267 	new_settings(hdev, match.sk);
4272 	/* If "Set Advertising" was just disabled and instance advertising was
4273 	 * set up earlier, then re-enable multi-instance advertising.
4275 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4276 	    list_empty(&hdev->adv_instances))
4279 	instance = hdev->cur_adv_instance;
/* No current instance selected: fall back to the first configured one. */
4281 		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
4282 							struct adv_info, list);
4286 		instance = adv_instance->instance;
4289 	hci_req_init(&req, hdev);
4291 	err = __hci_req_schedule_adv_instance(&req, instance, true);
4294 		err = hci_req_run(&req, enable_advertising_instance);
4297 		bt_dev_err(hdev, "failed to re-configure advertising");
4300 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler.
 *
 * val 0x00 disables advertising, 0x01 enables it, 0x02 enables it in
 * connectable mode (tracked via HCI_ADVERTISING_CONNECTABLE). Requires
 * LE support. When no HCI traffic is needed (powered off, no state
 * change, LE links up, or active scan in progress) only the setting
 * flags are toggled and a direct response is sent; otherwise an HCI
 * request is queued and completed in set_advertising_complete().
 */
4303 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4306 struct mgmt_mode *cp = data;
4307 struct mgmt_pending_cmd *cmd;
4308 struct hci_request req;
4312 BT_DBG("request for %s", hdev->name);
4314 status = mgmt_le_support(hdev);
4316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4319 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4320 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4321 MGMT_STATUS_INVALID_PARAMS);
4327 /* The following conditions are ones which mean that we should
4328 * not do any HCI communication but directly send a mgmt
4329 * response to user space (after toggling the flag if
4332 if (!hdev_is_powered(hdev) ||
4333 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4334 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4335 hci_conn_num(hdev, LE_LINK) > 0 ||
4336 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4337 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
/* Flag-only path: adjust settings without touching the controller. */
4341 hdev->cur_adv_instance = 0x00;
4342 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4343 if (cp->val == 0x02)
4344 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4346 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4348 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4349 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4352 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4357 err = new_settings(hdev, sk);
/* Only one Set Advertising / Set LE may be in flight at a time. */
4362 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4363 pending_find(MGMT_OP_SET_LE, hdev)) {
4364 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4369 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4375 hci_req_init(&req, hdev);
4377 if (cp->val == 0x02)
4378 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4380 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4382 cancel_adv_timeout(hdev);
4385 /* Switch to instance "0" for the Set Advertising setting.
4386 * We cannot use update_[adv|scan_rsp]_data() here as the
4387 * HCI_ADVERTISING flag is not yet set.
4389 hdev->cur_adv_instance = 0x00;
/* Extended advertising controllers use the ext-adv request path. */
4391 if (ext_adv_capable(hdev)) {
4392 __hci_req_start_ext_adv(&req, 0x00);
4394 __hci_req_update_adv_data(&req, 0x00);
4395 __hci_req_update_scan_rsp_data(&req, 0x00);
4396 __hci_req_enable_advertising(&req);
4399 __hci_req_disable_advertising(&req);
4402 err = hci_req_run(&req, set_advertising_complete);
4404 mgmt_pending_remove(cmd);
4407 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler.
 *
 * Stores a static random address for LE use. Requires LE support and is
 * only allowed while the controller is powered off. BDADDR_ANY clears
 * the address; otherwise the address must not be BDADDR_NONE and must
 * have the two most significant bits set, as required for static random
 * addresses.
 */
4411 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4412 void *data, u16 len)
4414 struct mgmt_cp_set_static_address *cp = data;
4417 BT_DBG("%s", hdev->name);
4419 if (!lmp_le_capable(hdev))
4420 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4421 MGMT_STATUS_NOT_SUPPORTED);
/* Changing the identity address while powered is not allowed. */
4423 if (hdev_is_powered(hdev))
4424 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4425 MGMT_STATUS_REJECTED);
4427 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4428 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4429 return mgmt_cmd_status(sk, hdev->id,
4430 MGMT_OP_SET_STATIC_ADDRESS,
4431 MGMT_STATUS_INVALID_PARAMS);
4433 /* Two most significant bits shall be set */
4434 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4435 return mgmt_cmd_status(sk, hdev->id,
4436 MGMT_OP_SET_STATIC_ADDRESS,
4437 MGMT_STATUS_INVALID_PARAMS);
4442 bacpy(&hdev->static_addr, &cp->bdaddr);
4444 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4448 err = new_settings(hdev, sk);
4451 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler.
 *
 * Validates and stores the LE scan interval and window (each must be in
 * the range 0x0004-0x4000 and the window must not exceed the interval),
 * then restarts background passive scanning, if it is currently running
 * outside of discovery, so the new parameters take effect immediately.
 */
4455 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4456 void *data, u16 len)
4458 struct mgmt_cp_set_scan_params *cp = data;
4459 __u16 interval, window;
4462 BT_DBG("%s", hdev->name);
4464 if (!lmp_le_capable(hdev))
4465 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4466 MGMT_STATUS_NOT_SUPPORTED);
4468 interval = __le16_to_cpu(cp->interval);
4470 if (interval < 0x0004 || interval > 0x4000)
4471 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4472 MGMT_STATUS_INVALID_PARAMS);
4474 window = __le16_to_cpu(cp->window);
4476 if (window < 0x0004 || window > 0x4000)
4477 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4478 MGMT_STATUS_INVALID_PARAMS);
/* The scan window must fit inside the scan interval. */
4480 if (window > interval)
4481 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4482 MGMT_STATUS_INVALID_PARAMS);
4486 hdev->le_scan_interval = interval;
4487 hdev->le_scan_window = window;
4489 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4492 /* If background scan is running, restart it so new parameters are
4495 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4496 hdev->discovery.state == DISCOVERY_STOPPED) {
4497 struct hci_request req;
4499 hci_req_init(&req, hdev);
4501 hci_req_add_le_scan_disable(&req);
4502 hci_req_add_le_passive_scan(&req);
4504 hci_req_run(&req, NULL);
4507 hci_dev_unlock(hdev);
/* HCI request completion handler for Set Fast Connectable.
 *
 * Looks up the pending command and, on failure, answers it with the
 * translated error status. On success the HCI_FAST_CONNECTABLE flag is
 * updated to match the requested mode, the settings response is sent
 * and New Settings is broadcast.
 */
4512 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4515 struct mgmt_pending_cmd *cmd;
4517 BT_DBG("status 0x%02x", status);
4521 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4526 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4527 mgmt_status(status));
4529 struct mgmt_mode *cp = cmd->param;
4532 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4534 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4536 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4537 new_settings(hdev, cmd->sk);
4540 mgmt_pending_remove(cmd);
4543 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler.
 *
 * Requires BR/EDR to be enabled and a controller of at least Bluetooth
 * version 1.2 (needed for the fast connectable page scan settings).
 * If the requested state matches the current one, or the controller is
 * powered off, only the flag/response handling is done; otherwise a
 * write of the page scan parameters is queued and finished in
 * fast_connectable_complete().
 */
4546 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4547 void *data, u16 len)
4549 struct mgmt_mode *cp = data;
4550 struct mgmt_pending_cmd *cmd;
4551 struct hci_request req;
4554 BT_DBG("%s", hdev->name);
4556 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4557 hdev->hci_ver < BLUETOOTH_VER_1_2)
4558 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4559 MGMT_STATUS_NOT_SUPPORTED);
4561 if (cp->val != 0x00 && cp->val != 0x01)
4562 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4563 MGMT_STATUS_INVALID_PARAMS);
/* Reject if another Set Fast Connectable is already pending. */
4567 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4568 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No state change requested: just send the current settings back. */
4573 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4574 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
/* Powered off: toggle the flag only, no HCI traffic possible. */
4579 if (!hdev_is_powered(hdev)) {
4580 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4581 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4583 new_settings(hdev, sk);
4587 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4594 hci_req_init(&req, hdev);
4596 __hci_req_write_fast_connectable(&req, cp->val);
4598 err = hci_req_run(&req, fast_connectable_complete);
4600 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4601 MGMT_STATUS_FAILED);
4602 mgmt_pending_remove(cmd);
4606 hci_dev_unlock(hdev);
/* HCI request completion handler for Set BR/EDR.
 *
 * On failure the HCI_BREDR_ENABLED flag (optimistically set before the
 * request was issued) is rolled back and the pending command answered
 * with the error; on success the settings response is sent and New
 * Settings broadcast.
 */
4611 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4613 struct mgmt_pending_cmd *cmd;
4615 BT_DBG("status 0x%02x", status);
4619 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4624 u8 mgmt_err = mgmt_status(status);
4626 /* We need to restore the flag if related HCI commands
4629 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4631 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4633 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4634 new_settings(hdev, cmd->sk);
4637 mgmt_pending_remove(cmd);
4640 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler.
 *
 * Toggles BR/EDR support on a dual-mode (BR/EDR + LE) controller. LE
 * must remain enabled. When powered off only flags are adjusted (and,
 * when disabling, the BR/EDR-only settings are cleared). When powered
 * on, disabling is rejected, and re-enabling is rejected if a static
 * address or Secure Connections is in use (see comment below). The
 * actual enable is done via an HCI request completed in
 * set_bredr_complete().
 */
4643 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4645 struct mgmt_mode *cp = data;
4646 struct mgmt_pending_cmd *cmd;
4647 struct hci_request req;
4650 BT_DBG("request for %s", hdev->name);
4652 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4653 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4654 MGMT_STATUS_NOT_SUPPORTED);
4656 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4657 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4658 MGMT_STATUS_REJECTED);
4660 if (cp->val != 0x00 && cp->val != 0x01)
4661 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4662 MGMT_STATUS_INVALID_PARAMS);
/* No change requested: just return the current settings. */
4666 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4667 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4671 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears all BR/EDR-specific settings with it. */
4673 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4674 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4675 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4676 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4677 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4680 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4682 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4686 err = new_settings(hdev, sk);
4690 /* Reject disabling when powered on */
4692 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4693 MGMT_STATUS_REJECTED);
4696 /* When configuring a dual-mode controller to operate
4697 * with LE only and using a static address, then switching
4698 * BR/EDR back on is not allowed.
4700 * Dual-mode controllers shall operate with the public
4701 * address as its identity address for BR/EDR and LE. So
4702 * reject the attempt to create an invalid configuration.
4704 * The same restrictions applies when secure connections
4705 * has been enabled. For BR/EDR this is a controller feature
4706 * while for LE it is a host stack feature. This means that
4707 * switching BR/EDR back on when secure connections has been
4708 * enabled is not a supported transaction.
4710 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4711 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4712 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4713 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4714 MGMT_STATUS_REJECTED);
4719 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
4720 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4725 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4731 /* We need to flip the bit already here so that
4732 * hci_req_update_adv_data generates the correct flags.
4734 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4736 hci_req_init(&req, hdev);
4738 __hci_req_write_fast_connectable(&req, false);
4739 __hci_req_update_scan(&req);
4741 /* Since only the advertising data flags will change, there
4742 * is no need to update the scan response data.
4744 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
4746 err = hci_req_run(&req, set_bredr_complete);
4748 mgmt_pending_remove(cmd);
4751 hci_dev_unlock(hdev);
/* HCI request completion handler for Set Secure Connections.
 *
 * On failure the pending command is answered with the error status.
 * On success the HCI_SC_ENABLED/HCI_SC_ONLY flags are set according to
 * the originally requested mode (off / enabled / SC-only), followed by
 * the settings response and a New Settings broadcast.
 */
4755 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4757 struct mgmt_pending_cmd *cmd;
4758 struct mgmt_mode *cp;
4760 BT_DBG("%s status %u", hdev->name, status);
4764 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4769 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4770 mgmt_status(status));
/* Requested mode 0x00: Secure Connections fully disabled. */
4778 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4779 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Requested mode 0x01: SC enabled but not SC-only. */
4782 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4783 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Requested mode 0x02: SC-only mode. */
4786 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4787 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4791 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4792 new_settings(hdev, cmd->sk);
4795 mgmt_pending_remove(cmd);
4797 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler.
 *
 * val 0x00 disables Secure Connections, 0x01 enables it, 0x02 enables
 * SC-only mode. Requires either controller SC support or LE enabled;
 * with BR/EDR active and an SC-capable controller, SSP must be enabled
 * first. When the controller cannot act on the change (powered off, no
 * SC support, or BR/EDR disabled) only the flags are toggled; otherwise
 * HCI Write Secure Connections Host Support is issued and completed in
 * sc_enable_complete().
 */
4800 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4801 void *data, u16 len)
4803 struct mgmt_mode *cp = data;
4804 struct mgmt_pending_cmd *cmd;
4805 struct hci_request req;
4809 BT_DBG("request for %s", hdev->name);
4811 if (!lmp_sc_capable(hdev) &&
4812 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4813 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4814 MGMT_STATUS_NOT_SUPPORTED);
4816 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4817 lmp_sc_capable(hdev) &&
4818 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4819 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4820 MGMT_STATUS_REJECTED);
4822 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4823 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4824 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: the controller side cannot be programmed now. */
4828 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4829 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4833 changed = !hci_dev_test_and_set_flag(hdev,
4835 if (cp->val == 0x02)
4836 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4838 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4840 changed = hci_dev_test_and_clear_flag(hdev,
4842 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4845 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4850 err = new_settings(hdev, sk);
4855 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state: no HCI round-trip needed. */
4863 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4864 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4865 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4869 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4875 hci_req_init(&req, hdev);
4876 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4877 err = hci_req_run(&req, sc_enable_complete);
4879 mgmt_pending_remove(cmd);
4884 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler.
 *
 * val 0x00 disables keeping debug keys, 0x01 keeps them, 0x02 also
 * actively uses SSP debug mode. The HCI_KEEP_DEBUG_KEYS and
 * HCI_USE_DEBUG_KEYS flags are toggled accordingly; when the use-mode
 * actually changed on a powered controller with SSP enabled, HCI Write
 * SSP Debug Mode is sent to apply it.
 */
4888 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4889 void *data, u16 len)
4891 struct mgmt_mode *cp = data;
4892 bool changed, use_changed;
4895 BT_DBG("request for %s", hdev->name);
4897 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4898 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4899 MGMT_STATUS_INVALID_PARAMS);
4904 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4906 changed = hci_dev_test_and_clear_flag(hdev,
4907 HCI_KEEP_DEBUG_KEYS);
4909 if (cp->val == 0x02)
4910 use_changed = !hci_dev_test_and_set_flag(hdev,
4911 HCI_USE_DEBUG_KEYS);
4913 use_changed = hci_dev_test_and_clear_flag(hdev,
4914 HCI_USE_DEBUG_KEYS);
/* Only touch the controller when the use-mode really changed. */
4916 if (hdev_is_powered(hdev) && use_changed &&
4917 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4918 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4919 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4920 sizeof(mode), &mode);
4923 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4928 err = new_settings(hdev, sk);
4931 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler.
 *
 * privacy 0x00 disables LE privacy, 0x01 enables it, 0x02 enables
 * limited privacy (tracked via HCI_LIMITED_PRIVACY). Requires LE
 * support and a powered-off controller. Enabling stores the supplied
 * IRK, marks the RPA expired and flags advertising instances for RPA
 * regeneration; disabling wipes the IRK and clears the related flags.
 */
4935 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4938 struct mgmt_cp_set_privacy *cp = cp_data;
4942 BT_DBG("request for %s", hdev->name);
4944 if (!lmp_le_capable(hdev))
4945 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4946 MGMT_STATUS_NOT_SUPPORTED);
4948 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
4949 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4950 MGMT_STATUS_INVALID_PARAMS);
4952 if (hdev_is_powered(hdev))
4953 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4954 MGMT_STATUS_REJECTED);
4958 /* If user space supports this command it is also expected to
4959 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4961 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
4964 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
4965 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force a fresh RPA to be generated on next use. */
4966 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4967 hci_adv_instances_set_rpa_expired(hdev, true);
4968 if (cp->privacy == 0x02)
4969 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
4971 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4973 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
4974 memset(hdev->irk, 0, sizeof(hdev->irk));
4975 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
4976 hci_adv_instances_set_rpa_expired(hdev, false);
4977 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4980 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4985 err = new_settings(hdev, sk);
4988 hci_dev_unlock(hdev);
/* Validate an IRK entry from Load IRKs: the address must be an LE
 * public address or a static random address (two most significant
 * address bits set).
 */
4992 static bool irk_is_valid(struct mgmt_irk_info *irk)
4994 switch (irk->addr.type) {
4995 case BDADDR_LE_PUBLIC:
4998 case BDADDR_LE_RANDOM:
4999 /* Two most significant bits shall be set */
5000 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler.
 *
 * Replaces the SMP IRK store with the list supplied by user space.
 * The count is bounded so the computed payload size cannot overflow
 * u16, the total length must match struct_size() exactly, and every
 * entry must pass irk_is_valid() before the existing IRKs are cleared.
 * Finally HCI_RPA_RESOLVING is set, since a user space that loads IRKs
 * is expected to handle RPA resolution.
 */
5008 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5011 struct mgmt_cp_load_irks *cp = cp_data;
5012 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5013 sizeof(struct mgmt_irk_info));
5014 u16 irk_count, expected_len;
5017 BT_DBG("request for %s", hdev->name);
5019 if (!lmp_le_capable(hdev))
5020 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5021 MGMT_STATUS_NOT_SUPPORTED);
5023 irk_count = __le16_to_cpu(cp->irk_count);
5024 if (irk_count > max_irk_count) {
5025 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5027 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5028 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared entry count exactly. */
5031 expected_len = struct_size(cp, irks, irk_count);
5032 if (expected_len != len) {
5033 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5035 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5036 MGMT_STATUS_INVALID_PARAMS);
5039 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry before mutating any state. */
5041 for (i = 0; i < irk_count; i++) {
5042 struct mgmt_irk_info *key = &cp->irks[i];
5044 if (!irk_is_valid(key))
5045 return mgmt_cmd_status(sk, hdev->id,
5047 MGMT_STATUS_INVALID_PARAMS);
5052 hci_smp_irks_clear(hdev);
5054 for (i = 0; i < irk_count; i++) {
5055 struct mgmt_irk_info *irk = &cp->irks[i];
5057 hci_add_irk(hdev, &irk->addr.bdaddr,
5058 le_addr_type(irk->addr.type), irk->val,
5062 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5064 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5066 hci_dev_unlock(hdev);
/* Validate an LTK entry from Load Long Term Keys: the master field must
 * be a boolean and the address an LE public address or a static random
 * address (two most significant address bits set).
 */
5071 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5073 if (key->master != 0x00 && key->master != 0x01)
5076 switch (key->addr.type) {
5077 case BDADDR_LE_PUBLIC:
5080 case BDADDR_LE_RANDOM:
5081 /* Two most significant bits shall be set */
5082 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 *
 * Replaces the SMP LTK store with the list supplied by user space.
 * The count is bounded against u16 overflow, the payload length must
 * match struct_size() exactly, and every entry must pass ltk_is_valid()
 * before the existing LTKs are cleared. Each mgmt LTK type is then
 * translated into the internal SMP key type plus an authenticated flag
 * and added via hci_add_ltk().
 */
5090 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5091 void *cp_data, u16 len)
5093 struct mgmt_cp_load_long_term_keys *cp = cp_data;
5094 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5095 sizeof(struct mgmt_ltk_info));
5096 u16 key_count, expected_len;
5099 BT_DBG("request for %s", hdev->name);
5101 if (!lmp_le_capable(hdev))
5102 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5103 MGMT_STATUS_NOT_SUPPORTED);
5105 key_count = __le16_to_cpu(cp->key_count);
5106 if (key_count > max_key_count) {
5107 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
5109 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5110 MGMT_STATUS_INVALID_PARAMS);
5113 expected_len = struct_size(cp, keys, key_count);
5114 if (expected_len != len) {
5115 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
5117 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5118 MGMT_STATUS_INVALID_PARAMS);
5121 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before mutating any state. */
5123 for (i = 0; i < key_count; i++) {
5124 struct mgmt_ltk_info *key = &cp->keys[i];
5126 if (!ltk_is_valid(key))
5127 return mgmt_cmd_status(sk, hdev->id,
5128 MGMT_OP_LOAD_LONG_TERM_KEYS,
5129 MGMT_STATUS_INVALID_PARAMS);
5134 hci_smp_ltks_clear(hdev);
5136 for (i = 0; i < key_count; i++) {
5137 struct mgmt_ltk_info *key = &cp->keys[i];
5138 u8 type, authenticated;
/* Map the mgmt key type onto the SMP key type / auth flag. */
5140 switch (key->type) {
5141 case MGMT_LTK_UNAUTHENTICATED:
5142 authenticated = 0x00;
5143 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5145 case MGMT_LTK_AUTHENTICATED:
5146 authenticated = 0x01;
5147 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5149 case MGMT_LTK_P256_UNAUTH:
5150 authenticated = 0x00;
5151 type = SMP_LTK_P256;
5153 case MGMT_LTK_P256_AUTH:
5154 authenticated = 0x01;
5155 type = SMP_LTK_P256;
5157 case MGMT_LTK_P256_DEBUG:
5158 authenticated = 0x00;
5159 type = SMP_LTK_P256_DEBUG;
5165 hci_add_ltk(hdev, &key->addr.bdaddr,
5166 le_addr_type(key->addr.type), type, authenticated,
5167 key->val, key->enc_size, key->ediv, key->rand);
5170 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5173 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Connection Information.
 *
 * Builds the reply from the cached values on the hci_conn (or the
 * "invalid" sentinels on error), sends it to the originating socket and
 * drops the connection reference taken when the command was queued.
 */
5178 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5180 struct hci_conn *conn = cmd->user_data;
5181 struct mgmt_rp_get_conn_info rp;
/* The original request address was stashed in cmd->param. */
5184 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5186 if (status == MGMT_STATUS_SUCCESS) {
5187 rp.rssi = conn->rssi;
5188 rp.tx_power = conn->tx_power;
5189 rp.max_tx_power = conn->max_tx_power;
5191 rp.rssi = HCI_RSSI_INVALID;
5192 rp.tx_power = HCI_TX_POWER_INVALID;
5193 rp.max_tx_power = HCI_TX_POWER_INVALID;
5196 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5197 status, &rp, sizeof(rp));
5199 hci_conn_drop(conn);
/* HCI request completion handler for the RSSI / TX power refresh issued
 * by get_conn_info(). Recovers the connection handle from the last sent
 * command, looks up the matching pending mgmt command and completes it.
 */
5205 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5208 struct hci_cp_read_rssi *cp;
5209 struct mgmt_pending_cmd *cmd;
5210 struct hci_conn *conn;
5214 BT_DBG("status 0x%02x", hci_status);
5218 /* Commands sent in request are either Read RSSI or Read Transmit Power
5219 * Level so we check which one was last sent to retrieve connection
5220 * handle. Both commands have handle as first parameter so it's safe to
5221 * cast data on the same command struct.
5223 * First command sent is always Read RSSI and we fail only if it fails.
5224 * In other case we simply override error to indicate success as we
5225 * already remembered if TX power value is actually valid.
5227 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5229 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5230 status = MGMT_STATUS_SUCCESS;
5232 status = mgmt_status(hci_status);
/* Neither command was the last one sent: request state is bogus. */
5236 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
5240 handle = __le16_to_cpu(cp->handle);
5241 conn = hci_conn_hash_lookup_handle(hdev, handle);
5243 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
5248 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5252 cmd->cmd_complete(cmd, status);
5253 mgmt_pending_remove(cmd);
5256 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler.
 *
 * Returns RSSI, TX power and max TX power for an existing connection.
 * If the cached values on the hci_conn are recent enough (within a
 * randomized conn_info_min_age..conn_info_max_age window) they are
 * returned directly; otherwise an HCI request refreshing them is queued
 * and the reply is deferred to conn_info_refresh_complete() /
 * conn_info_cmd_complete().
 */
5259 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5262 struct mgmt_cp_get_conn_info *cp = data;
5263 struct mgmt_rp_get_conn_info rp;
5264 struct hci_conn *conn;
5265 unsigned long conn_info_age;
5268 BT_DBG("%s", hdev->name);
5270 memset(&rp, 0, sizeof(rp));
5271 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5272 rp.addr.type = cp->addr.type;
5274 if (!bdaddr_type_is_valid(cp->addr.type))
5275 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5276 MGMT_STATUS_INVALID_PARAMS,
5281 if (!hdev_is_powered(hdev)) {
5282 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5283 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the connection on the transport implied by the address. */
5288 if (cp->addr.type == BDADDR_BREDR)
5289 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5292 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5294 if (!conn || conn->state != BT_CONNECTED) {
5295 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5296 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one Get Conn Info per connection may be in flight. */
5301 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5302 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5303 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5307 /* To avoid client trying to guess when to poll again for information we
5308 * calculate conn info age as random value between min/max set in hdev.
5310 conn_info_age = hdev->conn_info_min_age +
5311 prandom_u32_max(hdev->conn_info_max_age -
5312 hdev->conn_info_min_age);
5314 /* Query controller to refresh cached values if they are too old or were
5317 if (time_after(jiffies, conn->conn_info_timestamp +
5318 msecs_to_jiffies(conn_info_age)) ||
5319 !conn->conn_info_timestamp) {
5320 struct hci_request req;
5321 struct hci_cp_read_tx_power req_txp_cp;
5322 struct hci_cp_read_rssi req_rssi_cp;
5323 struct mgmt_pending_cmd *cmd;
5325 hci_req_init(&req, hdev);
5326 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5327 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5330 /* For LE links TX power does not change thus we don't need to
5331 * query for it once value is known.
5333 if (!bdaddr_type_is_le(cp->addr.type) ||
5334 conn->tx_power == HCI_TX_POWER_INVALID) {
5335 req_txp_cp.handle = cpu_to_le16(conn->handle);
5336 req_txp_cp.type = 0x00;
5337 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5338 sizeof(req_txp_cp), &req_txp_cp);
5341 /* Max TX power needs to be read only once per connection */
5342 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5343 req_txp_cp.handle = cpu_to_le16(conn->handle);
5344 req_txp_cp.type = 0x01;
5345 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5346 sizeof(req_txp_cp), &req_txp_cp);
5349 err = hci_req_run(&req, conn_info_refresh_complete);
5353 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Keep the connection alive until the deferred reply is sent. */
5360 hci_conn_hold(conn);
5361 cmd->user_data = hci_conn_get(conn);
5362 cmd->cmd_complete = conn_info_cmd_complete;
5364 conn->conn_info_timestamp = jiffies;
5366 /* Cache is valid, just reply with values cached in hci_conn */
5367 rp.rssi = conn->rssi;
5368 rp.tx_power = conn->tx_power;
5369 rp.max_tx_power = conn->max_tx_power;
5371 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5372 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5376 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Clock Information.
 *
 * Fills in the local clock from hdev and, when a connection was
 * involved, the piconet clock and accuracy from the hci_conn, then
 * sends the reply and drops the connection reference.
 */
5380 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5382 struct hci_conn *conn = cmd->user_data;
5383 struct mgmt_rp_get_clock_info rp;
5384 struct hci_dev *hdev;
5387 memset(&rp, 0, sizeof(rp));
5388 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5393 hdev = hci_dev_get(cmd->index);
5395 rp.local_clock = cpu_to_le32(hdev->clock);
5400 rp.piconet_clock = cpu_to_le32(conn->clock);
5401 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5405 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5409 hci_conn_drop(conn);
/* HCI request completion handler for the Read Clock request issued by
 * get_clock_info(). Recovers the hci_conn (if a piconet clock was read,
 * indicated by the "which" field) and completes the matching pending
 * mgmt command.
 */
5416 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5418 struct hci_cp_read_clock *hci_cp;
5419 struct mgmt_pending_cmd *cmd;
5420 struct hci_conn *conn;
5422 BT_DBG("%s status %u", hdev->name, status);
5426 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means a piconet clock read tied to a connection. */
5430 if (hci_cp->which) {
5431 u16 handle = __le16_to_cpu(hci_cp->handle);
5432 conn = hci_conn_hash_lookup_handle(hdev, handle);
5437 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5441 cmd->cmd_complete(cmd, mgmt_status(status));
5442 mgmt_pending_remove(cmd);
5445 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler.
 *
 * Reads the local clock, and additionally the piconet clock when a
 * non-BDADDR_ANY BR/EDR peer address is given (which then must match a
 * connected ACL link). The reply is deferred to
 * get_clock_info_complete() / clock_info_cmd_complete().
 */
5448 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5451 struct mgmt_cp_get_clock_info *cp = data;
5452 struct mgmt_rp_get_clock_info rp;
5453 struct hci_cp_read_clock hci_cp;
5454 struct mgmt_pending_cmd *cmd;
5455 struct hci_request req;
5456 struct hci_conn *conn;
5459 BT_DBG("%s", hdev->name);
5461 memset(&rp, 0, sizeof(rp));
5462 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5463 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
5465 if (cp->addr.type != BDADDR_BREDR)
5466 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5467 MGMT_STATUS_INVALID_PARAMS,
5472 if (!hdev_is_powered(hdev)) {
5473 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5474 MGMT_STATUS_NOT_POWERED, &rp,
5479 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5480 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5482 if (!conn || conn->state != BT_CONNECTED) {
5483 err = mgmt_cmd_complete(sk, hdev->id,
5484 MGMT_OP_GET_CLOCK_INFO,
5485 MGMT_STATUS_NOT_CONNECTED,
5493 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5499 cmd->cmd_complete = clock_info_cmd_complete;
5501 hci_req_init(&req, hdev);
/* First read: local clock (which = 0 via the memset). */
5503 memset(&hci_cp, 0, sizeof(hci_cp));
5504 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Keep the connection alive until the deferred reply is sent. */
5507 hci_conn_hold(conn);
5508 cmd->user_data = hci_conn_get(conn);
5510 hci_cp.handle = cpu_to_le16(conn->handle);
5511 hci_cp.which = 0x01; /* Piconet clock */
5512 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5515 err = hci_req_run(&req, get_clock_info_complete);
5517 mgmt_pending_remove(cmd);
5520 hci_dev_unlock(hdev);
/* Return true if an LE connection to the given address and address type
 * exists and is in the BT_CONNECTED state.
 */
5524 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5526 struct hci_conn *conn;
5528 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5532 if (conn->dst_type != type)
5535 if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock */
/* Create (or look up) the LE connection parameters for an address and
 * set its auto-connect policy, moving the entry onto the matching
 * action list (pend_le_conns for connection attempts, pend_le_reports
 * for report-only tracking).
 */
5542 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
5543 u8 addr_type, u8 auto_connect)
5545 struct hci_conn_params *params;
5547 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do when the policy is unchanged. */
5551 if (params->auto_connect == auto_connect)
5554 list_del_init(&params->action);
5556 switch (auto_connect) {
5557 case HCI_AUTO_CONN_DISABLED:
5558 case HCI_AUTO_CONN_LINK_LOSS:
5559 /* If auto connect is being disabled when we're trying to
5560 * connect to device, keep connecting.
5562 if (params->explicit_connect)
5563 list_add(&params->action, &hdev->pend_le_conns);
5565 case HCI_AUTO_CONN_REPORT:
5566 if (params->explicit_connect)
5567 list_add(&params->action, &hdev->pend_le_conns);
5569 list_add(&params->action, &hdev->pend_le_reports);
5571 case HCI_AUTO_CONN_DIRECT:
5572 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a connection attempt if none is established yet. */
5573 if (!is_connected(hdev, addr, addr_type))
5574 list_add(&params->action, &hdev->pend_le_conns);
5578 params->auto_connect = auto_connect;
5580 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the Device Added mgmt event for the given address/type/action,
 * skipping the socket that issued the originating command.
 */
5586 static void device_added(struct sock *sk, struct hci_dev *hdev,
5587 bdaddr_t *bdaddr, u8 type, u8 action)
5589 struct mgmt_ev_device_added ev;
5591 bacpy(&ev.addr.bdaddr, bdaddr);
5592 ev.addr.type = type;
5595 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connections) is
 * supported; the address is put on the whitelist and page scanning is
 * updated. For LE identity addresses the action selects the
 * auto-connect policy (0x02 always, 0x01 direct, otherwise report) via
 * hci_conn_params_set(), followed by a background scan update. A Device
 * Added event is emitted on success.
 */
5598 static int add_device(struct sock *sk, struct hci_dev *hdev,
5599 void *data, u16 len)
5601 struct mgmt_cp_add_device *cp = data;
5602 u8 auto_conn, addr_type;
5605 BT_DBG("%s", hdev->name);
5607 if (!bdaddr_type_is_valid(cp->addr.type) ||
5608 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5609 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5610 MGMT_STATUS_INVALID_PARAMS,
5611 &cp->addr, sizeof(cp->addr));
5613 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5614 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5615 MGMT_STATUS_INVALID_PARAMS,
5616 &cp->addr, sizeof(cp->addr));
5620 if (cp->addr.type == BDADDR_BREDR) {
5621 /* Only incoming connections action is supported for now */
5622 if (cp->action != 0x01) {
5623 err = mgmt_cmd_complete(sk, hdev->id,
5625 MGMT_STATUS_INVALID_PARAMS,
5626 &cp->addr, sizeof(cp->addr));
5630 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5635 hci_req_update_scan(hdev);
5640 addr_type = le_addr_type(cp->addr.type);
5642 if (cp->action == 0x02)
5643 auto_conn = HCI_AUTO_CONN_ALWAYS;
5644 else if (cp->action == 0x01)
5645 auto_conn = HCI_AUTO_CONN_DIRECT;
5647 auto_conn = HCI_AUTO_CONN_REPORT;
5649 /* Kernel internally uses conn_params with resolvable private
5650 * address, but Add Device allows only identity addresses.
5651 * Make sure it is enforced before calling
5652 * hci_conn_params_lookup.
5654 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5655 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5656 MGMT_STATUS_INVALID_PARAMS,
5657 &cp->addr, sizeof(cp->addr));
5661 /* If the connection parameters don't exist for this device,
5662 * they will be created and configured with defaults.
5664 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5666 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5667 MGMT_STATUS_FAILED, &cp->addr,
5672 hci_update_background_scan(hdev);
5675 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5677 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5678 MGMT_STATUS_SUCCESS, &cp->addr,
5682 hci_dev_unlock(hdev);
/* Emit the Device Removed mgmt event for the given address/type,
 * skipping the socket that issued the originating command.
 */
5686 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5687 bdaddr_t *bdaddr, u8 type)
5689 struct mgmt_ev_device_removed ev;
5691 bacpy(&ev.addr.bdaddr, bdaddr);
5692 ev.addr.type = type;
5694 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT Remove Device command handler.
 *
 * With a specific cp->addr, removes that one device from the BR/EDR
 * whitelist or from the LE connection parameter list; with BDADDR_ANY
 * (and cp->addr.type == 0) it flushes all whitelist entries and all
 * non-disabled LE connection parameters.
 *
 * FIX: restored "&params->action" / "&params->list", which had been
 * corrupted to mojibake ("¶ms->...") by an HTML-entity mangling of
 * "&para" — the corrupted form does not compile.
 *
 * NOTE(review): this extract has interior lines elided (the embedded
 * original line numbers are non-contiguous); verify control flow
 * against upstream net/bluetooth/mgmt.c.
 */
5697 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5698 void *data, u16 len)
5700 struct mgmt_cp_remove_device *cp = data;
5703 BT_DBG("%s", hdev->name);
/* Non-ANY address: remove exactly one device. */
5707 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5708 struct hci_conn_params *params;
5711 if (!bdaddr_type_is_valid(cp->addr.type)) {
5712 err = mgmt_cmd_complete(sk, hdev->id,
5713 MGMT_OP_REMOVE_DEVICE,
5714 MGMT_STATUS_INVALID_PARAMS,
5715 &cp->addr, sizeof(cp->addr));
5719 if (cp->addr.type == BDADDR_BREDR) {
5720 err = hci_bdaddr_list_del(&hdev->whitelist,
5724 err = mgmt_cmd_complete(sk, hdev->id,
5725 MGMT_OP_REMOVE_DEVICE,
5726 MGMT_STATUS_INVALID_PARAMS,
5732 hci_req_update_scan(hdev);
5734 device_removed(sk, hdev, &cp->addr.bdaddr,
5739 addr_type = le_addr_type(cp->addr.type);
5741 /* Kernel internally uses conn_params with resolvable private
5742 * address, but Remove Device allows only identity addresses.
5743 * Make sure it is enforced before calling
5744 * hci_conn_params_lookup.
5746 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5747 err = mgmt_cmd_complete(sk, hdev->id,
5748 MGMT_OP_REMOVE_DEVICE,
5749 MGMT_STATUS_INVALID_PARAMS,
5750 &cp->addr, sizeof(cp->addr));
5754 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5757 err = mgmt_cmd_complete(sk, hdev->id,
5758 MGMT_OP_REMOVE_DEVICE,
5759 MGMT_STATUS_INVALID_PARAMS,
5760 &cp->addr, sizeof(cp->addr));
/* DISABLED/EXPLICIT entries are not removable via this command. */
5764 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
5765 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
5766 err = mgmt_cmd_complete(sk, hdev->id,
5767 MGMT_OP_REMOVE_DEVICE,
5768 MGMT_STATUS_INVALID_PARAMS,
5769 &cp->addr, sizeof(cp->addr));
/* Unlink from both the pending-action list and the params list. */
5773 list_del(&params->action);
5774 list_del(&params->list);
5776 hci_update_background_scan(hdev);
5778 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wipe-all path. */
5780 struct hci_conn_params *p, *tmp;
5781 struct bdaddr_list *b, *btmp;
/* A non-zero type with BDADDR_ANY is rejected. */
5783 if (cp->addr.type) {
5784 err = mgmt_cmd_complete(sk, hdev->id,
5785 MGMT_OP_REMOVE_DEVICE,
5786 MGMT_STATUS_INVALID_PARAMS,
5787 &cp->addr, sizeof(cp->addr));
5791 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5792 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5797 hci_req_update_scan(hdev);
5799 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5800 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5802 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Entries with a pending explicit connect are downgraded, not freed. */
5803 if (p->explicit_connect) {
5804 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
5807 list_del(&p->action);
5812 BT_DBG("All LE connection parameters were removed");
5814 hci_update_background_scan(hdev);
5818 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5819 MGMT_STATUS_SUCCESS, &cp->addr,
5822 hci_dev_unlock(hdev);
/* MGMT Load Connection Parameters command handler.
 *
 * Validates the parameter count against the maximum that fits in a u16
 * payload and against the actual message length, clears disabled
 * parameter entries, then installs each well-formed LE connection
 * parameter set (invalid entries are logged and skipped, not fatal).
 *
 * FIX: restored "&param->addr.bdaddr" in the two call sites where it
 * had been corrupted to mojibake ("¶m->addr.bdaddr") by HTML-entity
 * mangling of "&para" — the corrupted form does not compile.
 *
 * NOTE(review): interior lines are elided in this extract (embedded
 * original line numbers jump); verify against upstream mgmt.c.
 */
5826 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5829 struct mgmt_cp_load_conn_param *cp = data;
5830 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5831 sizeof(struct mgmt_conn_param));
5832 u16 param_count, expected_len;
5835 if (!lmp_le_capable(hdev))
5836 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5837 MGMT_STATUS_NOT_SUPPORTED);
5839 param_count = __le16_to_cpu(cp->param_count);
5840 if (param_count > max_param_count) {
5841 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
5843 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5844 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload length exactly. */
5847 expected_len = struct_size(cp, params, param_count);
5848 if (expected_len != len) {
5849 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
5851 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5852 MGMT_STATUS_INVALID_PARAMS);
5855 BT_DBG("%s param_count %u", hdev->name, param_count);
5859 hci_conn_params_clear_disabled(hdev);
5861 for (i = 0; i < param_count; i++) {
5862 struct mgmt_conn_param *param = &cp->params[i];
5863 struct hci_conn_params *hci_param;
5864 u16 min, max, latency, timeout;
5867 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
/* Only LE public/random identity types are accepted per entry. */
5870 if (param->addr.type == BDADDR_LE_PUBLIC) {
5871 addr_type = ADDR_LE_DEV_PUBLIC;
5872 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5873 addr_type = ADDR_LE_DEV_RANDOM;
5875 bt_dev_err(hdev, "ignoring invalid connection parameters");
5879 min = le16_to_cpu(param->min_interval);
5880 max = le16_to_cpu(param->max_interval);
5881 latency = le16_to_cpu(param->latency);
5882 timeout = le16_to_cpu(param->timeout);
5884 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5885 min, max, latency, timeout);
5887 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5888 bt_dev_err(hdev, "ignoring invalid connection parameters");
5892 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5895 bt_dev_err(hdev, "failed to add connection parameters");
5899 hci_param->conn_min_interval = min;
5900 hci_param->conn_max_interval = max;
5901 hci_param->conn_latency = latency;
5902 hci_param->supervision_timeout = timeout;
5905 hci_dev_unlock(hdev);
5907 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT Set External Configuration command handler.
 *
 * Rejected while powered; cp->config must be 0x00 or 0x01; requires the
 * controller to advertise HCI_QUIRK_EXTERNAL_CONFIG. Toggles the
 * HCI_EXT_CONFIGURED flag and, when the configured state transitions,
 * re-registers the management index (removed then re-added).
 */
5911 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5912 void *data, u16 len)
5914 struct mgmt_cp_set_external_config *cp = data;
5918 BT_DBG("%s", hdev->name);
5920 if (hdev_is_powered(hdev))
5921 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5922 MGMT_STATUS_REJECTED);
5924 if (cp->config != 0x00 && cp->config != 0x01)
5925 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5926 MGMT_STATUS_INVALID_PARAMS);
5928 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5929 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5930 MGMT_STATUS_NOT_SUPPORTED);
5935 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
5937 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
5939 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5946 err = new_options(hdev, sk);
/* Index transition: UNCONFIGURED state no longer matches reality. */
5948 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
5949 mgmt_index_removed(hdev);
5951 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
5952 hci_dev_set_flag(hdev, HCI_CONFIG);
5953 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
5955 queue_work(hdev->req_workqueue, &hdev->power_on);
5957 set_bit(HCI_RAW, &hdev->flags);
5958 mgmt_index_added(hdev);
5963 hci_dev_unlock(hdev);
/* MGMT Set Public Address command handler.
 *
 * Rejected while powered; the address must be non-zero and the driver
 * must provide a set_bdaddr callback. Stores the address and, if the
 * device thereby becomes configured, re-registers the index and kicks
 * off an auto-power-on cycle.
 */
5967 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5968 void *data, u16 len)
5970 struct mgmt_cp_set_public_address *cp = data;
5974 BT_DBG("%s", hdev->name);
5976 if (hdev_is_powered(hdev))
5977 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5978 MGMT_STATUS_REJECTED);
5980 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5981 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5982 MGMT_STATUS_INVALID_PARAMS);
5984 if (!hdev->set_bdaddr)
5985 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5986 MGMT_STATUS_NOT_SUPPORTED);
5990 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5991 bacpy(&hdev->public_addr, &cp->bdaddr);
5993 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6000 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6001 err = new_options(hdev, sk);
6003 if (is_configured(hdev)) {
6004 mgmt_index_removed(hdev);
6006 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6008 hci_dev_set_flag(hdev, HCI_CONFIG);
6009 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6011 queue_work(hdev->req_workqueue, &hdev->power_on);
6015 hci_dev_unlock(hdev);
/* HCI request completion handler for Read Local OOB Extended Data.
 *
 * Parses either the legacy (hash/rand 192) or the extended
 * (192 + 256-bit) OOB reply, builds the EIR-encoded mgmt response and
 * completes the pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command. On
 * success it also emits the Local OOB Data Updated event to other
 * listeners that opted into HCI_MGMT_OOB_DATA_EVENTS.
 */
6019 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
6020 u16 opcode, struct sk_buff *skb)
6022 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
6023 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
6024 u8 *h192, *r192, *h256, *r256;
6025 struct mgmt_pending_cmd *cmd;
6029 BT_DBG("%s status %u", hdev->name, status);
6031 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
6035 mgmt_cp = cmd->param;
6038 status = mgmt_status(status);
/* Legacy reply: only the P-192 hash and randomizer are present. */
6045 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
6046 struct hci_rp_read_local_oob_data *rp;
6048 if (skb->len != sizeof(*rp)) {
6049 status = MGMT_STATUS_FAILED;
6052 status = MGMT_STATUS_SUCCESS;
6053 rp = (void *)skb->data;
6055 eir_len = 5 + 18 + 18;
/* Extended reply: P-192 and P-256 values (P-192 dropped in SC-only). */
6062 struct hci_rp_read_local_oob_ext_data *rp;
6064 if (skb->len != sizeof(*rp)) {
6065 status = MGMT_STATUS_FAILED;
6068 status = MGMT_STATUS_SUCCESS;
6069 rp = (void *)skb->data;
6071 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6072 eir_len = 5 + 18 + 18;
6076 eir_len = 5 + 18 + 18 + 18 + 18;
6086 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
6093 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
6094 hdev->dev_class, 3);
6097 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6098 EIR_SSP_HASH_C192, h192, 16);
6099 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6100 EIR_SSP_RAND_R192, r192, 16);
6104 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6105 EIR_SSP_HASH_C256, h256, 16);
6106 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6107 EIR_SSP_RAND_R256, r256, 16);
6111 mgmt_rp->type = mgmt_cp->type;
6112 mgmt_rp->eir_len = cpu_to_le16(eir_len);
6114 err = mgmt_cmd_complete(cmd->sk, hdev->id,
6115 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
6116 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
6117 if (err < 0 || status)
/* Exclude the requester from the broadcast; it got the reply above. */
6120 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
6122 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6123 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
6124 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
6127 mgmt_pending_remove(cmd);
/* Queue an HCI Read Local OOB (Extended) Data request on behalf of a
 * pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command. The extended HCI
 * opcode is used when BR/EDR Secure Connections is enabled; the pending
 * command is dropped again if the request fails to run.
 */
6130 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6131 struct mgmt_cp_read_local_oob_ext_data *cp)
6133 struct mgmt_pending_cmd *cmd;
6134 struct hci_request req;
6137 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6142 hci_req_init(&req, hdev);
6144 if (bredr_sc_enabled(hdev))
6145 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6147 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6149 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6151 mgmt_pending_remove(cmd);
/* MGMT Read Local OOB Extended Data command handler.
 *
 * For BR/EDR (with SSP enabled) the work is delegated to the HCI layer
 * via read_local_ssp_oob_req() and completed asynchronously. For LE the
 * reply is built synchronously: address + role, optional SC confirm/
 * random values, and advertising flags. Privacy mode is rejected since
 * the active RPA cannot be reported (see inline comment). On success the
 * requester is subscribed to OOB data events and other subscribers are
 * notified via MGMT_EV_LOCAL_OOB_DATA_UPDATED.
 */
6158 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6159 void *data, u16 data_len)
6161 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6162 struct mgmt_rp_read_local_oob_ext_data *rp;
6165 u8 status, flags, role, addr[7], hash[16], rand[16];
6168 BT_DBG("%s", hdev->name);
/* Pre-compute eir_len / status per requested transport type. */
6170 if (hdev_is_powered(hdev)) {
6172 case BIT(BDADDR_BREDR):
6173 status = mgmt_bredr_support(hdev);
6179 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6180 status = mgmt_le_support(hdev);
6184 eir_len = 9 + 3 + 18 + 18 + 3;
6187 status = MGMT_STATUS_INVALID_PARAMS;
6192 status = MGMT_STATUS_NOT_POWERED;
6196 rp_len = sizeof(*rp) + eir_len;
6197 rp = kmalloc(rp_len, GFP_ATOMIC);
6208 case BIT(BDADDR_BREDR):
6209 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
/* Async path: HCI request owns completion; return directly. */
6210 err = read_local_ssp_oob_req(hdev, sk, cp);
6211 hci_dev_unlock(hdev);
6215 status = MGMT_STATUS_FAILED;
6218 eir_len = eir_append_data(rp->eir, eir_len,
6220 hdev->dev_class, 3);
6223 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6224 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6225 smp_generate_oob(hdev, hash, rand) < 0) {
6226 hci_dev_unlock(hdev);
6227 status = MGMT_STATUS_FAILED;
6231 /* This should return the active RPA, but since the RPA
6232 * is only programmed on demand, it is really hard to fill
6233 * this in at the moment. For now disallow retrieving
6234 * local out-of-band data when privacy is in use.
6236 * Returning the identity address will not help here since
6237 * pairing happens before the identity resolving key is
6238 * known and thus the connection establishment happens
6239 * based on the RPA and not the identity address.
6241 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6242 hci_dev_unlock(hdev);
6243 status = MGMT_STATUS_REJECTED;
/* Choose static vs public address for the LE Bluetooth Device
 * Address EIR field (7th byte is the address-type flag).
 */
6247 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6248 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6249 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6250 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6251 memcpy(addr, &hdev->static_addr, 6);
6254 memcpy(addr, &hdev->bdaddr, 6);
6258 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6259 addr, sizeof(addr));
6261 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6266 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6267 &role, sizeof(role));
6269 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6270 eir_len = eir_append_data(rp->eir, eir_len,
6272 hash, sizeof(hash));
6274 eir_len = eir_append_data(rp->eir, eir_len,
6276 rand, sizeof(rand));
6279 flags = mgmt_get_adv_discov_flags(hdev);
6281 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6282 flags |= LE_AD_NO_BREDR;
6284 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6285 &flags, sizeof(flags));
6289 hci_dev_unlock(hdev);
6291 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6293 status = MGMT_STATUS_SUCCESS;
6296 rp->type = cp->type;
6297 rp->eir_len = cpu_to_le16(eir_len);
6299 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6300 status, rp, sizeof(*rp) + eir_len);
6301 if (err < 0 || status)
6304 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6305 rp, sizeof(*rp) + eir_len,
6306 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of MGMT advertising flags this controller supports.
 * TX power is reported if the controller exposes a valid adv TX power or
 * supports extended advertising; secondary-PHY flags depend on extended
 * advertising plus the corresponding LE PHY feature bits.
 */
6314 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6318 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6319 flags |= MGMT_ADV_FLAG_DISCOV;
6320 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6321 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6322 flags |= MGMT_ADV_FLAG_APPEARANCE;
6323 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
6325 /* In extended adv TX_POWER returned from Set Adv Param
6326 * will be always valid.
6328 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
6329 ext_adv_capable(hdev))
6330 flags |= MGMT_ADV_FLAG_TX_POWER;
6332 if (ext_adv_capable(hdev)) {
6333 flags |= MGMT_ADV_FLAG_SEC_1M;
6335 if (hdev->le_features[1] & HCI_LE_PHY_2M)
6336 flags |= MGMT_ADV_FLAG_SEC_2M;
6338 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
6339 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT Read Advertising Features command handler.
 *
 * Requires LE capability. Replies with supported flags, max data
 * lengths, max/current instance counts, and the list of configured
 * advertising instance IDs appended after the fixed reply.
 */
6345 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6346 void *data, u16 data_len)
6348 struct mgmt_rp_read_adv_features *rp;
6351 struct adv_info *adv_instance;
6352 u32 supported_flags;
6355 BT_DBG("%s", hdev->name);
6357 if (!lmp_le_capable(hdev))
6358 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6359 MGMT_STATUS_REJECTED);
/* One trailing byte per existing instance. */
6363 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
6364 rp = kmalloc(rp_len, GFP_ATOMIC);
6366 hci_dev_unlock(hdev);
6370 supported_flags = get_supported_adv_flags(hdev);
6372 rp->supported_flags = cpu_to_le32(supported_flags);
6373 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6374 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6375 rp->max_instances = HCI_MAX_ADV_INSTANCES;
6376 rp->num_instances = hdev->adv_instance_cnt;
6378 instance = rp->instance;
6379 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
6380 *instance = adv_instance->instance;
6384 hci_dev_unlock(hdev);
6386 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6387 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Return the encoded EIR length of the local name by appending it into
 * a scratch buffer (short-name max plus 3 bytes of EIR header).
 */
6394 static u8 calculate_name_len(struct hci_dev *hdev)
6396 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
6398 return append_local_name(hdev, buf, 0);
/* Compute the maximum TLV payload a caller may supply for adv or scan
 * response data, subtracting space consumed by kernel-managed fields
 * (flags, TX power, local name, appearance) selected by @adv_flags.
 */
6401 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6404 u8 max_len = HCI_MAX_AD_LENGTH;
6407 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6408 MGMT_ADV_FLAG_LIMITED_DISCOV |
6409 MGMT_ADV_FLAG_MANAGED_FLAGS))
6412 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6415 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6416 max_len -= calculate_name_len(hdev)
6418 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel manages the Flags AD field for these adv flags. */
6425 static bool flags_managed(u32 adv_flags)
6427 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6428 MGMT_ADV_FLAG_LIMITED_DISCOV |
6429 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel manages the TX Power AD field. */
6432 static bool tx_power_managed(u32 adv_flags)
6434 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel manages the Local Name AD field. */
6437 static bool name_managed(u32 adv_flags)
6439 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel manages the Appearance AD field. */
6442 static bool appearance_managed(u32 adv_flags)
6444 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate caller-supplied advertising / scan-response TLV data:
 * it must fit within tlv_data_max_len(), each field length must stay
 * inside the buffer, and fields that the kernel manages itself (flags,
 * TX power, names, appearance — per @adv_flags) are rejected.
 */
6447 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6448 u8 len, bool is_adv_data)
6453 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6458 /* Make sure that the data is correctly formatted. */
6459 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6462 if (data[i + 1] == EIR_FLAGS &&
6463 (!is_adv_data || flags_managed(adv_flags)))
6466 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6469 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6472 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6475 if (data[i + 1] == EIR_APPEARANCE &&
6476 appearance_managed(adv_flags))
6479 /* If the current field length would exceed the total data
6480 * length, then it's invalid.
6482 if (i + cur_len >= len)
/* HCI completion handler for Add Advertising.
 *
 * On failure, tears down every instance still marked pending (cancelling
 * the adv timeout if the current instance is affected) and emits
 * Advertising Removed events; then completes the pending mgmt command
 * with the mapped status.
 */
6489 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6492 struct mgmt_pending_cmd *cmd;
6493 struct mgmt_cp_add_advertising *cp;
6494 struct mgmt_rp_add_advertising rp;
6495 struct adv_info *adv_instance, *n;
6498 BT_DBG("status %d", status);
6502 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
6504 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
6505 if (!adv_instance->pending)
6509 adv_instance->pending = false;
6513 instance = adv_instance->instance;
6515 if (hdev->cur_adv_instance == instance)
6516 cancel_adv_timeout(hdev);
6518 hci_remove_adv_instance(hdev, instance);
6519 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
6526 rp.instance = cp->instance;
6529 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6530 mgmt_status(status));
6532 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6533 mgmt_status(status), &rp, sizeof(rp));
6535 mgmt_pending_remove(cmd);
6538 hci_dev_unlock(hdev);
/* MGMT Add Advertising command handler.
 *
 * Validates the instance number, payload length, and flags (including
 * mutual exclusivity of the secondary-PHY flags — at most one bit of
 * MGMT_ADV_FLAG_SEC_MASK may be set), registers/overwrites the
 * advertising instance, and schedules it via an HCI request when the
 * device is powered and no HCI_ADVERTISING userspace override is active.
 * Completion is reported asynchronously by add_advertising_complete().
 */
6541 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6542 void *data, u16 data_len)
6544 struct mgmt_cp_add_advertising *cp = data;
6545 struct mgmt_rp_add_advertising rp;
6547 u32 supported_flags, phy_flags;
6549 u16 timeout, duration;
6550 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
6551 u8 schedule_instance = 0;
6552 struct adv_info *next_instance;
6554 struct mgmt_pending_cmd *cmd;
6555 struct hci_request req;
6557 BT_DBG("%s", hdev->name);
6559 status = mgmt_le_support(hdev);
6561 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6564 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6566 MGMT_STATUS_INVALID_PARAMS);
6568 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
6569 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6570 MGMT_STATUS_INVALID_PARAMS);
6572 flags = __le32_to_cpu(cp->flags);
6573 timeout = __le16_to_cpu(cp->timeout);
6574 duration = __le16_to_cpu(cp->duration);
6576 /* The current implementation only supports a subset of the specified
6577 * flags. Also need to check mutual exclusiveness of sec flags.
6579 supported_flags = get_supported_adv_flags(hdev);
6580 phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
6581 if (flags & ~supported_flags ||
6582 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
6583 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6584 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs the adv timer, which requires a powered device. */
6588 if (timeout && !hdev_is_powered(hdev)) {
6589 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6590 MGMT_STATUS_REJECTED);
6594 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6595 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6596 pending_find(MGMT_OP_SET_LE, hdev)) {
6597 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6602 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6603 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6604 cp->scan_rsp_len, false)) {
6605 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6606 MGMT_STATUS_INVALID_PARAMS);
6610 err = hci_add_adv_instance(hdev, cp->instance, flags,
6611 cp->adv_data_len, cp->data,
6613 cp->data + cp->adv_data_len,
6616 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6617 MGMT_STATUS_FAILED);
6621 /* Only trigger an advertising added event if a new instance was
6624 if (hdev->adv_instance_cnt > prev_instance_cnt)
6625 mgmt_advertising_added(sk, hdev, cp->instance)
6627 if (hdev->cur_adv_instance == cp->instance) {
6628 /* If the currently advertised instance is being changed then
6629 * cancel the current advertising and schedule the next
6630 * instance. If there is only one instance then the overridden
6631 * advertising data will be visible right away.
6633 cancel_adv_timeout(hdev);
6635 next_instance = hci_get_next_instance(hdev, cp->instance);
6637 schedule_instance = next_instance->instance;
6638 } else if (!hdev->adv_instance_timeout) {
6639 /* Immediately advertise the new instance if no other
6640 * instance is currently being advertised.
6642 schedule_instance = cp->instance;
6645 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
6646 * there is no instance to be advertised then we have no HCI
6647 * communication to make. Simply return.
6649 if (!hdev_is_powered(hdev) ||
6650 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6651 !schedule_instance) {
6652 rp.instance = cp->instance;
6653 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6654 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6658 /* We're good to go, update advertising data, parameters, and start
6661 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
6668 hci_req_init(&req, hdev);
6670 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
6673 err = hci_req_run(&req, add_advertising_complete);
6676 mgmt_pending_remove(cmd);
6679 hci_dev_unlock(hdev);
/* HCI completion handler for Remove Advertising. The instance is gone
 * regardless of the disable-advertising status, so the pending command
 * is always completed with success (see inline comment).
 */
6684 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6687 struct mgmt_pending_cmd *cmd;
6688 struct mgmt_cp_remove_advertising *cp;
6689 struct mgmt_rp_remove_advertising rp;
6691 BT_DBG("status %d", status);
6695 /* A failure status here only means that we failed to disable
6696 * advertising. Otherwise, the advertising instance has been removed,
6697 * so report success.
6699 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6704 rp.instance = cp->instance;
6706 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6708 mgmt_pending_remove(cmd);
6711 hci_dev_unlock(hdev);
/* MGMT Remove Advertising command handler.
 *
 * Instance 0 means "remove all". Validates that the named instance
 * exists and that no conflicting Add/Remove Advertising or Set LE
 * command is pending, clears the instance(s), disables advertising if
 * none remain, and either completes immediately (no HCI work needed)
 * or defers to remove_advertising_complete().
 */
6714 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
6715 void *data, u16 data_len)
6717 struct mgmt_cp_remove_advertising *cp = data;
6718 struct mgmt_rp_remove_advertising rp;
6719 struct mgmt_pending_cmd *cmd;
6720 struct hci_request req;
6723 BT_DBG("%s", hdev->name);
6727 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
6728 err = mgmt_cmd_status(sk, hdev->id,
6729 MGMT_OP_REMOVE_ADVERTISING,
6730 MGMT_STATUS_INVALID_PARAMS);
6734 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6735 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6736 pending_find(MGMT_OP_SET_LE, hdev)) {
6737 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6742 if (list_empty(&hdev->adv_instances)) {
6743 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6744 MGMT_STATUS_INVALID_PARAMS);
6748 hci_req_init(&req, hdev);
6750 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
6752 if (list_empty(&hdev->adv_instances))
6753 __hci_req_disable_advertising(&req);
6755 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
6756 * flag is set or the device isn't powered then we have no HCI
6757 * communication to make. Simply return.
6759 if (skb_queue_empty(&req.cmd_q) ||
6760 !hdev_is_powered(hdev) ||
6761 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6762 hci_req_purge(&req);
6763 rp.instance = cp->instance;
6764 err = mgmt_cmd_complete(sk, hdev->id,
6765 MGMT_OP_REMOVE_ADVERTISING,
6766 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6770 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
6777 err = hci_req_run(&req, remove_advertising_complete);
6779 mgmt_pending_remove(cmd);
6782 hci_dev_unlock(hdev);
/* MGMT Get Advertising Size Information command handler.
 *
 * Purely computational: validates instance and flags, then reports the
 * maximum adv-data and scan-response payload sizes left after the
 * kernel-managed fields implied by the requested flags.
 */
6787 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6788 void *data, u16 data_len)
6790 struct mgmt_cp_get_adv_size_info *cp = data;
6791 struct mgmt_rp_get_adv_size_info rp;
6792 u32 flags, supported_flags;
6795 BT_DBG("%s", hdev->name);
6797 if (!lmp_le_capable(hdev))
6798 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6799 MGMT_STATUS_REJECTED);
6801 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6803 MGMT_STATUS_INVALID_PARAMS);
6805 flags = __le32_to_cpu(cp->flags);
6807 /* The current implementation only supports a subset of the specified
6810 supported_flags = get_supported_adv_flags(hdev);
6811 if (flags & ~supported_flags)
6812 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6813 MGMT_STATUS_INVALID_PARAMS);
6815 rp.instance = cp->instance;
6816 rp.flags = cp->flags;
6817 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
6818 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
6820 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6821 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for MGMT opcodes: indexed by opcode, each entry gives
 * the handler, the fixed (minimum) parameter size, and optional flags
 * (HCI_MGMT_UNTRUSTED: callable without CAP_NET_ADMIN;
 * HCI_MGMT_UNCONFIGURED: allowed on unconfigured controllers).
 * Entry order must match the opcode numbering in mgmt.h.
 */
6826 static const struct hci_mgmt_handler mgmt_handlers[] = {
6827 { NULL }, /* 0x0000 (no command) */
6828 { read_version, MGMT_READ_VERSION_SIZE,
6830 HCI_MGMT_UNTRUSTED },
6831 { read_commands, MGMT_READ_COMMANDS_SIZE,
6833 HCI_MGMT_UNTRUSTED },
6834 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6836 HCI_MGMT_UNTRUSTED },
6837 { read_controller_info, MGMT_READ_INFO_SIZE,
6838 HCI_MGMT_UNTRUSTED },
6839 { set_powered, MGMT_SETTING_SIZE },
6840 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
6841 { set_connectable, MGMT_SETTING_SIZE },
6842 { set_fast_connectable, MGMT_SETTING_SIZE },
6843 { set_bondable, MGMT_SETTING_SIZE },
6844 { set_link_security, MGMT_SETTING_SIZE },
6845 { set_ssp, MGMT_SETTING_SIZE },
6846 { set_hs, MGMT_SETTING_SIZE },
6847 { set_le, MGMT_SETTING_SIZE },
6848 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
6849 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
6850 { add_uuid, MGMT_ADD_UUID_SIZE },
6851 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
6852 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6854 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6856 { disconnect, MGMT_DISCONNECT_SIZE },
6857 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
6858 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
6859 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6860 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
6861 { pair_device, MGMT_PAIR_DEVICE_SIZE },
6862 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6863 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
6864 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
6865 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6866 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
6867 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6868 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6869 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6871 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6872 { start_discovery, MGMT_START_DISCOVERY_SIZE },
6873 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
6874 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
6875 { block_device, MGMT_BLOCK_DEVICE_SIZE },
6876 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
6877 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
6878 { set_advertising, MGMT_SETTING_SIZE },
6879 { set_bredr, MGMT_SETTING_SIZE },
6880 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
6881 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
6882 { set_secure_conn, MGMT_SETTING_SIZE },
6883 { set_debug_keys, MGMT_SETTING_SIZE },
6884 { set_privacy, MGMT_SET_PRIVACY_SIZE },
6885 { load_irks, MGMT_LOAD_IRKS_SIZE,
6887 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
6888 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
6889 { add_device, MGMT_ADD_DEVICE_SIZE },
6890 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
6891 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6893 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6895 HCI_MGMT_UNTRUSTED },
6896 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6897 HCI_MGMT_UNCONFIGURED |
6898 HCI_MGMT_UNTRUSTED },
6899 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6900 HCI_MGMT_UNCONFIGURED },
6901 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6902 HCI_MGMT_UNCONFIGURED },
6903 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6905 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
6906 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
6908 HCI_MGMT_UNTRUSTED },
6909 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
6910 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
6912 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
6913 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
6914 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
6915 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
6916 HCI_MGMT_UNTRUSTED },
6917 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
6918 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
6919 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
/* Tizen vendor-specific MGMT opcode dispatch table (mgmt_tizen.h);
 * same entry layout as mgmt_handlers above.
 */
6923 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
6924 { NULL }, /* 0x0000 (no command) */
/* Announce a newly registered controller to mgmt listeners: raw-quirk
 * devices are skipped; unconfigured vs. configured controllers use the
 * Unconf Index Added vs. Index Added events, and an Extended Index Added
 * event is always sent to extended-index subscribers.
 */
6928 void mgmt_index_added(struct hci_dev *hdev)
6930 struct mgmt_ev_ext_index ev;
6932 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6935 switch (hdev->dev_type) {
6937 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6938 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6939 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6942 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6943 HCI_MGMT_INDEX_EVENTS);
6956 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
6957 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal to mgmt listeners. All pending mgmt
 * commands are failed with MGMT_STATUS_INVALID_INDEX first; the event
 * mirrors mgmt_index_added() (unconf vs. regular plus extended).
 */
6960 void mgmt_index_removed(struct hci_dev *hdev)
6962 struct mgmt_ev_ext_index ev;
6963 u8 status = MGMT_STATUS_INVALID_INDEX;
6965 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6968 switch (hdev->dev_type) {
6970 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6972 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6973 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
6974 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6977 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
6978 HCI_MGMT_INDEX_EVENTS);
6991 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
6992 HCI_MGMT_EXT_INDEX_EVENTS);
6995 /* This function requires the caller holds hdev->lock */
/* Re-sort every LE conn param entry onto the correct pending list
 * (connections vs. reports) after power-on, according to its
 * auto_connect policy.
 */
6996 static void restart_le_actions(struct hci_dev *hdev)
6998 struct hci_conn_params *p;
7000 list_for_each_entry(p, &hdev->le_conn_params, list) {
7001 /* Needed for AUTO_OFF case where might not "really"
7002 * have been powered off.
7004 list_del_init(&p->action);
7006 switch (p->auto_connect) {
7007 case HCI_AUTO_CONN_DIRECT:
7008 case HCI_AUTO_CONN_ALWAYS:
7009 list_add(&p->action, &hdev->pend_le_conns);
7011 case HCI_AUTO_CONN_REPORT:
7012 list_add(&p->action, &hdev->pend_le_reports);
/* Called when a power-on attempt finishes. On success, restores LE
 * auto-connect actions and background scanning; always completes any
 * pending Set Powered commands and broadcasts New Settings.
 */
7020 void mgmt_power_on(struct hci_dev *hdev, int err)
7022 struct cmd_lookup match = { NULL, hdev };
7024 BT_DBG("err %d", err);
7029 restart_le_actions(hdev);
7030 hci_update_background_scan(hdev);
7033 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7035 new_settings(hdev, match.sk);
7040 hci_dev_unlock(hdev);
/* Power-off notification path: completes pending Set Powered commands,
 * fails all remaining pending commands with INVALID_INDEX (when the
 * device is being unregistered) or NOT_POWERED, clears a non-zero class
 * of device towards listeners, and broadcasts New Settings.
 */
7043 void __mgmt_power_off(struct hci_dev *hdev)
7045 struct cmd_lookup match = { NULL, hdev };
7046 u8 status, zero_cod[] = { 0, 0, 0 };
7048 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7050 /* If the power off is because of hdev unregistration let
7051 * use the appropriate INVALID_INDEX status. Otherwise use
7052 * NOT_POWERED. We cover both scenarios here since later in
7053 * mgmt_index_removed() any hci_conn callbacks will have already
7054 * been triggered, potentially causing misleading DISCONNECTED
7057 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7058 status = MGMT_STATUS_INVALID_INDEX;
7060 status = MGMT_STATUS_NOT_POWERED;
7062 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7064 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
7065 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7066 zero_cod, sizeof(zero_cod),
7067 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
7068 ext_info_changed(hdev, NULL);
7071 new_settings(hdev, match.sk);
/* Report failure of a pending MGMT_OP_SET_POWERED command back to its
 * socket, translating -ERFKILL into MGMT_STATUS_RFKILLED and any other
 * error into the generic MGMT_STATUS_FAILED.
 */
7077 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7079 struct mgmt_pending_cmd *cmd;
7082 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7086 if (err == -ERFKILL)
7087 status = MGMT_STATUS_RFKILLED;
7089 status = MGMT_STATUS_FAILED;
7091 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7093 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a newly created BR/EDR link key so
 * user space can decide whether to persist it (store_hint comes from
 * the elided "persistent" parameter on the continuation line).
 */
7096 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7099 struct mgmt_ev_new_link_key ev;
7101 memset(&ev, 0, sizeof(ev));
7103 ev.store_hint = persistent;
7104 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7105 ev.key.addr.type = BDADDR_BREDR;
7106 ev.key.type = key->type;
7107 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7108 ev.key.pin_len = key->pin_len;
7110 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long term key's type/authenticated fields onto the
 * MGMT_LTK_* constants reported to user space.  The case labels for
 * the first two groups are elided in this excerpt; the final return is
 * the fallback for unknown types.
 */
7113 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7115 switch (ltk->type) {
7118 if (ltk->authenticated)
7119 return MGMT_LTK_AUTHENTICATED;
7120 return MGMT_LTK_UNAUTHENTICATED;
7122 if (ltk->authenticated)
7123 return MGMT_LTK_P256_AUTH;
7124 return MGMT_LTK_P256_UNAUTH;
7125 case SMP_LTK_P256_DEBUG:
7126 return MGMT_LTK_P256_DEBUG;
7129 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a newly distributed LE LTK.
 * store_hint is forced to 0 for non-identity random addresses (top two
 * bits of a static random address are 0b11, i.e. (b[5] & 0xc0) == 0xc0),
 * since keys for resolvable/non-resolvable addresses are useless once
 * the address changes.
 */
7132 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7134 struct mgmt_ev_new_long_term_key ev;
7136 memset(&ev, 0, sizeof(ev));
7138 /* Devices using resolvable or non-resolvable random addresses
7139 * without providing an identity resolving key don't require
7140 * to store long term keys. Their addresses will change the
7143 * Only when a remote device provides an identity address
7144 * make sure the long term key is stored. If the remote
7145 * identity is known, the long term keys are internally
7146 * mapped to the identity address. So allow static random
7147 * and public addresses here.
7149 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7150 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7151 ev.store_hint = 0x00;
7153 ev.store_hint = persistent;
7155 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7156 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7157 ev.key.type = mgmt_ltk_type(key);
7158 ev.key.enc_size = key->enc_size;
7159 ev.key.ediv = key->ediv;
7160 ev.key.rand = key->rand;
/* NOTE(review): the branch body for SMP_LTK (setting the initiator
 * bit per the elided line 7163) is not visible in this excerpt.
 */
7162 if (key->type == SMP_LTK)
7165 /* Make sure we copy only the significant bytes based on the
7166 * encryption key size, and set the rest of the value to zeroes.
7168 memcpy(ev.key.val, key->val, key->enc_size);
7169 memset(ev.key.val + key->enc_size, 0,
7170 sizeof(ev.key.val) - key->enc_size);
7172 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK so user space can store a newly received
 * Identity Resolving Key together with the RPA it resolved.
 */
7175 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7177 struct mgmt_ev_new_irk ev;
7179 memset(&ev, 0, sizeof(ev));
7181 ev.store_hint = persistent;
7183 bacpy(&ev.rpa, &irk->rpa);
7184 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7185 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7186 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7188 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a newly distributed signature resolving
 * key.  As with LTKs, store_hint is zeroed for non-identity random
 * addresses (static random addresses have the top two bits set).
 */
7191 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7194 struct mgmt_ev_new_csrk ev;
7196 memset(&ev, 0, sizeof(ev));
7198 /* Devices using resolvable or non-resolvable random addresses
7199 * without providing an identity resolving key don't require
7200 * to store signature resolving keys. Their addresses will change
7201 * the next time around.
7203 * Only when a remote device provides an identity address
7204 * make sure the signature resolving key is stored. So allow
7205 * static random and public addresses here.
7207 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7208 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7209 ev.store_hint = 0x00;
7211 ev.store_hint = persistent;
7213 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7214 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7215 ev.key.type = csrk->type;
7216 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7218 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM so user space can persist connection
 * parameters a peer requested.  Dropped silently for non-identity
 * addresses, where storing parameters would be pointless.
 */
7221 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7222 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7223 u16 max_interval, u16 latency, u16 timeout)
7225 struct mgmt_ev_new_conn_param ev;
7227 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7230 memset(&ev, 0, sizeof(ev));
7231 bacpy(&ev.addr.bdaddr, bdaddr);
7232 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7233 ev.store_hint = store_hint;
/* All u16 fields are little-endian on the wire. */
7234 ev.min_interval = cpu_to_le16(min_interval);
7235 ev.max_interval = cpu_to_le16(max_interval);
7236 ev.latency = cpu_to_le16(latency);
7237 ev.timeout = cpu_to_le16(timeout);
7239 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED with EIR data appended: either the
 * connection's cached LE advertising data, or (for BR/EDR) the remote
 * name plus class of device.  The backing "buf" declaration is elided
 * in this excerpt; ev points into it and the event is sent with the
 * variable-length tail included.
 */
7242 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7243 u32 flags, u8 *name, u8 name_len)
7246 struct mgmt_ev_device_connected *ev = (void *) buf;
7249 bacpy(&ev->addr.bdaddr, &conn->dst);
7250 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7252 ev->flags = __cpu_to_le32(flags);
7254 /* We must ensure that the EIR Data fields are ordered and
7255 * unique. Keep it simple for now and avoid the problem by not
7256 * adding any BR/EDR data to the LE adv.
7258 if (conn->le_adv_data_len > 0) {
7259 memcpy(&ev->eir[eir_len],
7260 conn->le_adv_data, conn->le_adv_data_len);
7261 eir_len = conn->le_adv_data_len;
7264 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append class of device if it is non-zero. */
7267 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7268 eir_len = eir_append_data(ev->eir, eir_len,
7270 conn->dev_class, 3);
7273 ev->eir_len = cpu_to_le16(eir_len);
7275 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7276 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command with success and remove it.  @data is a struct sock ** used
 * to hand the command's socket back to the caller (assignment elided
 * in this excerpt).
 */
7279 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7281 struct sock **sk = data;
7283 cmd->cmd_complete(cmd, 0);
7288 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: notify user space the device was
 * unpaired, then complete and remove the pending UNPAIR_DEVICE command.
 */
7291 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7293 struct hci_dev *hdev = data;
7294 struct mgmt_cp_unpair_device *cp = cmd->param;
7296 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7298 cmd->cmd_complete(cmd, 0);
7299 mgmt_pending_remove(cmd);
/* Return whether a power-down is in progress, i.e. a SET_POWERED
 * command is pending.  NOTE(review): the check of the command's
 * mgmt_mode payload (cp->val == 0) and the return statements are
 * elided in this excerpt — confirm against the full source.
 */
7302 bool mgmt_powering_down(struct hci_dev *hdev)
7304 struct mgmt_pending_cmd *cmd;
7305 struct mgmt_mode *cp;
7307 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED (unless the disconnect was not
 * mgmt-initiated or the link type is unknown), answer pending
 * DISCONNECT commands, and — if this was the last connection during a
 * power-down — expedite the queued power_off work.
 */
7318 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7319 u8 link_type, u8 addr_type, u8 reason,
7320 bool mgmt_connected)
7322 struct mgmt_ev_device_disconnected ev;
7323 struct sock *sk = NULL;
7325 /* The connection is still in hci_conn_hash so test for 1
7326 * instead of 0 to know if this is the last one.
7328 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7329 cancel_delayed_work(&hdev->power_off);
7330 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7333 if (!mgmt_connected)
7336 if (link_type != ACL_LINK && link_type != LE_LINK)
/* disconnect_rsp() also fills in "sk" with the command's socket. */
7339 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7341 bacpy(&ev.addr.bdaddr, bdaddr);
7342 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7345 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7350 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Handle a failed disconnect attempt: flush pending UNPAIR_DEVICE
 * commands, and if a pending DISCONNECT command matches this address
 * and type, complete it with the translated HCI status.
 */
7354 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7355 u8 link_type, u8 addr_type, u8 status)
7357 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7358 struct mgmt_cp_disconnect *cp;
7359 struct mgmt_pending_cmd *cmd;
7361 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7364 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* Ignore the failure if it is for a different device/address type. */
7370 if (bacmp(bdaddr, &cp->addr.bdaddr))
7373 if (cp->addr.type != bdaddr_type)
7376 cmd->cmd_complete(cmd, mgmt_status(status));
7377 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED with the translated HCI status; if a
 * power-down is pending and this was the last connection, expedite the
 * queued power_off work first.
 */
7380 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7381 u8 addr_type, u8 status)
7383 struct mgmt_ev_connect_failed ev;
7385 /* The connection is still in hci_conn_hash so test for 1
7386 * instead of 0 to know if this is the last one.
7388 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7389 cancel_delayed_work(&hdev->power_off);
7390 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7393 bacpy(&ev.addr.bdaddr, bdaddr);
7394 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7395 ev.status = mgmt_status(status);
7397 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a BR/EDR PIN code request to user space via
 * MGMT_EV_PIN_CODE_REQUEST (the ev.secure assignment is elided in
 * this excerpt).
 */
7400 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7402 struct mgmt_ev_pin_code_request ev;
7404 bacpy(&ev.addr.bdaddr, bdaddr);
7405 ev.addr.type = BDADDR_BREDR;
7408 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the translated HCI
 * status.
 */
7411 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7414 struct mgmt_pending_cmd *cmd;
7416 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7420 cmd->cmd_complete(cmd, mgmt_status(status));
7421 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the translated
 * HCI status.
 */
7424 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7427 struct mgmt_pending_cmd *cmd;
7429 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7433 cmd->cmd_complete(cmd, mgmt_status(status));
7434 mgmt_pending_remove(cmd);
/* Ask user space to confirm a numeric comparison value during pairing
 * by emitting MGMT_EV_USER_CONFIRM_REQUEST.  Returns the mgmt_event()
 * result.
 */
7437 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7438 u8 link_type, u8 addr_type, u32 value,
7441 struct mgmt_ev_user_confirm_request ev;
7443 BT_DBG("%s", hdev->name);
7445 bacpy(&ev.addr.bdaddr, bdaddr);
7446 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7447 ev.confirm_hint = confirm_hint;
7448 ev.value = cpu_to_le32(value);
7450 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask user space to enter a passkey during pairing by emitting
 * MGMT_EV_USER_PASSKEY_REQUEST.  Returns the mgmt_event() result.
 */
7454 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7455 u8 link_type, u8 addr_type)
7457 struct mgmt_ev_user_passkey_request ev;
7459 BT_DBG("%s", hdev->name);
7461 bacpy(&ev.addr.bdaddr, bdaddr);
7462 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7464 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey reply
 * variants: find the pending command for @opcode (trailing parameter,
 * elided here) and complete it with the translated HCI status.
 */
7468 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7469 u8 link_type, u8 addr_type, u8 status,
7472 struct mgmt_pending_cmd *cmd;
7474 cmd = pending_find(opcode, hdev);
7478 cmd->cmd_complete(cmd, mgmt_status(status));
7479 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY command. */
7484 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7485 u8 link_type, u8 addr_type, u8 status)
7487 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7488 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY command. */
7491 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7492 u8 link_type, u8 addr_type, u8 status)
7494 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7496 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY command. */
7499 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7500 u8 link_type, u8 addr_type, u8 status)
7502 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7503 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY command. */
7506 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7507 u8 link_type, u8 addr_type, u8 status)
7509 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7511 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so user space can display the passkey
 * and how many digits the remote side has entered so far.
 */
7514 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7515 u8 link_type, u8 addr_type, u32 passkey,
7518 struct mgmt_ev_passkey_notify ev;
7520 BT_DBG("%s", hdev->name);
7522 bacpy(&ev.addr.bdaddr, bdaddr);
7523 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7524 ev.passkey = __cpu_to_le32(passkey);
7525 ev.entered = entered;
7527 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure: emit MGMT_EV_AUTH_FAILED (targeted
 * at the pairing command's socket if one is pending) and complete the
 * pending pairing command with the translated status.
 */
7530 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7532 struct mgmt_ev_auth_failed ev;
7533 struct mgmt_pending_cmd *cmd;
7534 u8 status = mgmt_status(hci_status);
7536 bacpy(&ev.addr.bdaddr, &conn->dst);
7537 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7540 cmd = find_pairing(conn);
7542 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7543 cmd ? cmd->sk : NULL);
/* NOTE(review): the "if (cmd)" guard around these two lines is elided
 * in this excerpt; cmd may be NULL per the ternary above.
 */
7546 cmd->cmd_complete(cmd, status);
7547 mgmt_pending_remove(cmd);
/* Handle completion of HCI_OP_WRITE_AUTH_ENABLE: on failure, fail all
 * pending SET_LINK_SECURITY commands; on success, sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state, answer
 * pending commands and emit New Settings if the flag changed.
 */
7551 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7553 struct cmd_lookup match = { NULL, hdev };
7557 u8 mgmt_err = mgmt_status(status);
7558 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7559 cmd_status_rsp, &mgmt_err);
7563 if (test_bit(HCI_AUTH, &hdev->flags))
7564 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7566 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7568 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7572 new_settings(hdev, match.sk);
/* Queue an HCI Write Extended Inquiry Response command that clears the
 * EIR data, and zero the cached copy.  No-op if the controller lacks
 * extended inquiry support.
 */
7578 static void clear_eir(struct hci_request *req)
7580 struct hci_dev *hdev = req->hdev;
7581 struct hci_cp_write_eir cp;
7583 if (!lmp_ext_inq_capable(hdev))
7586 memset(hdev->eir, 0, sizeof(hdev->eir));
7588 memset(&cp, 0, sizeof(cp));
7590 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Handle completion of enabling/disabling Simple Secure Pairing: on
 * failure, roll back the SSP_ENABLED flag (clearing HS too) and fail
 * pending SET_SSP commands; on success, sync flags, answer pending
 * commands, emit New Settings, and queue follow-up HCI commands
 * (SSP debug mode, EIR update) as needed.
 */
7593 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7595 struct cmd_lookup match = { NULL, hdev };
7596 struct hci_request req;
7597 bool changed = false;
7600 u8 mgmt_err = mgmt_status(status);
7602 if (enable && hci_dev_test_and_clear_flag(hdev,
7604 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7605 new_settings(hdev, NULL);
7608 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7614 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7616 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7618 changed = hci_dev_test_and_clear_flag(hdev,
7621 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7624 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7627 new_settings(hdev, match.sk);
7632 hci_req_init(&req, hdev);
7634 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7635 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7636 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7637 sizeof(enable), &enable);
7638 __hci_req_update_eir(&req);
7643 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture (and hold a reference to)
 * the first pending command's socket in the cmd_lookup match.
 */
7646 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7648 struct cmd_lookup *match = data;
7650 if (match->sk == NULL) {
7651 match->sk = cmd->sk;
7652 sock_hold(match->sk);
/* Handle completion of a class-of-device update: pick the originating
 * socket from any pending SET_DEV_CLASS/ADD_UUID/REMOVE_UUID command
 * and broadcast the new 3-byte class to interested listeners.
 */
7656 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7659 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7661 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7662 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7663 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7666 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
7667 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
7668 ext_info_changed(hdev, NULL);
/* Handle completion of a local-name write: update the cached name,
 * then emit Local Name Changed / Extended Info Changed — unless the
 * write was part of the power-on sequence, in which case no events are
 * sent.
 */
7675 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7677 struct mgmt_cp_set_local_name ev;
7678 struct mgmt_pending_cmd *cmd;
7683 memset(&ev, 0, sizeof(ev));
7684 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7685 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7687 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7689 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7691 /* If this is a HCI command related to powering on the
7692 * HCI dev don't send any mgmt signals.
7694 if (pending_find(MGMT_OP_SET_POWERED, hdev))
7698 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7699 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
7700 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Return true if the 128-bit @uuid appears in the @uuids array of
 * @uuid_count entries (byte-wise comparison).
 */
7703 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7707 for (i = 0; i < uuid_count; i++) {
7708 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data TLV structure and return true if any
 * advertised UUID (16-, 32- or 128-bit, expanded against the Bluetooth
 * base UUID) matches an entry in the @uuids filter list.  Bounds are
 * checked so a malformed field length cannot read past @eir_len.
 */
7715 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7719 while (parsed < eir_len) {
7720 u8 field_len = eir[0];
7727 if (eir_len - parsed < field_len + 1)
7731 case EIR_UUID16_ALL:
7732 case EIR_UUID16_SOME:
/* 16-bit UUIDs occupy bytes 12-13 of the base UUID (little-endian in EIR). */
7733 for (i = 0; i + 3 <= field_len; i += 2) {
7734 memcpy(uuid, bluetooth_base_uuid, 16);
7735 uuid[13] = eir[i + 3];
7736 uuid[12] = eir[i + 2];
7737 if (has_uuid(uuid, uuid_count, uuids))
7741 case EIR_UUID32_ALL:
7742 case EIR_UUID32_SOME:
/* 32-bit UUIDs occupy bytes 12-15 of the base UUID. */
7743 for (i = 0; i + 5 <= field_len; i += 4) {
7744 memcpy(uuid, bluetooth_base_uuid, 16);
7745 uuid[15] = eir[i + 5];
7746 uuid[14] = eir[i + 4];
7747 uuid[13] = eir[i + 3];
7748 uuid[12] = eir[i + 2];
7749 if (has_uuid(uuid, uuid_count, uuids))
7753 case EIR_UUID128_ALL:
7754 case EIR_UUID128_SOME:
7755 for (i = 0; i + 17 <= field_len; i += 16) {
7756 memcpy(uuid, eir + i + 2, 16);
7757 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field: one length byte plus field_len payload. */
7763 parsed += field_len + 1;
7764 eir += field_len + 1;
/* Schedule an LE scan restart (to refresh duplicate-filtered results)
 * unless scanning is off or the current scan window would end before
 * the restart delay elapses anyway.
 */
7770 static void restart_le_scan(struct hci_dev *hdev)
7772 /* If controller is not scanning we are done. */
7773 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7776 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7777 hdev->discovery.scan_start +
7778 hdev->discovery.scan_duration))
7781 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
7782 DISCOV_LE_RESTART_DELAY);
/* Apply the active service-discovery filter to a found device: drop
 * results below the RSSI threshold (unless the strict-duplicate-filter
 * quirk requires letting them through for a scan restart) and results
 * matching none of the filter UUIDs in either EIR or scan response.
 */
7785 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7786 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7788 /* If a RSSI threshold has been specified, and
7789 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7790 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7791 * is set, let it through for further processing, as we might need to
7794 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7795 * the results are also dropped.
7797 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7798 (rssi == HCI_RSSI_INVALID ||
7799 (rssi < hdev->discovery.rssi &&
7800 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7803 if (hdev->discovery.uuid_count != 0) {
7804 /* If a list of UUIDs is provided in filter, results with no
7805 * matching UUID should be dropped.
7807 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7808 hdev->discovery.uuids) &&
7809 !eir_has_uuids(scan_rsp, scan_rsp_len,
7810 hdev->discovery.uuid_count,
7811 hdev->discovery.uuids))
7815 /* If duplicate filtering does not report RSSI changes, then restart
7816 * scanning to ensure updated result with updated RSSI values.
7818 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7819 restart_le_scan(hdev);
7821 /* Validate RSSI value against the RSSI threshold once more. */
7822 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7823 rssi < hdev->discovery.rssi)
/* Report a discovered device to user space via MGMT_EV_DEVICE_FOUND.
 * Drops the result when no kernel-initiated discovery is active (with
 * the passive-scan pend_le_reports exception), when the result filter
 * rejects it, or when limited discovery is on and the device does not
 * advertise the limited-discoverable bit.  EIR, optional class of
 * device and scan-response data are concatenated into the event.
 * NOTE(review): the backing "buf" declaration is elided in this
 * excerpt; ev points into it and the size check below guards it.
 */
7830 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7831 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7832 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7835 struct mgmt_ev_device_found *ev = (void *)buf;
7838 /* Don't send events for a non-kernel initiated discovery. With
7839 * LE one exception is if we have pend_le_reports > 0 in which
7840 * case we're doing passive scanning and want these events.
7842 if (!hci_discovery_active(hdev)) {
7843 if (link_type == ACL_LINK)
7845 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7849 if (hdev->discovery.result_filtering) {
7850 /* We are using service discovery */
7851 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
7856 if (hdev->discovery.limited) {
7857 /* Check for limited discoverable bit */
/* BR/EDR: bit 5 of CoD byte 1 is "limited discoverable mode". */
7859 if (!(dev_class[1] & 0x20))
7862 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
7863 if (!flags || !(flags[0] & LE_AD_LIMITED))
7868 /* Make sure that the buffer is big enough. The 5 extra bytes
7869 * are for the potential CoD field.
7871 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7874 memset(buf, 0, sizeof(buf));
7876 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7877 * RSSI value was reported as 0 when not available. This behavior
7878 * is kept when using device discovery. This is required for full
7879 * backwards compatibility with the API.
7881 * However when using service discovery, the value 127 will be
7882 * returned when the RSSI is not available.
7884 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7885 link_type == ACL_LINK)
7888 bacpy(&ev->addr.bdaddr, bdaddr);
7889 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7891 ev->flags = cpu_to_le32(flags);
7894 /* Copy EIR or advertising data into event */
7895 memcpy(ev->eir, eir, eir_len);
/* Append CoD only if the EIR does not already carry one. */
7897 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7899 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7902 if (scan_rsp_len > 0)
7903 /* Append scan response data to event */
7904 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7906 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7907 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7909 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a MGMT_EV_DEVICE_FOUND event whose
 * EIR payload contains only an EIR_NAME_COMPLETE field.
 */
7912 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7913 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7915 struct mgmt_ev_device_found *ev;
7916 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7919 ev = (struct mgmt_ev_device_found *) buf;
7921 memset(buf, 0, sizeof(buf));
7923 bacpy(&ev->addr.bdaddr, bdaddr);
7924 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7927 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7930 ev->eir_len = cpu_to_le16(eir_len);
7932 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit MGMT_EV_DISCOVERING to report that discovery of the current
 * type has started (@discovering != 0) or stopped.
 */
7935 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7937 struct mgmt_ev_discovering ev;
7939 BT_DBG("%s discovering %u", hdev->name, discovering);
7941 memset(&ev, 0, sizeof(ev));
7942 ev.type = hdev->discovery.type;
7943 ev.discovering = discovering;
7945 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Control-channel descriptor wiring the mgmt (and Tizen-specific)
 * command handler tables into the HCI socket layer; registered below.
 * NOTE(review): the "int mgmt_init(void)" signature line is elided in
 * this excerpt — the trailing return belongs to it.
 */
7948 static struct hci_mgmt_chan chan = {
7949 .channel = HCI_CHANNEL_CONTROL,
7950 .handler_count = ARRAY_SIZE(mgmt_handlers),
7951 .handlers = mgmt_handlers,
7953 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
7954 .tizen_handlers = tizen_mgmt_handlers,
7956 .hdev_init = mgmt_init_hdev,
7961 return hci_mgmt_chan_register(&chan);
/* Unregister the mgmt control channel on module teardown. */
7964 void mgmt_exit(void)
7966 hci_mgmt_chan_unregister(&chan);