/*
 * BlueZ - Bluetooth protocol stack for Linux
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2011-2012 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation;
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
 * CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
 * COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
 * SOFTWARE IS DISCLAIMED.
 */
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
39 #include "hci_request.h"
41 #include "mgmt_util.h"
/* Protocol version/revision reported to userspace via Read Management
 * Version Information.
 */
43 #define MGMT_VERSION 1
44 #define MGMT_REVISION 14
/* Opcodes a trusted control-channel socket may send; reported by
 * read_commands().
 * NOTE(review): the original numbering in this chunk is discontinuous,
 * so some entries of these tables are not visible here — do not assume
 * the lists below are complete.
 */
46 static const u16 mgmt_commands[] = {
47 MGMT_OP_READ_INDEX_LIST,
50 MGMT_OP_SET_DISCOVERABLE,
51 MGMT_OP_SET_CONNECTABLE,
52 MGMT_OP_SET_FAST_CONNECTABLE,
54 MGMT_OP_SET_LINK_SECURITY,
58 MGMT_OP_SET_DEV_CLASS,
59 MGMT_OP_SET_LOCAL_NAME,
62 MGMT_OP_LOAD_LINK_KEYS,
63 MGMT_OP_LOAD_LONG_TERM_KEYS,
65 MGMT_OP_GET_CONNECTIONS,
66 MGMT_OP_PIN_CODE_REPLY,
67 MGMT_OP_PIN_CODE_NEG_REPLY,
68 MGMT_OP_SET_IO_CAPABILITY,
70 MGMT_OP_CANCEL_PAIR_DEVICE,
71 MGMT_OP_UNPAIR_DEVICE,
72 MGMT_OP_USER_CONFIRM_REPLY,
73 MGMT_OP_USER_CONFIRM_NEG_REPLY,
74 MGMT_OP_USER_PASSKEY_REPLY,
75 MGMT_OP_USER_PASSKEY_NEG_REPLY,
76 MGMT_OP_READ_LOCAL_OOB_DATA,
77 MGMT_OP_ADD_REMOTE_OOB_DATA,
78 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
79 MGMT_OP_START_DISCOVERY,
80 MGMT_OP_STOP_DISCOVERY,
83 MGMT_OP_UNBLOCK_DEVICE,
84 MGMT_OP_SET_DEVICE_ID,
85 MGMT_OP_SET_ADVERTISING,
87 MGMT_OP_SET_STATIC_ADDRESS,
88 MGMT_OP_SET_SCAN_PARAMS,
89 MGMT_OP_SET_SECURE_CONN,
90 MGMT_OP_SET_DEBUG_KEYS,
93 MGMT_OP_GET_CONN_INFO,
94 MGMT_OP_GET_CLOCK_INFO,
96 MGMT_OP_REMOVE_DEVICE,
97 MGMT_OP_LOAD_CONN_PARAM,
98 MGMT_OP_READ_UNCONF_INDEX_LIST,
99 MGMT_OP_READ_CONFIG_INFO,
100 MGMT_OP_SET_EXTERNAL_CONFIG,
101 MGMT_OP_SET_PUBLIC_ADDRESS,
102 MGMT_OP_START_SERVICE_DISCOVERY,
103 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
104 MGMT_OP_READ_EXT_INDEX_LIST,
105 MGMT_OP_READ_ADV_FEATURES,
106 MGMT_OP_ADD_ADVERTISING,
107 MGMT_OP_REMOVE_ADVERTISING,
108 MGMT_OP_GET_ADV_SIZE_INFO,
109 MGMT_OP_START_LIMITED_DISCOVERY,
110 MGMT_OP_READ_EXT_INFO,
111 MGMT_OP_SET_APPEARANCE,
/* Events a trusted socket may receive; reported by read_commands(). */
114 static const u16 mgmt_events[] = {
115 MGMT_EV_CONTROLLER_ERROR,
117 MGMT_EV_INDEX_REMOVED,
118 MGMT_EV_NEW_SETTINGS,
119 MGMT_EV_CLASS_OF_DEV_CHANGED,
120 MGMT_EV_LOCAL_NAME_CHANGED,
121 MGMT_EV_NEW_LINK_KEY,
122 MGMT_EV_NEW_LONG_TERM_KEY,
123 MGMT_EV_DEVICE_CONNECTED,
124 MGMT_EV_DEVICE_DISCONNECTED,
125 MGMT_EV_CONNECT_FAILED,
126 MGMT_EV_PIN_CODE_REQUEST,
127 MGMT_EV_USER_CONFIRM_REQUEST,
128 MGMT_EV_USER_PASSKEY_REQUEST,
130 MGMT_EV_DEVICE_FOUND,
132 MGMT_EV_DEVICE_BLOCKED,
133 MGMT_EV_DEVICE_UNBLOCKED,
134 MGMT_EV_DEVICE_UNPAIRED,
135 MGMT_EV_PASSKEY_NOTIFY,
138 MGMT_EV_DEVICE_ADDED,
139 MGMT_EV_DEVICE_REMOVED,
140 MGMT_EV_NEW_CONN_PARAM,
141 MGMT_EV_UNCONF_INDEX_ADDED,
142 MGMT_EV_UNCONF_INDEX_REMOVED,
143 MGMT_EV_NEW_CONFIG_OPTIONS,
144 MGMT_EV_EXT_INDEX_ADDED,
145 MGMT_EV_EXT_INDEX_REMOVED,
146 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
147 MGMT_EV_ADVERTISING_ADDED,
148 MGMT_EV_ADVERTISING_REMOVED,
149 MGMT_EV_EXT_INFO_CHANGED,
/* Read-only opcodes that untrusted (non-privileged) sockets may send. */
152 static const u16 mgmt_untrusted_commands[] = {
153 MGMT_OP_READ_INDEX_LIST,
155 MGMT_OP_READ_UNCONF_INDEX_LIST,
156 MGMT_OP_READ_CONFIG_INFO,
157 MGMT_OP_READ_EXT_INDEX_LIST,
158 MGMT_OP_READ_EXT_INFO,
/* Events that untrusted sockets may receive (no security-sensitive data). */
161 static const u16 mgmt_untrusted_events[] = {
163 MGMT_EV_INDEX_REMOVED,
164 MGMT_EV_NEW_SETTINGS,
165 MGMT_EV_CLASS_OF_DEV_CHANGED,
166 MGMT_EV_LOCAL_NAME_CHANGED,
167 MGMT_EV_UNCONF_INDEX_ADDED,
168 MGMT_EV_UNCONF_INDEX_REMOVED,
169 MGMT_EV_NEW_CONFIG_OPTIONS,
170 MGMT_EV_EXT_INDEX_ADDED,
171 MGMT_EV_EXT_INDEX_REMOVED,
172 MGMT_EV_EXT_INFO_CHANGED,
/* Lifetime of the temporary service (UUID/class) cache. */
175 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* 128-bit all-zero key used to recognize blank/invalid link keys. */
177 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
178 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Indexed by the raw HCI status code (see mgmt_status() below).
 * NOTE(review): the original numbering here is discontinuous — the entry
 * for HCI status 0x00 (line 182 of the original) is not visible in this
 * chunk; verify against the full file.
 */
180 /* HCI to MGMT error code conversion table */
181 static u8 mgmt_status_table[] = {
183 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
184 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
185 MGMT_STATUS_FAILED, /* Hardware Failure */
186 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
187 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
188 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
189 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
190 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
191 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
192 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
193 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
194 MGMT_STATUS_BUSY, /* Command Disallowed */
195 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
196 MGMT_STATUS_REJECTED, /* Rejected Security */
197 MGMT_STATUS_REJECTED, /* Rejected Personal */
198 MGMT_STATUS_TIMEOUT, /* Host Timeout */
199 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
200 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
201 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
202 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
203 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
204 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
205 MGMT_STATUS_BUSY, /* Repeated Attempts */
206 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
207 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
208 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
209 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
210 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
211 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
212 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
213 MGMT_STATUS_FAILED, /* Unspecified Error */
214 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
215 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
216 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
217 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
218 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
219 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
220 MGMT_STATUS_FAILED, /* Unit Link Key Used */
221 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
222 MGMT_STATUS_TIMEOUT, /* Instant Passed */
223 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
224 MGMT_STATUS_FAILED, /* Transaction Collision */
225 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
226 MGMT_STATUS_REJECTED, /* QoS Rejected */
227 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
228 MGMT_STATUS_REJECTED, /* Insufficient Security */
229 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
230 MGMT_STATUS_BUSY, /* Role Switch Pending */
231 MGMT_STATUS_FAILED, /* Slot Violation */
232 MGMT_STATUS_FAILED, /* Role Switch Failed */
233 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
234 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
235 MGMT_STATUS_BUSY, /* Host Busy Pairing */
236 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
237 MGMT_STATUS_BUSY, /* Controller Busy */
238 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
239 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
240 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
241 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
242 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
245 static u8 mgmt_status(u8 hci_status)
247 if (hci_status < ARRAY_SIZE(mgmt_status_table))
248 return mgmt_status_table[hci_status];
250 return MGMT_STATUS_FAILED;
/* Broadcast a per-controller event on the control channel.
 * NOTE(review): continuation lines of these two helpers (parameter list
 * and trailing arguments) are not visible in this chunk.
 */
253 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
256 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Like mgmt_index_event() but restricted to sockets carrying @flag. */
260 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
261 u16 len, int flag, struct sock *skip_sk)
263 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
267 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
268 struct sock *skip_sk)
270 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
271 HCI_SOCK_TRUSTED, skip_sk);
274 static u8 le_addr_type(u8 mgmt_addr_type)
276 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
277 return ADDR_LE_DEV_PUBLIC;
279 return ADDR_LE_DEV_RANDOM;
282 void mgmt_fill_version_info(void *ver)
284 struct mgmt_rp_read_version *rp = ver;
286 rp->version = MGMT_VERSION;
287 rp->revision = cpu_to_le16(MGMT_REVISION);
/* MGMT_OP_READ_VERSION handler: reply with protocol version/revision.
 * NOTE(review): signature continuation and trailing reply arguments are
 * not visible in this chunk.
 */
290 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
293 struct mgmt_rp_read_version rp;
295 BT_DBG("sock %p", sk);
297 mgmt_fill_version_info(&rp);
299 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: report the supported opcodes/events,
 * picking the trusted or untrusted tables based on socket privileges.
 * NOTE(review): some declaration/cleanup lines (rp_size, i, err, the
 * else branch marker, kfree) are not visible in this chunk.
 */
303 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
306 struct mgmt_rp_read_commands *rp;
307 u16 num_commands, num_events;
311 BT_DBG("sock %p", sk);
/* Trusted sockets see the full command/event tables. */
313 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
314 num_commands = ARRAY_SIZE(mgmt_commands);
315 num_events = ARRAY_SIZE(mgmt_events);
317 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
318 num_events = ARRAY_SIZE(mgmt_untrusted_events);
/* Reply carries a variable-length array of 16-bit opcodes. */
321 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
323 rp = kmalloc(rp_size, GFP_KERNEL);
327 rp->num_commands = cpu_to_le16(num_commands);
328 rp->num_events = cpu_to_le16(num_events);
330 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
331 __le16 *opcode = rp->opcodes;
/* put_unaligned_le16: opcodes array has no alignment guarantee. */
333 for (i = 0; i < num_commands; i++, opcode++)
334 put_unaligned_le16(mgmt_commands[i], opcode);
336 for (i = 0; i < num_events; i++, opcode++)
337 put_unaligned_le16(mgmt_events[i], opcode);
339 __le16 *opcode = rp->opcodes;
341 for (i = 0; i < num_commands; i++, opcode++)
342 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
344 for (i = 0; i < num_events; i++, opcode++)
345 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
348 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: list all configured primary
 * controller indexes. Counts under hci_dev_list_lock, then fills the
 * reply; devices in setup/config or bound to a user channel are skipped.
 * NOTE(review): local declarations and some loop/count lines are not
 * visible in this chunk.
 */
355 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
358 struct mgmt_rp_read_index_list *rp;
364 BT_DBG("sock %p", sk);
366 read_lock(&hci_dev_list_lock);
/* First pass: count qualifying controllers to size the reply. */
369 list_for_each_entry(d, &hci_dev_list, list) {
370 if (d->dev_type == HCI_PRIMARY &&
371 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
/* GFP_ATOMIC: allocation happens under the read lock. */
375 rp_len = sizeof(*rp) + (2 * count);
376 rp = kmalloc(rp_len, GFP_ATOMIC);
378 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in the indexes that still qualify. */
383 list_for_each_entry(d, &hci_dev_list, list) {
384 if (hci_dev_test_flag(d, HCI_SETUP) ||
385 hci_dev_test_flag(d, HCI_CONFIG) ||
386 hci_dev_test_flag(d, HCI_USER_CHANNEL))
389 /* Devices marked as raw-only are neither configured
390 * nor unconfigured controllers.
392 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
395 if (d->dev_type == HCI_PRIMARY &&
396 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
397 rp->index[count++] = cpu_to_le16(d->id);
398 BT_DBG("Added hci%u", d->id);
/* Recompute length: entries may have been filtered between passes. */
402 rp->num_controllers = cpu_to_le16(count);
403 rp_len = sizeof(*rp) + (2 * count);
405 read_unlock(&hci_dev_list_lock);
407 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass structure as
 * read_index_list(), but selecting only UNCONFIGURED primary controllers.
 * NOTE(review): local declarations are not visible in this chunk.
 */
415 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
416 void *data, u16 data_len)
418 struct mgmt_rp_read_unconf_index_list *rp;
424 BT_DBG("sock %p", sk);
426 read_lock(&hci_dev_list_lock);
/* First pass: count unconfigured primary controllers. */
429 list_for_each_entry(d, &hci_dev_list, list) {
430 if (d->dev_type == HCI_PRIMARY &&
431 hci_dev_test_flag(d, HCI_UNCONFIGURED))
435 rp_len = sizeof(*rp) + (2 * count);
436 rp = kmalloc(rp_len, GFP_ATOMIC);
438 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in qualifying indexes. */
443 list_for_each_entry(d, &hci_dev_list, list) {
444 if (hci_dev_test_flag(d, HCI_SETUP) ||
445 hci_dev_test_flag(d, HCI_CONFIG) ||
446 hci_dev_test_flag(d, HCI_USER_CHANNEL))
449 /* Devices marked as raw-only are neither configured
450 * nor unconfigured controllers.
452 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
455 if (d->dev_type == HCI_PRIMARY &&
456 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
457 rp->index[count++] = cpu_to_le16(d->id);
458 BT_DBG("Added hci%u", d->id);
462 rp->num_controllers = cpu_to_le16(count);
463 rp_len = sizeof(*rp) + (2 * count);
465 read_unlock(&hci_dev_list_lock);
467 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
468 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list primary and AMP controllers
 * with a type tag (0x00 configured primary, 0x01 unconfigured primary,
 * 0x02 AMP) and the transport bus. Calling it switches the socket to
 * extended index events only.
 * NOTE(review): local declarations are not visible in this chunk.
 */
475 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
476 void *data, u16 data_len)
478 struct mgmt_rp_read_ext_index_list *rp;
484 BT_DBG("sock %p", sk);
486 read_lock(&hci_dev_list_lock);
/* First pass: count primary and AMP controllers. */
489 list_for_each_entry(d, &hci_dev_list, list) {
490 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
494 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
495 rp = kmalloc(rp_len, GFP_ATOMIC);
497 read_unlock(&hci_dev_list_lock);
/* Second pass: fill entries for devices that still qualify. */
502 list_for_each_entry(d, &hci_dev_list, list) {
503 if (hci_dev_test_flag(d, HCI_SETUP) ||
504 hci_dev_test_flag(d, HCI_CONFIG) ||
505 hci_dev_test_flag(d, HCI_USER_CHANNEL))
508 /* Devices marked as raw-only are neither configured
509 * nor unconfigured controllers.
511 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
514 if (d->dev_type == HCI_PRIMARY) {
515 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
516 rp->entry[count].type = 0x01;
518 rp->entry[count].type = 0x00;
519 } else if (d->dev_type == HCI_AMP) {
520 rp->entry[count].type = 0x02;
525 rp->entry[count].bus = d->bus;
526 rp->entry[count++].index = cpu_to_le16(d->id);
527 BT_DBG("Added hci%u", d->id);
530 rp->num_controllers = cpu_to_le16(count);
531 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
533 read_unlock(&hci_dev_list_lock);
535 /* If this command is called at least once, then all the
536 * default index and unconfigured index events are disabled
537 * and from now on only extended index events are used.
539 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
540 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
541 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
543 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
544 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
/* A controller is configured once any required external config has been
 * applied and, for the invalid-bdaddr quirk, a public address has been
 * set. NOTE(review): the return statements of this helper are not
 * visible in this chunk.
 */
551 static bool is_configured(struct hci_dev *hdev)
553 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
554 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
557 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
558 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Bitmask of configuration steps still outstanding, little-endian for
 * direct use in MGMT replies/events.
 * NOTE(review): the local "options" declaration is not visible here.
 */
564 static __le32 get_missing_options(struct hci_dev *hdev)
568 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
569 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
570 options |= MGMT_OPTION_EXTERNAL_CONFIG;
572 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
573 !bacmp(&hdev->public_addr, BDADDR_ANY))
574 options |= MGMT_OPTION_PUBLIC_ADDRESS;
576 return cpu_to_le32(options);
579 static int new_options(struct hci_dev *hdev, struct sock *skip)
581 __le32 options = get_missing_options(hdev);
583 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
584 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
/* Complete @opcode with the current missing-options bitmask.
 * NOTE(review): the trailing sizeof argument line is not visible here.
 */
587 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
589 __le32 options = get_missing_options(hdev);
591 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus supported
 * and missing configuration options.
 * NOTE(review): the "options" declaration and hci_dev_lock() call are
 * not visible in this chunk (the unlock at the end is).
 */
595 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
596 void *data, u16 data_len)
598 struct mgmt_rp_read_config_info rp;
601 BT_DBG("sock %p %s", sk, hdev->name);
605 memset(&rp, 0, sizeof(rp));
606 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
608 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
609 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* A public address can only be configured if the driver supports it. */
611 if (hdev->set_bdaddr)
612 options |= MGMT_OPTION_PUBLIC_ADDRESS;
614 rp.supported_options = cpu_to_le32(options);
615 rp.missing_options = get_missing_options(hdev);
617 hci_dev_unlock(hdev);
619 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
623 static u32 get_supported_phys(struct hci_dev *hdev)
625 u32 supported_phys = 0;
627 if (lmp_bredr_capable(hdev)) {
628 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
630 if (hdev->features[0][0] & LMP_3SLOT)
631 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
633 if (hdev->features[0][0] & LMP_5SLOT)
634 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
636 if (lmp_edr_2m_capable(hdev)) {
637 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
639 if (lmp_edr_3slot_capable(hdev))
640 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
642 if (lmp_edr_5slot_capable(hdev))
643 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
645 if (lmp_edr_3m_capable(hdev)) {
646 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
648 if (lmp_edr_3slot_capable(hdev))
649 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
651 if (lmp_edr_5slot_capable(hdev))
652 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
657 if (lmp_le_capable(hdev)) {
658 supported_phys |= MGMT_PHY_LE_1M_TX;
659 supported_phys |= MGMT_PHY_LE_1M_RX;
661 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
662 supported_phys |= MGMT_PHY_LE_2M_TX;
663 supported_phys |= MGMT_PHY_LE_2M_RX;
666 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
667 supported_phys |= MGMT_PHY_LE_CODED_TX;
668 supported_phys |= MGMT_PHY_LE_CODED_RX;
672 return supported_phys;
675 static u32 get_selected_phys(struct hci_dev *hdev)
677 u32 selected_phys = 0;
679 if (lmp_bredr_capable(hdev)) {
680 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
682 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
683 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
685 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
686 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
688 if (lmp_edr_2m_capable(hdev)) {
689 if (!(hdev->pkt_type & HCI_2DH1))
690 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
692 if (lmp_edr_3slot_capable(hdev) &&
693 !(hdev->pkt_type & HCI_2DH3))
694 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
696 if (lmp_edr_5slot_capable(hdev) &&
697 !(hdev->pkt_type & HCI_2DH5))
698 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
700 if (lmp_edr_3m_capable(hdev)) {
701 if (!(hdev->pkt_type & HCI_3DH1))
702 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
704 if (lmp_edr_3slot_capable(hdev) &&
705 !(hdev->pkt_type & HCI_3DH3))
706 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
708 if (lmp_edr_5slot_capable(hdev) &&
709 !(hdev->pkt_type & HCI_3DH5))
710 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
715 if (lmp_le_capable(hdev)) {
716 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
717 selected_phys |= MGMT_PHY_LE_1M_TX;
719 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
720 selected_phys |= MGMT_PHY_LE_1M_RX;
722 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
723 selected_phys |= MGMT_PHY_LE_2M_TX;
725 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
726 selected_phys |= MGMT_PHY_LE_2M_RX;
728 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
729 selected_phys |= MGMT_PHY_LE_CODED_TX;
731 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
732 selected_phys |= MGMT_PHY_LE_CODED_RX;
735 return selected_phys;
738 static u32 get_configurable_phys(struct hci_dev *hdev)
740 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
741 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the bitmask of settings this controller could support,
 * independent of their current on/off state.
 * NOTE(review): the local "settings" declaration, the second half of
 * the CONFIGURATION condition and the return statement are not visible
 * in this chunk.
 */
744 static u32 get_supported_settings(struct hci_dev *hdev)
748 settings |= MGMT_SETTING_POWERED;
749 settings |= MGMT_SETTING_BONDABLE;
750 settings |= MGMT_SETTING_DEBUG_KEYS;
751 settings |= MGMT_SETTING_CONNECTABLE;
752 settings |= MGMT_SETTING_DISCOVERABLE;
754 if (lmp_bredr_capable(hdev)) {
/* Fast connectable relies on interlaced page scan (BT 1.2+). */
755 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
756 settings |= MGMT_SETTING_FAST_CONNECTABLE;
757 settings |= MGMT_SETTING_BREDR;
758 settings |= MGMT_SETTING_LINK_SECURITY;
760 if (lmp_ssp_capable(hdev)) {
761 settings |= MGMT_SETTING_SSP;
762 settings |= MGMT_SETTING_HS;
765 if (lmp_sc_capable(hdev))
766 settings |= MGMT_SETTING_SECURE_CONN;
769 if (lmp_le_capable(hdev)) {
770 settings |= MGMT_SETTING_LE;
771 settings |= MGMT_SETTING_ADVERTISING;
772 settings |= MGMT_SETTING_SECURE_CONN;
773 settings |= MGMT_SETTING_PRIVACY;
774 settings |= MGMT_SETTING_STATIC_ADDRESS;
777 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
779 settings |= MGMT_SETTING_CONFIGURATION;
781 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the bitmask of settings that are currently active, derived
 * from the hdev flags.
 * NOTE(review): the local declaration and return are not visible here.
 */
786 static u32 get_current_settings(struct hci_dev *hdev)
790 if (hdev_is_powered(hdev))
791 settings |= MGMT_SETTING_POWERED;
793 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
794 settings |= MGMT_SETTING_CONNECTABLE;
796 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
797 settings |= MGMT_SETTING_FAST_CONNECTABLE;
799 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
800 settings |= MGMT_SETTING_DISCOVERABLE;
802 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
803 settings |= MGMT_SETTING_BONDABLE;
805 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
806 settings |= MGMT_SETTING_BREDR;
808 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
809 settings |= MGMT_SETTING_LE;
811 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
812 settings |= MGMT_SETTING_LINK_SECURITY;
814 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
815 settings |= MGMT_SETTING_SSP;
817 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
818 settings |= MGMT_SETTING_HS;
820 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
821 settings |= MGMT_SETTING_ADVERTISING;
823 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
824 settings |= MGMT_SETTING_SECURE_CONN;
826 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
827 settings |= MGMT_SETTING_DEBUG_KEYS;
829 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
830 settings |= MGMT_SETTING_PRIVACY;
832 /* The current setting for static address has two purposes. The
833 * first is to indicate if the static address will be used and
834 * the second is to indicate if it is actually set.
836 * This means if the static address is not configured, this flag
837 * will never be set. If the address is configured, then if the
838 * address is actually used decides if the flag is set or not.
840 * For single mode LE only controllers and dual-mode controllers
841 * with BR/EDR disabled, the existence of the static address will
844 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
845 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
846 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
847 if (bacmp(&hdev->static_addr, BDADDR_ANY))
848 settings |= MGMT_SETTING_STATIC_ADDRESS;
854 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
856 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
/* Like pending_find() but additionally matching on user data.
 * NOTE(review): the final parameter line of the signature is not
 * visible in this chunk.
 */
859 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
860 struct hci_dev *hdev,
863 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Return the AD discoverability flags to advertise, preferring the
 * value of an in-flight Set Discoverable command over the hdev flags.
 * NOTE(review): the cmd/cp->val condition lines and the fallthrough
 * return are not visible in this chunk.
 */
866 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
868 struct mgmt_pending_cmd *cmd;
870 /* If there's a pending mgmt command the flags will not yet have
871 * their final values, so check for this first.
873 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
875 struct mgmt_mode *cp = cmd->param;
877 return LE_AD_GENERAL;
878 else if (cp->val == 0x02)
879 return LE_AD_LIMITED;
881 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
882 return LE_AD_LIMITED;
883 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
884 return LE_AD_GENERAL;
/* Report whether the controller should currently be connectable,
 * again preferring a pending Set Connectable command's value.
 */
890 bool mgmt_get_connectable(struct hci_dev *hdev)
892 struct mgmt_pending_cmd *cmd;
894 /* If there's a pending mgmt command the flag will not yet have
895 * its final value, so check for this first.
897 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
899 struct mgmt_mode *cp = cmd->param;
904 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Delayed work: when the service cache expires, refresh EIR data and
 * class of device from the now-authoritative state.
 * NOTE(review): the container_of() continuation, locking and early
 * return lines are not visible in this chunk.
 */
907 static void service_cache_off(struct work_struct *work)
909 struct hci_dev *hdev = container_of(work, struct hci_dev,
911 struct hci_request req;
913 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
916 hci_req_init(&req, hdev);
920 __hci_req_update_eir(&req);
921 __hci_req_update_class(&req);
923 hci_dev_unlock(hdev);
925 hci_req_run(&req, NULL);
/* Delayed work: the resolvable private address timed out; mark it
 * expired and, if advertising, re-enable advertising which rotates
 * the RPA. NOTE(review): some lines (early return when not
 * advertising, else branch) are not visible in this chunk.
 */
928 static void rpa_expired(struct work_struct *work)
930 static hci_dev *hdev = container_of(work, struct hci_dev,
932 struct hci_request req;
936 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
938 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
941 /* The generation of a new RPA and programming it into the
942 * controller happens in the hci_req_enable_advertising()
945 hci_req_init(&req, hdev);
946 if (ext_adv_capable(hdev))
947 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
949 __hci_req_enable_advertising(&req);
950 hci_req_run(&req, NULL);
/* One-time setup when the first mgmt socket claims this controller:
 * arm the cache/RPA work items and require explicit bondable opt-in.
 */
953 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
955 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
958 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
959 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
961 /* Non-mgmt controlled devices get this bit set
962 * implicitly so that pairing works for them, however
963 * for mgmt we require user-space to explicitly enable
966 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: basic controller information snapshot.
 * NOTE(review): the hci_dev_lock() call and trailing reply-size line
 * are not visible in this chunk (the unlock is).
 */
969 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
970 void *data, u16 data_len)
972 struct mgmt_rp_read_info rp;
974 BT_DBG("sock %p %s", sk, hdev->name);
978 memset(&rp, 0, sizeof(rp));
980 bacpy(&rp.bdaddr, &hdev->bdaddr);
982 rp.version = hdev->hci_ver;
983 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
985 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
986 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
988 memcpy(rp.dev_class, hdev->dev_class, 3);
990 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
991 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
993 hci_dev_unlock(hdev);
995 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Append class-of-device, appearance and name EIR structures to @eir;
 * returns the resulting length.
 * NOTE(review): the eir_len/name_len declarations and final return are
 * not visible in this chunk.
 */
999 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1004 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1005 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1006 hdev->dev_class, 3);
1008 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1009 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1012 name_len = strlen(hdev->dev_name);
1013 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1014 hdev->dev_name, name_len);
1016 name_len = strlen(hdev->short_name);
1017 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1018 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with
 * EIR-encoded extras; switches the socket to extended info events.
 * NOTE(review): the buf/eir_len declarations and lock call are not
 * visible in this chunk.
 */
1023 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1024 void *data, u16 data_len)
1027 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1030 BT_DBG("sock %p %s", sk, hdev->name);
1032 memset(&buf, 0, sizeof(buf));
1036 bacpy(&rp->bdaddr, &hdev->bdaddr);
1038 rp->version = hdev->hci_ver;
1039 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1041 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1042 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1045 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1046 rp->eir_len = cpu_to_le16(eir_len);
1048 hci_dev_unlock(hdev);
1050 /* If this command is called at least once, then the events
1051 * for class of device and local name changes are disabled
1052 * and only the new extended controller information event
1055 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1056 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1057 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1059 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1060 sizeof(*rp) + eir_len);
/* Broadcast an Extended Info Changed event to subscribed sockets.
 * NOTE(review): the buf/eir_len declarations are not visible here.
 */
1063 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1066 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1069 memset(buf, 0, sizeof(buf));
1071 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1072 ev->eir_len = cpu_to_le16(eir_len);
1074 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1075 sizeof(*ev) + eir_len,
1076 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Complete @opcode with the current settings bitmask as payload.
 * NOTE(review): the trailing sizeof argument line is not visible here.
 */
1079 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1081 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1083 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1087 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1089 BT_DBG("%s status 0x%02x", hdev->name, status);
1091 if (hci_conn_count(hdev) == 0) {
1092 cancel_delayed_work(&hdev->power_off);
1093 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1097 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1099 struct mgmt_ev_advertising_added ev;
1101 ev.instance = instance;
1103 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Notify trusted listeners (except @sk) that an advertising instance
 * was removed.
 * NOTE(review): the final parameter line of the signature is not
 * visible in this chunk.
 */
1106 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1109 struct mgmt_ev_advertising_removed ev;
1111 ev.instance = instance;
1113 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1116 static void cancel_adv_timeout(struct hci_dev *hdev)
1118 if (hdev->adv_instance_timeout) {
1119 hdev->adv_instance_timeout = 0;
1120 cancel_delayed_work(&hdev->adv_instance_expire);
/* Build and run one HCI request that quiesces the controller for
 * power-off: disable scanning/advertising, stop discovery and abort
 * every connection.
 * NOTE(review): the "u8 scan = 0x00;" and "int err;" declarations are
 * not visible in this chunk.
 */
1124 static int clean_up_hci_state(struct hci_dev *hdev)
1126 struct hci_request req;
1127 struct hci_conn *conn;
1128 bool discov_stopped;
1131 hci_req_init(&req, hdev);
/* Turn off inquiry/page scan if either is enabled. */
1133 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1134 test_bit(HCI_PSCAN, &hdev->flags)) {
1136 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1139 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1141 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1142 __hci_req_disable_advertising(&req);
1144 discov_stopped = hci_req_stop_discovery(&req);
1146 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1147 /* 0x15 == Terminated due to Power Off */
1148 __hci_abort_conn(&req, conn, 0x15);
1151 err = hci_req_run(&req, clean_up_hci_complete);
1152 if (!err && discov_stopped)
1153 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: power the controller on or off.
 * Rejects values other than 0x00/0x01, refuses overlapping requests,
 * and short-circuits when the requested state already holds.
 * NOTE(review): locking, goto targets and the if/else branch markers
 * are not visible in this chunk.
 */
1158 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1161 struct mgmt_mode *cp = data;
1162 struct mgmt_pending_cmd *cmd;
1165 BT_DBG("request for %s", hdev->name);
1167 if (cp->val != 0x00 && cp->val != 0x01)
1168 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1169 MGMT_STATUS_INVALID_PARAMS);
/* Only one Set Powered may be in flight per controller. */
1173 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1174 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Already in the requested state: reply with current settings. */
1179 if (!!cp->val == hdev_is_powered(hdev)) {
1180 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1184 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1191 queue_work(hdev->req_workqueue, &hdev->power_on);
1194 /* Disconnect connections, stop scans, etc */
1195 err = clean_up_hci_state(hdev);
1197 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1198 HCI_POWER_OFF_TIMEOUT);
1200 /* ENODATA means there were no HCI commands queued */
1201 if (err == -ENODATA) {
1202 cancel_delayed_work(&hdev->power_off);
1203 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1209 hci_dev_unlock(hdev);
1213 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1215 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1217 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1218 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1221 int mgmt_new_settings(struct hci_dev *hdev)
1223 return new_settings(hdev, NULL);
/* NOTE(review): member of struct cmd_lookup; the declaration's start
 * and remaining members are not visible in this chunk.
 */
1228 struct hci_dev *hdev;
1232 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1234 struct cmd_lookup *match = data;
1236 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1238 list_del(&cmd->list);
1240 if (match->sk == NULL) {
1241 match->sk = cmd->sk;
1242 sock_hold(match->sk);
1245 mgmt_pending_free(cmd);
/* Pending-command iterator callback: fail the command with *status.
 * NOTE(review): the "u8 *status = data;" declaration is not visible in
 * this chunk.
 */
1248 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1252 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1253 mgmt_pending_remove(cmd);
/* Same, but prefer the command's own completion callback when set and
 * fall back to a plain status response otherwise.
 * NOTE(review): the status declaration and early-return lines are not
 * visible in this chunk.
 */
1256 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1258 if (cmd->cmd_complete) {
1261 cmd->cmd_complete(cmd, *status);
1262 mgmt_pending_remove(cmd);
1267 cmd_status_rsp(cmd, data);
/* Generic completion: echo the command's original parameter blob back to
 * the requester together with @status.
 */
1270 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1272 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1273 cmd->param, cmd->param_len);
/* Completion for address-based commands: reply with only the leading
 * mgmt_addr_info (bdaddr + type) of the stored parameters.
 */
1276 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1278 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1279 cmd->param, sizeof(struct mgmt_addr_info))
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller has no BR/EDR capability, REJECTED when BR/EDR is present
 * but administratively disabled, SUCCESS otherwise.
 */
1282 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1284 if (!lmp_bredr_capable(hdev))
1285 return MGMT_STATUS_NOT_SUPPORTED;
1286 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1287 return MGMT_STATUS_REJECTED;
1289 return MGMT_STATUS_SUCCESS;
/* Same classification as mgmt_bredr_support(), but for LE: capability
 * missing -> NOT_SUPPORTED, capability disabled -> REJECTED.
 */
1292 static u8 mgmt_le_support(struct hci_dev *hdev)
1294 if (!lmp_le_capable(hdev))
1295 return MGMT_STATUS_NOT_SUPPORTED;
1296 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1297 return MGMT_STATUS_REJECTED;
1299 return MGMT_STATUS_SUCCESS;
/* HCI request completion for Set Discoverable: on failure report the
 * mapped error and roll back the limited-discoverable flag; on success
 * (re-)arm the discoverable timeout and notify the requester plus all
 * other setting-event listeners.
 */
1302 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1304 struct mgmt_pending_cmd *cmd;
1306 BT_DBG("status 0x%02x", status);
1310 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* Error path: fail the command and clear the limited flag that was set
 * optimistically in set_discoverable().
 */
1315 u8 mgmt_err = mgmt_status(status);
1316 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1317 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm the delayed discov_off work only when a timeout is configured. */
1321 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1322 hdev->discov_timeout > 0) {
1323 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1324 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1327 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1328 new_settings(hdev, cmd->sk);
1331 mgmt_pending_remove(cmd);
1334 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 * val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable (which mandates a non-zero timeout). Validates the
 * request, short-circuits when powered off or when only the timeout
 * changes, otherwise queues the discoverable_update work with a pending
 * command that mgmt_set_discoverable_complete() will finish.
 */
1337 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1340 struct mgmt_cp_set_discoverable *cp = data;
1341 struct mgmt_pending_cmd *cmd;
1345 BT_DBG("request for %s", hdev->name);
/* Reject when neither transport is enabled. */
1347 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1348 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1349 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1350 MGMT_STATUS_REJECTED);
1352 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1353 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1354 MGMT_STATUS_INVALID_PARAMS);
1356 timeout = __le16_to_cpu(cp->timeout);
1358 /* Disabling discoverable requires that no timeout is set,
1359 * and enabling limited discoverable requires a timeout.
1361 if ((cp->val == 0x00 && timeout > 0) ||
1362 (cp->val == 0x02 && timeout == 0))
1363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1364 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
1368 if (!hdev_is_powered(hdev) && timeout > 0) {
1369 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1370 MGMT_STATUS_NOT_POWERED);
/* Only one discoverable/connectable transaction at a time. */
1374 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1375 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1376 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be enabled first. */
1381 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1382 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1383 MGMT_STATUS_REJECTED);
/* Powered-off: just toggle the stored flag, no HCI traffic. */
1387 if (!hdev_is_powered(hdev)) {
1388 bool changed = false;
1390 /* Setting limited discoverable when powered off is
1391 * not a valid operation since it requires a timeout
1392 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1394 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1395 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1399 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1404 err = new_settings(hdev, sk);
1409 /* If the current mode is the same, then just update the timeout
1410 * value with the new value. And if only the timeout gets updated,
1411 * then no need for any HCI transactions.
1413 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1414 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1415 HCI_LIMITED_DISCOVERABLE)) {
1416 cancel_delayed_work(&hdev->discov_off);
1417 hdev->discov_timeout = timeout;
1419 if (cp->val && hdev->discov_timeout > 0) {
1420 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1421 queue_delayed_work(hdev->req_workqueue,
1422 &hdev->discov_off, to);
1425 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1429 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1435 /* Cancel any potential discoverable timeout that might be
1436 * still active and store new timeout value. The arming of
1437 * the timeout happens in the complete handler.
1439 cancel_delayed_work(&hdev->discov_off);
1440 hdev->discov_timeout = timeout;
/* Update flags now; the complete handler rolls them back on failure. */
1443 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1445 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1447 /* Limited discoverable mode */
1448 if (cp->val == 0x02)
1449 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1451 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1453 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1457 hci_dev_unlock(hdev);
/* HCI request completion for Set Connectable: report the mapped error on
 * failure, otherwise confirm the new settings to the requester and
 * broadcast them to the remaining listeners.
 */
1461 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1463 struct mgmt_pending_cmd *cmd;
1465 BT_DBG("status 0x%02x", status);
1469 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1474 u8 mgmt_err = mgmt_status(status);
1475 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1479 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1480 new_settings(hdev, cmd->sk);
1483 mgmt_pending_remove(cmd);
1486 hci_dev_unlock(hdev);
/* Flag-only variant of Set Connectable used when no HCI transaction is
 * needed (e.g. controller powered off): toggle HCI_CONNECTABLE, drop
 * HCI_DISCOVERABLE when disabling, reply, and refresh scan state.
 */
1489 static int set_connectable_update_settings(struct hci_dev *hdev,
1490 struct sock *sk, u8 val)
1492 bool changed = false;
1495 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1499 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off also implies not discoverable. */
1501 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1502 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1505 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1510 hci_req_update_scan(hdev);
1511 hci_update_background_scan(hdev);
1512 return new_settings(hdev, sk);
/* Handler for MGMT_OP_SET_CONNECTABLE (val: 0x00/0x01). When powered off
 * only the stored flags change; otherwise a pending command is created
 * and the connectable_update work applies the change via HCI, completed
 * by mgmt_set_connectable_complete().
 */
1518 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1521 struct mgmt_mode *cp = data;
1522 struct mgmt_pending_cmd *cmd;
1525 BT_DBG("request for %s", hdev->name);
1527 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1528 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1529 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1530 MGMT_STATUS_REJECTED);
1532 if (cp->val != 0x00 && cp->val != 0x01)
1533 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1534 MGMT_STATUS_INVALID_PARAMS);
1538 if (!hdev_is_powered(hdev)) {
1539 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Serialize against in-flight discoverable/connectable changes. */
1543 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1544 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1545 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1550 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1557 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Disabling connectable: stop any discoverable timeout and clear the
 * dependent discoverable flags as well.
 */
1559 if (hdev->discov_timeout > 0)
1560 cancel_delayed_work(&hdev->discov_off);
1562 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1563 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1564 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1567 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1571 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BONDABLE (val: 0x00/0x01). Pure flag change —
 * no HCI command — but in limited-privacy mode a bondable change can
 * affect the advertising address, so the discoverable update is rerun.
 */
1575 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1578 struct mgmt_mode *cp = data;
1582 BT_DBG("request for %s", hdev->name);
1584 if (cp->val != 0x00 && cp->val != 0x01)
1585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1586 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear returns the previous state, so "changed" is true
 * only when the flag actually flipped.
 */
1591 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1593 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1595 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1600 /* In limited privacy mode the change of bondable mode
1601 * may affect the local advertising address.
1603 if (hdev_is_powered(hdev) &&
1604 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1605 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1606 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1607 queue_work(hdev->req_workqueue,
1608 &hdev->discoverable_update);
1610 err = new_settings(hdev, sk);
1614 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LINK_SECURITY. BR/EDR only. When powered off
 * only the HCI_LINK_SECURITY flag is toggled; when powered, the setting
 * is applied with HCI Write Authentication Enable unless it already
 * matches the controller's HCI_AUTH state.
 */
1618 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1621 struct mgmt_mode *cp = data;
1622 struct mgmt_pending_cmd *cmd;
1626 BT_DBG("request for %s", hdev->name);
1628 status = mgmt_bredr_support(hdev);
1630 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1633 if (cp->val != 0x00 && cp->val != 0x01)
1634 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1635 MGMT_STATUS_INVALID_PARAMS);
/* Powered-off: flag change only, no HCI traffic. */
1639 if (!hdev_is_powered(hdev)) {
1640 bool changed = false;
1642 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1643 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1647 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1652 err = new_settings(hdev, sk);
1657 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Already in the requested state: just confirm. */
1665 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1666 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1670 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1676 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
/* On send failure drop the pending entry so it cannot leak. */
1678 mgmt_pending_remove(cmd);
1683 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing, val: 0x00/0x01).
 * Requires BR/EDR and SSP capability. Powered-off requests only adjust
 * the stored flags (disabling SSP also disables HS); powered requests go
 * out as HCI Write Simple Pairing Mode, with SSP debug mode turned off
 * first when disabling while debug keys are in use.
 */
1687 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1689 struct mgmt_mode *cp = data;
1690 struct mgmt_pending_cmd *cmd;
1694 BT_DBG("request for %s", hdev->name);
1696 status = mgmt_bredr_support(hdev);
1698 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1700 if (!lmp_ssp_capable(hdev))
1701 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1702 MGMT_STATUS_NOT_SUPPORTED);
1704 if (cp->val != 0x00 && cp->val != 0x01)
1705 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1706 MGMT_STATUS_INVALID_PARAMS);
/* Powered-off: pure flag bookkeeping (flag names elided in excerpt). */
1710 if (!hdev_is_powered(hdev)) {
1714 changed = !hci_dev_test_and_set_flag(hdev,
1717 changed = hci_dev_test_and_clear_flag(hdev,
1720 changed = hci_dev_test_and_clear_flag(hdev,
1723 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1726 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1731 err = new_settings(hdev, sk);
1736 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1737 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1742 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1743 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1747 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* When disabling SSP with debug keys active, also switch debug mode off
 * (cp->val is 0x00 here, which is reused as the mode argument).
 */
1753 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1754 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1755 sizeof(cp->val), &cp->val);
1757 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1759 mgmt_pending_remove(cmd);
1764 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_HS (High Speed / AMP). Flag-only setting that
 * requires BR/EDR, SSP capability and SSP enabled. Disabling while the
 * controller is powered is rejected.
 */
1768 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1770 struct mgmt_mode *cp = data;
1775 BT_DBG("request for %s", hdev->name);
1777 status = mgmt_bredr_support(hdev);
1779 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1781 if (!lmp_ssp_capable(hdev))
1782 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1783 MGMT_STATUS_NOT_SUPPORTED);
1785 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1786 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1787 MGMT_STATUS_REJECTED);
1789 if (cp->val != 0x00 && cp->val != 0x01)
1790 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1791 MGMT_STATUS_INVALID_PARAMS);
/* An in-flight SSP change could invalidate this request — defer. */
1795 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1796 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1802 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS is only allowed while powered off. */
1804 if (hdev_is_powered(hdev)) {
1805 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1806 MGMT_STATUS_REJECTED);
1810 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1813 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1818 err = new_settings(hdev, sk);
1821 hci_dev_unlock(hdev);
/* HCI completion for the Set LE request: fail or confirm every pending
 * SET_LE command, broadcast new settings, and — when LE just became
 * enabled — refresh advertising/scan-response data and background scan.
 */
1825 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1827 struct cmd_lookup match = { NULL, hdev };
/* Error path: map the HCI status and fail all pending SET_LE commands. */
1832 u8 mgmt_err = mgmt_status(status);
1834 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1839 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1841 new_settings(hdev, match.sk);
1846 /* Make sure the controller has a good default for
1847 * advertising data. Restrict the update to when LE
1848 * has actually been enabled. During power on, the
1849 * update in powered_update_hci will take care of it.
1851 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1852 struct hci_request req;
1853 hci_req_init(&req, hdev);
1854 if (ext_adv_capable(hdev)) {
/* Extended advertising: set up instance 0 and its scan response. */
1857 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1859 __hci_req_update_scan_rsp_data(&req, 0x00);
/* Legacy advertising: refresh both adv and scan-response data. */
1861 __hci_req_update_adv_data(&req, 0x00);
1862 __hci_req_update_scan_rsp_data(&req, 0x00);
1864 hci_req_run(&req, NULL);
1865 hci_update_background_scan(hdev);
1869 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LE (val: 0x00/0x01). LE-only configurations
 * cannot switch LE off. When powered off, or when the host already has
 * the requested LE state, only flags change; otherwise an HCI request is
 * built (disable advertising first when turning LE off) that writes LE
 * Host Supported, completed by le_enable_complete().
 */
1872 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1874 struct mgmt_mode *cp = data;
1875 struct hci_cp_write_le_host_supported hci_cp;
1876 struct mgmt_pending_cmd *cmd;
1877 struct hci_request req;
1881 BT_DBG("request for %s", hdev->name);
1883 if (!lmp_le_capable(hdev))
1884 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1885 MGMT_STATUS_NOT_SUPPORTED);
1887 if (cp->val != 0x00 && cp->val != 0x01)
1888 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1889 MGMT_STATUS_INVALID_PARAMS);
1891 /* Bluetooth single mode LE only controllers or dual-mode
1892 * controllers configured as LE only devices, do not allow
1893 * switching LE off. These have either LE enabled explicitly
1894 * or BR/EDR has been previously switched off.
1896 * When trying to enable an already enabled LE, then gracefully
1897 * send a positive response. Trying to disable it however will
1898 * result into rejection.
1900 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1901 if (cp->val == 0x01)
1902 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1904 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1905 MGMT_STATUS_REJECTED);
1911 enabled = lmp_host_le_capable(hdev);
/* Turning LE off: drop all advertising instances first. */
1914 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1916 if (!hdev_is_powered(hdev) || val == enabled) {
1917 bool changed = false;
1919 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1920 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* LE off implies advertising off as well. */
1924 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1925 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1929 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1934 err = new_settings(hdev, sk);
1939 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1940 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1941 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1946 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1952 hci_req_init(&req, hdev);
1954 memset(&hci_cp, 0, sizeof(hci_cp));
/* LE/BR-EDR simultaneous mode is never advertised to the controller. */
1958 hci_cp.simul = 0x00;
1960 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1961 __hci_req_disable_advertising(&req);
1963 if (ext_adv_capable(hdev))
1964 __hci_req_clear_ext_adv_sets(&req);
1967 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1970 err = hci_req_run(&req, le_enable_complete);
1972 mgmt_pending_remove(cmd);
1975 hci_dev_unlock(hdev);
1979 /* This is a helper function to test for pending mgmt commands that can
1980 * cause CoD or EIR HCI commands. We can only allow one such pending
1981 * mgmt command at a time since otherwise we cannot easily track what
1982 * the current values are, will be, and based on that calculate if a new
1983 * HCI command needs to be sent and if yes with what value.
/* Return whether any pending mgmt command that may generate Class of
 * Device or EIR HCI traffic is in flight (see block comment above).
 * NOTE(review): the "return true"/"return false" lines are elided from
 * this excerpt.
 */
1985 static bool pending_eir_or_class(struct hci_dev *hdev)
1987 struct mgmt_pending_cmd *cmd;
1989 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1990 switch (cmd->opcode) {
1991 case MGMT_OP_ADD_UUID:
1992 case MGMT_OP_REMOVE_UUID:
1993 case MGMT_OP_SET_DEV_CLASS:
1994 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16-bit/32-bit UUIDs are derived from it.
 */
2002 static const u8 bluetooth_base_uuid[] = {
2003 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2004 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID by its shortest representation. A UUID whose
 * low 12 bytes differ from the Bluetooth base UUID is a full 128-bit
 * UUID; otherwise the 32-bit value at offset 12 decides between 16 and
 * 32 bits. NOTE(review): the return statements are elided here.
 */
2007 static u8 get_uuid_size(const u8 *uuid)
2011 if (memcmp(uuid, bluetooth_base_uuid, 12))
2014 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for the class/EIR commands (@mgmt_op): answer the
 * matching pending command with the current 3-byte device class.
 */
2021 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2023 struct mgmt_pending_cmd *cmd;
2027 cmd = pending_find(mgmt_op, hdev);
2031 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2032 mgmt_status(status), hdev->dev_class, 3);
2034 mgmt_pending_remove(cmd);
2037 hci_dev_unlock(hdev);
/* HCI request completion for Add UUID — delegate to the shared class
 * completion helper.
 */
2040 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2042 BT_DBG("status 0x%02x", status);
2044 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for MGMT_OP_ADD_UUID: append the UUID to hdev->uuids, then
 * refresh Class of Device and EIR via an HCI request. If the request
 * queues no HCI commands (-ENODATA) reply immediately; otherwise park a
 * pending command for add_uuid_complete().
 */
2047 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2049 struct mgmt_cp_add_uuid *cp = data;
2050 struct mgmt_pending_cmd *cmd;
2051 struct hci_request req;
2052 struct bt_uuid *uuid;
2055 BT_DBG("request for %s", hdev->name);
/* Only one class/EIR-affecting command may be pending at a time. */
2059 if (pending_eir_or_class(hdev)) {
2060 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2065 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2071 memcpy(uuid->uuid, cp->uuid, 16);
2072 uuid->svc_hint = cp->svc_hint;
2073 uuid->size = get_uuid_size(cp->uuid);
2075 list_add_tail(&uuid->list, &hdev->uuids);
2077 hci_req_init(&req, hdev);
2079 __hci_req_update_class(&req);
2080 __hci_req_update_eir(&req);
2082 err = hci_req_run(&req, add_uuid_complete);
2084 if (err != -ENODATA)
/* Nothing needed on the wire: complete synchronously. */
2087 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2088 hdev->dev_class, 3);
2092 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2101 hci_dev_unlock(hdev);
/* Arm the service cache on a powered controller. Returns whether the
 * cache was (or became) active; only the first activation queues the
 * delayed service_cache work (timeout constant elided in this excerpt).
 */
2105 static bool enable_service_cache(struct hci_dev *hdev)
2107 if (!hdev_is_powered(hdev))
2110 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2111 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for Remove UUID — delegate to the shared class
 * completion helper.
 */
2119 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2121 BT_DBG("status 0x%02x", status);
2123 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for MGMT_OP_REMOVE_UUID: an all-zero UUID clears the whole
 * list (re-arming the service cache when possible), otherwise every
 * matching entry is unlinked. Class/EIR are then refreshed like in
 * add_uuid(), completed by remove_uuid_complete().
 */
2126 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2129 struct mgmt_cp_remove_uuid *cp = data;
2130 struct mgmt_pending_cmd *cmd;
2131 struct bt_uuid *match, *tmp;
2132 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2133 struct hci_request req;
2136 BT_DBG("request for %s", hdev->name);
2140 if (pending_eir_or_class(hdev)) {
2141 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: remove every stored UUID. */
2146 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2147 hci_uuids_clear(hdev);
2149 if (enable_service_cache(hdev)) {
2150 err = mgmt_cmd_complete(sk, hdev->id,
2151 MGMT_OP_REMOVE_UUID,
2152 0, hdev->dev_class, 3);
/* Remove every entry matching the given UUID. */
2161 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2162 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2165 list_del(&match->list);
/* No entry matched: invalid parameter. */
2171 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2172 MGMT_STATUS_INVALID_PARAMS);
2177 hci_req_init(&req, hdev);
2179 __hci_req_update_class(&req);
2180 __hci_req_update_eir(&req);
2182 err = hci_req_run(&req, remove_uuid_complete);
2184 if (err != -ENODATA)
2187 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2188 hdev->dev_class, 3);
2192 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2201 hci_dev_unlock(hdev);
/* HCI request completion for Set Device Class — delegate to the shared
 * class completion helper.
 */
2205 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2207 BT_DBG("status 0x%02x", status);
2209 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for MGMT_OP_SET_DEV_CLASS. Validates the reserved bits
 * (minor low 2 bits and major top 3 bits must be zero), stores the new
 * major/minor class, and — when powered — pushes the class (and EIR if
 * the service cache had to be flushed) to the controller.
 */
2212 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2215 struct mgmt_cp_set_dev_class *cp = data;
2216 struct mgmt_pending_cmd *cmd;
2217 struct hci_request req;
2220 BT_DBG("request for %s", hdev->name);
2222 if (!lmp_bredr_capable(hdev))
2223 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2224 MGMT_STATUS_NOT_SUPPORTED);
2228 if (pending_eir_or_class(hdev)) {
2229 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved CoD bits must be zero. */
2234 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2235 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2236 MGMT_STATUS_INVALID_PARAMS);
2240 hdev->major_class = cp->major;
2241 hdev->minor_class = cp->minor;
/* Powered off: store only, reply with the cached class. */
2243 if (!hdev_is_powered(hdev)) {
2244 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2245 hdev->dev_class, 3);
2249 hci_req_init(&req, hdev);
/* If the service cache was active, flush it (the cancel must run
 * unlocked since the work itself takes hdev->lock) and refresh EIR.
 */
2251 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2252 hci_dev_unlock(hdev);
2253 cancel_delayed_work_sync(&hdev->service_cache);
2255 __hci_req_update_eir(&req);
2258 __hci_req_update_class(&req);
2260 err = hci_req_run(&req, set_class_complete);
2262 if (err != -ENODATA)
2265 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2266 hdev->dev_class, 3);
2270 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2279 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replace the BR/EDR link-key store.
 * Validates key_count against both an overflow-safe maximum and the
 * actual payload length, validates every entry, then clears and reloads
 * the key list (skipping debug combination keys).
 */
2283 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2286 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound on key_count such that expected_len cannot exceed U16_MAX. */
2287 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2288 sizeof(struct mgmt_link_key_info));
2289 u16 key_count, expected_len;
2293 BT_DBG("request for %s", hdev->name);
2295 if (!lmp_bredr_capable(hdev))
2296 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2297 MGMT_STATUS_NOT_SUPPORTED);
2299 key_count = __le16_to_cpu(cp->key_count);
2300 if (key_count > max_key_count) {
2301 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2303 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2304 MGMT_STATUS_INVALID_PARAMS);
/* The wire length must match the declared key count exactly. */
2307 expected_len = sizeof(*cp) + key_count *
2308 sizeof(struct mgmt_link_key_info);
2309 if (expected_len != len) {
2310 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2312 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2313 MGMT_STATUS_INVALID_PARAMS);
2316 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2317 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2318 MGMT_STATUS_INVALID_PARAMS);
2320 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate all entries before touching the existing key store. */
2323 for (i = 0; i < key_count; i++) {
2324 struct mgmt_link_key_info *key = &cp->keys[i];
2326 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2327 return mgmt_cmd_status(sk, hdev->id,
2328 MGMT_OP_LOAD_LINK_KEYS,
2329 MGMT_STATUS_INVALID_PARAMS);
2334 hci_link_keys_clear(hdev);
2337 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2339 changed = hci_dev_test_and_clear_flag(hdev,
2340 HCI_KEEP_DEBUG_KEYS);
2343 new_settings(hdev, NULL);
2345 for (i = 0; i < key_count; i++) {
2346 struct mgmt_link_key_info *key = &cp->keys[i];
2348 /* Always ignore debug keys and require a new pairing if
2349 * the user wants to use them.
2351 if (key->type == HCI_LK_DEBUG_COMBINATION)
2354 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2355 key->type, key->pin_len, NULL);
2358 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2360 hci_dev_unlock(hdev);
/* Emit a Device Unpaired event for @bdaddr/@addr_type to every mgmt
 * socket except @skip_sk (the requester, which already got a reply).
 */
2365 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2366 u8 addr_type, struct sock *skip_sk)
2368 struct mgmt_ev_device_unpaired ev;
2370 bacpy(&ev.addr.bdaddr, bdaddr);
2371 ev.addr.type = addr_type;
2373 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for MGMT_OP_UNPAIR_DEVICE: delete the pairing material for the
 * address (link key for BR/EDR; SMP LTK/IRK for LE), optionally
 * disconnect the device, and notify other sockets via Device Unpaired.
 * When a disconnect is requested and a live connection exists, the reply
 * is deferred until hci_abort_conn() completes.
 */
2377 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2380 struct mgmt_cp_unpair_device *cp = data;
2381 struct mgmt_rp_unpair_device rp;
2382 struct hci_conn_params *params;
2383 struct mgmt_pending_cmd *cmd;
2384 struct hci_conn *conn;
2388 memset(&rp, 0, sizeof(rp));
2389 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2390 rp.addr.type = cp->addr.type;
2392 if (!bdaddr_type_is_valid(cp->addr.type))
2393 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2394 MGMT_STATUS_INVALID_PARAMS,
2397 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2398 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2399 MGMT_STATUS_INVALID_PARAMS,
2404 if (!hdev_is_powered(hdev)) {
2405 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2406 MGMT_STATUS_NOT_POWERED, &rp,
/* BR/EDR branch: look up the ACL link and remove the link key. */
2411 if (cp->addr.type == BDADDR_BREDR) {
2412 /* If disconnection is requested, then look up the
2413 * connection. If the remote device is connected, it
2414 * will be later used to terminate the link.
2416 * Setting it to NULL explicitly will cause no
2417 * termination of the link.
2420 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2425 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2427 err = mgmt_cmd_complete(sk, hdev->id,
2428 MGMT_OP_UNPAIR_DEVICE,
2429 MGMT_STATUS_NOT_PAIRED, &rp,
2437 /* LE address type */
2438 addr_type = le_addr_type(cp->addr.type);
2440 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2441 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2443 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2444 MGMT_STATUS_NOT_PAIRED, &rp,
2449 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2451 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2456 /* Defer clearing up the connection parameters until closing to
2457 * give a chance of keeping them if a repairing happens.
2459 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2461 /* Disable auto-connection parameters if present */
2462 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2464 if (params->explicit_connect)
2465 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2467 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2470 /* If disconnection is not requested, then clear the connection
2471 * variable so that the link is not terminated.
2473 if (!cp->disconnect)
2477 /* If the connection variable is set, then termination of the
2478 * link is requested.
2481 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2483 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Deferred path: park a pending command until the abort completes. */
2487 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2494 cmd->cmd_complete = addr_cmd_complete;
2496 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2498 mgmt_pending_remove(cmd);
2501 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_DISCONNECT: find the ACL (BR/EDR) or LE link for
 * the given address and request a user-terminated disconnect. The reply
 * is completed asynchronously through the pending command.
 */
2505 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2508 struct mgmt_cp_disconnect *cp = data;
2509 struct mgmt_rp_disconnect rp;
2510 struct mgmt_pending_cmd *cmd;
2511 struct hci_conn *conn;
2516 memset(&rp, 0, sizeof(rp));
2517 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2518 rp.addr.type = cp->addr.type;
2520 if (!bdaddr_type_is_valid(cp->addr.type))
2521 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2522 MGMT_STATUS_INVALID_PARAMS,
2527 if (!test_bit(HCI_UP, &hdev->flags)) {
2528 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2529 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one Disconnect command may be in flight. */
2534 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2535 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2536 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2540 if (cp->addr.type == BDADDR_BREDR)
2541 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2544 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2545 le_addr_type(cp->addr.type));
2547 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2549 MGMT_STATUS_NOT_CONNECTED, &rp,
2554 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2560 cmd->cmd_complete = generic_cmd_complete;
2562 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2564 mgmt_pending_remove(cmd);
2567 hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into the mgmt BDADDR_*
 * address type. NOTE(review): the case labels for LE_LINK and the random
 * LE address type are on lines elided from this excerpt.
 */
2571 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2573 switch (link_type) {
2575 switch (addr_type) {
2576 case ADDR_LE_DEV_PUBLIC:
2577 return BDADDR_LE_PUBLIC;
2580 /* Fallback to LE Random address type */
2581 return BDADDR_LE_RANDOM;
2585 /* Fallback to BR/EDR type */
2586 return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS: report the address of every
 * mgmt-visible connection, filtering out (e)SCO links which are
 * secondary to their ACL link.
 */
2590 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2593 struct mgmt_rp_get_connections *rp;
2603 if (!hdev_is_powered(hdev)) {
2604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2605 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply. */
2610 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2611 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2615 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2616 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
2623 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2624 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2626 bacpy(&rp->addr[i].bdaddr, &c->dst);
2627 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2628 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2633 rp->conn_count = cpu_to_le16(i);
2635 /* Recalculate length in case of filtered SCO connections, etc */
2636 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2638 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2644 hci_dev_unlock(hdev);
/* Queue a PIN Code Negative Reply to the controller, with a pending
 * command that completes once the controller answers.
 */
2648 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2649 struct mgmt_cp_pin_code_neg_reply *cp)
2651 struct mgmt_pending_cmd *cmd;
2654 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2659 cmd->cmd_complete = addr_cmd_complete;
2661 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2662 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2664 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward the PIN to the controller
 * for the pending ACL connection. A high-security pairing demands a full
 * 16-byte PIN; anything shorter is converted into a negative reply.
 */
2669 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2672 struct hci_conn *conn;
2673 struct mgmt_cp_pin_code_reply *cp = data;
2674 struct hci_cp_pin_code_reply reply;
2675 struct mgmt_pending_cmd *cmd;
2682 if (!hdev_is_powered(hdev)) {
2683 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2684 MGMT_STATUS_NOT_POWERED);
2688 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2690 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2691 MGMT_STATUS_NOT_CONNECTED);
/* Security level HIGH requires a 16-digit PIN; reject shorter ones
 * with a negative reply to the controller.
 */
2695 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2696 struct mgmt_cp_pin_code_neg_reply ncp;
2698 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2700 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2702 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2704 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2705 MGMT_STATUS_INVALID_PARAMS);
2710 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2716 cmd->cmd_complete = addr_cmd_complete;
2718 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2719 reply.pin_len = cp->pin_len;
2720 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2722 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2724 mgmt_pending_remove(cmd);
2727 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY: store the IO capability used
 * for future pairings. Synchronous — no HCI traffic needed.
 */
2731 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2734 struct mgmt_cp_set_io_capability *cp = data;
2738 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2739 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2740 MGMT_STATUS_INVALID_PARAMS);
2744 hdev->io_capability = cp->io_capability;
2746 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2747 hdev->io_capability);
2749 hci_dev_unlock(hdev);
2751 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending Pair Device command whose user_data is @conn.
 * NOTE(review): the "return cmd"/"return NULL" lines are elided from
 * this excerpt.
 */
2755 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2757 struct hci_dev *hdev = conn->hdev;
2758 struct mgmt_pending_cmd *cmd;
2760 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2761 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2764 if (cmd->user_data != conn)
/* Finish a Pair Device command: reply with the peer address, detach all
 * connection callbacks so no further events reach this command, drop the
 * connection reference taken for the pairing, and keep the stored
 * connection parameters (the device is now paired).
 */
2773 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2775 struct mgmt_rp_pair_device rp;
2776 struct hci_conn *conn = cmd->user_data;
2779 bacpy(&rp.addr.bdaddr, &conn->dst);
2780 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2782 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2783 status, &rp, sizeof(rp));
2785 /* So we don't get further callbacks for this connection */
2786 conn->connect_cfm_cb = NULL;
2787 conn->security_cfm_cb = NULL;
2788 conn->disconn_cfm_cb = NULL;
2790 hci_conn_drop(conn);
2792 /* The device is paired so there is no need to remove
2793 * its connection parameters anymore.
2795 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* SMP layer callback: complete any pending Pair Device command for this
 * connection with SUCCESS or FAILED depending on @complete.
 */
2802 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2804 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2805 struct mgmt_pending_cmd *cmd;
2807 cmd = find_pairing(conn);
2809 cmd->cmd_complete(cmd, status);
2810 mgmt_pending_remove(cmd);
/* BR/EDR connection callback: resolve the pending pairing command with the
 * HCI status translated to a mgmt status.
 */
2814 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2816 struct mgmt_pending_cmd *cmd;
2818 BT_DBG("status %u", status);
2820 cmd = find_pairing(conn);
2822 BT_DBG("Unable to find a pending command");
2826 cmd->cmd_complete(cmd, mgmt_status(status));
2827 mgmt_pending_remove(cmd);
/* LE connection callback: like pairing_complete_cb(), completes the
 * pending pairing command. NOTE(review): the LE-specific early-return
 * condition (lines 2835-2838) is missing from this excerpt.
 */
2830 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2832 struct mgmt_pending_cmd *cmd;
2834 BT_DBG("status %u", status);
2839 cmd = find_pairing(conn);
2841 BT_DBG("Unable to find a pending command");
2845 cmd->cmd_complete(cmd, mgmt_status(status));
2846 mgmt_pending_remove(cmd);
/* MGMT Pair Device command handler.
 *
 * Validates the address and IO capability, initiates an ACL (BR/EDR) or
 * LE connection as appropriate, registers a pending command whose
 * callbacks drive completion, and triggers security establishment if the
 * link is already up.
 *
 * NOTE(review): several lines (locking, goto targets, closing braces) are
 * missing from this excerpt — comments cover only visible code.
 */
2849 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2852 struct mgmt_cp_pair_device *cp = data;
2853 struct mgmt_rp_pair_device rp;
2854 struct mgmt_pending_cmd *cmd;
2855 u8 sec_level, auth_type;
2856 struct hci_conn *conn;
/* Echo the target address back in every reply. */
2861 memset(&rp, 0, sizeof(rp));
2862 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2863 rp.addr.type = cp->addr.type;
2865 if (!bdaddr_type_is_valid(cp->addr.type))
2866 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2867 MGMT_STATUS_INVALID_PARAMS,
2870 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2871 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2872 MGMT_STATUS_INVALID_PARAMS,
2877 if (!hdev_is_powered(hdev)) {
2878 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2879 MGMT_STATUS_NOT_POWERED, &rp,
/* Refuse to pair twice with the same address/type. */
2884 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2885 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2886 MGMT_STATUS_ALREADY_PAIRED, &rp,
2891 sec_level = BT_SECURITY_MEDIUM;
2892 auth_type = HCI_AT_DEDICATED_BONDING;
2894 if (cp->addr.type == BDADDR_BREDR) {
2895 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2898 u8 addr_type = le_addr_type(cp->addr.type);
2899 struct hci_conn_params *p;
2901 /* When pairing a new device, it is expected to remember
2902 * this device for future connections. Adding the connection
2903 * parameter information ahead of time allows tracking
2904 * of the slave preferred values and will speed up any
2905 * further connection establishment.
2907 * If connection parameters already exist, then they
2908 * will be kept and this function does nothing.
2910 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2912 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2913 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2915 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2916 addr_type, sec_level,
2917 HCI_LE_CONN_TIMEOUT);
/* Map the connect error to the closest mgmt status code. */
2923 if (PTR_ERR(conn) == -EBUSY)
2924 status = MGMT_STATUS_BUSY;
2925 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2926 status = MGMT_STATUS_NOT_SUPPORTED;
2927 else if (PTR_ERR(conn) == -ECONNREFUSED)
2928 status = MGMT_STATUS_REJECTED;
2930 status = MGMT_STATUS_CONNECT_FAILED;
2932 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2933 status, &rp, sizeof(rp));
/* A connection with callbacks already set means another pairing is
 * in progress on it.
 */
2937 if (conn->connect_cfm_cb) {
2938 hci_conn_drop(conn);
2939 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2940 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2944 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2947 hci_conn_drop(conn);
2951 cmd->cmd_complete = pairing_complete;
2953 /* For LE, just connecting isn't a proof that the pairing finished */
2954 if (cp->addr.type == BDADDR_BREDR) {
2955 conn->connect_cfm_cb = pairing_complete_cb;
2956 conn->security_cfm_cb = pairing_complete_cb;
2957 conn->disconn_cfm_cb = pairing_complete_cb;
2959 conn->connect_cfm_cb = le_pairing_complete_cb;
2960 conn->security_cfm_cb = le_pairing_complete_cb;
2961 conn->disconn_cfm_cb = le_pairing_complete_cb;
2964 conn->io_capability = cp->io_cap;
/* Hold a reference on the connection for the pending command. */
2965 cmd->user_data = hci_conn_get(conn);
2967 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2968 hci_conn_security(conn, sec_level, auth_type, true)) {
2969 cmd->cmd_complete(cmd, 0);
2970 mgmt_pending_remove(cmd);
2976 hci_dev_unlock(hdev);
/* MGMT Cancel Pair Device command handler: abort the pending Pair Device
 * command for the given address, completing it with CANCELLED.
 */
2980 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2983 struct mgmt_addr_info *addr = data;
2984 struct mgmt_pending_cmd *cmd;
2985 struct hci_conn *conn;
2992 if (!hdev_is_powered(hdev)) {
2993 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2994 MGMT_STATUS_NOT_POWERED);
/* No pending pairing at all -> nothing to cancel. */
2998 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3000 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3001 MGMT_STATUS_INVALID_PARAMS);
3005 conn = cmd->user_data;
/* The address must match the one the pending pairing targets. */
3007 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3008 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3009 MGMT_STATUS_INVALID_PARAMS);
3013 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3014 mgmt_pending_remove(cmd);
3016 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3017 addr, sizeof(*addr));
3019 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN, confirm, passkey and
 * their negative variants): for LE connections the reply is routed through
 * SMP; for BR/EDR the matching HCI command @hci_op is issued and a pending
 * mgmt command tracks its completion.
 */
3023 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3024 struct mgmt_addr_info *addr, u16 mgmt_op,
3025 u16 hci_op, __le32 passkey)
3027 struct mgmt_pending_cmd *cmd;
3028 struct hci_conn *conn;
3033 if (!hdev_is_powered(hdev)) {
3034 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3035 MGMT_STATUS_NOT_POWERED, addr,
3040 if (addr->type == BDADDR_BREDR)
3041 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3043 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3044 le_addr_type(addr->type));
3047 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3048 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go through SMP, not HCI. */
3053 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3054 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3056 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3057 MGMT_STATUS_SUCCESS, addr,
3060 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3061 MGMT_STATUS_FAILED, addr,
3067 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3073 cmd->cmd_complete = addr_cmd_complete;
3075 /* Continue with pairing via HCI */
3076 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3077 struct hci_cp_user_passkey_reply cp;
3079 bacpy(&cp.bdaddr, &addr->bdaddr);
3080 cp.passkey = passkey;
3081 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3083 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* On send failure the pending command is dropped again. */
3087 mgmt_pending_remove(cmd);
3090 hci_dev_unlock(hdev);
/* MGMT PIN Code Negative Reply: thin wrapper around user_pairing_resp(). */
3094 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3095 void *data, u16 len)
3097 struct mgmt_cp_pin_code_neg_reply *cp = data;
3101 return user_pairing_resp(sk, hdev, &cp->addr,
3102 MGMT_OP_PIN_CODE_NEG_REPLY,
3103 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT User Confirmation Reply: validate payload size, then delegate to
 * user_pairing_resp().
 */
3106 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3109 struct mgmt_cp_user_confirm_reply *cp = data;
3113 if (len != sizeof(*cp))
3114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3115 MGMT_STATUS_INVALID_PARAMS);
3117 return user_pairing_resp(sk, hdev, &cp->addr,
3118 MGMT_OP_USER_CONFIRM_REPLY,
3119 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT User Confirmation Negative Reply: thin wrapper around
 * user_pairing_resp().
 */
3122 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3123 void *data, u16 len)
3125 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3129 return user_pairing_resp(sk, hdev, &cp->addr,
3130 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3131 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT User Passkey Reply: forwards the passkey via user_pairing_resp(). */
3134 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3137 struct mgmt_cp_user_passkey_reply *cp = data;
3141 return user_pairing_resp(sk, hdev, &cp->addr,
3142 MGMT_OP_USER_PASSKEY_REPLY,
3143 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT User Passkey Negative Reply: thin wrapper around
 * user_pairing_resp().
 */
3146 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3147 void *data, u16 len)
3149 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3153 return user_pairing_resp(sk, hdev, &cp->addr,
3154 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3155 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Force the current advertising instance to be rescheduled when data it
 * advertises (matched against @flags, e.g. local name or appearance) has
 * changed: cancel the running timeout and re-schedule the instance.
 */
3158 static void adv_expire(struct hci_dev *hdev, u32 flags)
3160 struct adv_info *adv_instance;
3161 struct hci_request req;
3164 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3168 /* stop if current instance doesn't need to be changed */
3169 if (!(adv_instance->flags & flags))
3172 cancel_adv_timeout(hdev);
3174 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3178 hci_req_init(&req, hdev);
3179 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3184 hci_req_run(&req, NULL);
/* HCI request completion callback for Set Local Name: report the outcome
 * to the pending mgmt command and, if the name is currently advertised,
 * expire the advertising instance so it picks up the new name.
 */
3187 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3189 struct mgmt_cp_set_local_name *cp;
3190 struct mgmt_pending_cmd *cmd;
3192 BT_DBG("status 0x%02x", status);
3196 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3203 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3204 mgmt_status(status));
3206 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3209 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3210 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3213 mgmt_pending_remove(cmd);
3216 hci_dev_unlock(hdev);
/* MGMT Set Local Name command handler: short-circuit when nothing changed
 * or the controller is off; otherwise update name/EIR (BR/EDR) and scan
 * response data (LE) via an HCI request tracked by a pending command.
 */
3219 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3222 struct mgmt_cp_set_local_name *cp = data;
3223 struct mgmt_pending_cmd *cmd;
3224 struct hci_request req;
3231 /* If the old values are the same as the new ones just return a
3232 * direct command complete event.
3234 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3235 !memcmp(hdev->short_name, cp->short_name,
3236 sizeof(hdev->short_name))) {
3237 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3242 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* While powered off the name only needs to be stored and announced. */
3244 if (!hdev_is_powered(hdev)) {
3245 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3247 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3252 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3253 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3254 ext_info_changed(hdev, sk);
3259 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3265 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3267 hci_req_init(&req, hdev);
3269 if (lmp_bredr_capable(hdev)) {
3270 __hci_req_update_name(&req);
3271 __hci_req_update_eir(&req);
3274 /* The name is stored in the scan response data and so
3275 * no need to update the advertising data here.
3277 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3278 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3280 err = hci_req_run(&req, set_name_complete);
3282 mgmt_pending_remove(cmd);
3285 hci_dev_unlock(hdev);
/* MGMT Set Appearance command handler: store the new LE appearance value
 * and expire advertising that includes the appearance.
 *
 * NOTE(review): the local variable is misspelled "apperance" (its
 * declaration line is not visible in this excerpt); a rename would be a
 * harmless cleanup once the full function is in view.
 */
3289 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3292 struct mgmt_cp_set_appearance *cp = data;
3298 if (!lmp_le_capable(hdev))
3299 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3300 MGMT_STATUS_NOT_SUPPORTED);
3302 apperance = le16_to_cpu(cp->appearance);
3306 if (hdev->appearance != apperance) {
3307 hdev->appearance = apperance;
3309 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3310 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3312 ext_info_changed(hdev, sk);
3315 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3318 hci_dev_unlock(hdev);
/* MGMT Get PHY Configuration command handler: report supported, selected
 * and configurable PHYs. (The struct tag "confguration" misspelling comes
 * from the mgmt API header and cannot be changed here.)
 */
3323 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3324 void *data, u16 len)
3326 struct mgmt_rp_get_phy_confguration rp;
3328 BT_DBG("sock %p %s", sk, hdev->name);
3332 memset(&rp, 0, sizeof(rp));
3334 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3335 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3336 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3338 hci_dev_unlock(hdev);
3340 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event carrying the currently
 * selected PHYs, skipping the socket @skip (typically the requester).
 */
3344 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3346 struct mgmt_ev_phy_configuration_changed ev;
3348 memset(&ev, 0, sizeof(ev));
3350 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3352 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion callback for the LE Set Default PHY request issued by
 * set_phy_configuration(): finish the pending mgmt command and, on
 * success, notify other sockets of the change.
 */
3356 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3357 u16 opcode, struct sk_buff *skb)
3359 struct mgmt_pending_cmd *cmd;
3361 BT_DBG("status 0x%02x", status);
3365 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3370 mgmt_cmd_status(cmd->sk, hdev->id,
3371 MGMT_OP_SET_PHY_CONFIGURATION,
3372 mgmt_status(status));
3374 mgmt_cmd_complete(cmd->sk, hdev->id,
3375 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3378 mgmt_phy_configuration_changed(hdev, cmd->sk);
3381 mgmt_pending_remove(cmd);
3384 hci_dev_unlock(hdev);
/* MGMT Set PHY Configuration command handler.
 *
 * Validates the requested PHY mask against supported/configurable PHYs,
 * translates the BR/EDR PHY bits into the ACL packet-type bitmask stored
 * in hdev->pkt_type, and issues an HCI LE Set Default PHY command for the
 * LE bits when they changed.
 */
3387 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3388 void *data, u16 len)
3390 struct mgmt_cp_set_phy_confguration *cp = data;
3391 struct hci_cp_le_set_default_phy cp_phy;
3392 struct mgmt_pending_cmd *cmd;
3393 struct hci_request req;
3394 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
/* DH1/DM1 are always allowed; the other packet types are toggled below. */
3395 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3396 bool changed = false;
3399 BT_DBG("sock %p %s", sk, hdev->name);
3401 configurable_phys = get_configurable_phys(hdev);
3402 supported_phys = get_supported_phys(hdev);
3403 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject any PHY bit the controller does not support at all. */
3405 if (selected_phys & ~supported_phys)
3406 return mgmt_cmd_status(sk, hdev->id,
3407 MGMT_OP_SET_PHY_CONFIGURATION,
3408 MGMT_STATUS_INVALID_PARAMS);
3410 unconfigure_phys = supported_phys & ~configurable_phys;
/* Mandatory (non-configurable) PHYs must all remain selected. */
3412 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3413 return mgmt_cmd_status(sk, hdev->id,
3414 MGMT_OP_SET_PHY_CONFIGURATION,
3415 MGMT_STATUS_INVALID_PARAMS);
3417 if (selected_phys == get_selected_phys(hdev))
3418 return mgmt_cmd_complete(sk, hdev->id,
3419 MGMT_OP_SET_PHY_CONFIGURATION,
3424 if (!hdev_is_powered(hdev)) {
3425 err = mgmt_cmd_status(sk, hdev->id,
3426 MGMT_OP_SET_PHY_CONFIGURATION,
3427 MGMT_STATUS_REJECTED);
3431 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3432 err = mgmt_cmd_status(sk, hdev->id,
3433 MGMT_OP_SET_PHY_CONFIGURATION,
/* BR 1M multi-slot bits enable packet types directly... */
3438 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3439 pkt_type |= (HCI_DH3 | HCI_DM3);
3441 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3443 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3444 pkt_type |= (HCI_DH5 | HCI_DM5);
3446 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* ...while EDR bits are inverted: HCI_2DHx/HCI_3DHx set means
 * "EDR packet type NOT allowed".
 */
3448 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3449 pkt_type &= ~HCI_2DH1;
3451 pkt_type |= HCI_2DH1;
3453 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3454 pkt_type &= ~HCI_2DH3;
3456 pkt_type |= HCI_2DH3;
3458 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3459 pkt_type &= ~HCI_2DH5;
3461 pkt_type |= HCI_2DH5;
3463 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3464 pkt_type &= ~HCI_3DH1;
3466 pkt_type |= HCI_3DH1;
3468 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3469 pkt_type &= ~HCI_3DH3;
3471 pkt_type |= HCI_3DH3;
3473 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3474 pkt_type &= ~HCI_3DH5;
3476 pkt_type |= HCI_3DH5;
3478 if (pkt_type != hdev->pkt_type) {
3479 hdev->pkt_type = pkt_type;
/* If only BR/EDR bits changed there is no HCI command to send. */
3483 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3484 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3486 mgmt_phy_configuration_changed(hdev, sk);
3488 err = mgmt_cmd_complete(sk, hdev->id,
3489 MGMT_OP_SET_PHY_CONFIGURATION,
3495 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3502 hci_req_init(&req, hdev);
3504 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction. */
3506 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3507 cp_phy.all_phys |= 0x01;
3509 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3510 cp_phy.all_phys |= 0x02;
3512 if (selected_phys & MGMT_PHY_LE_1M_TX)
3513 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3515 if (selected_phys & MGMT_PHY_LE_2M_TX)
3516 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3518 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3519 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3521 if (selected_phys & MGMT_PHY_LE_1M_RX)
3522 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3524 if (selected_phys & MGMT_PHY_LE_2M_RX)
3525 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3527 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3528 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3530 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3532 err = hci_req_run_skb(&req, set_default_phy_complete);
3534 mgmt_pending_remove(cmd);
3537 hci_dev_unlock(hdev);
/* Completion callback for Read Local OOB Data: convert either the legacy
 * (P-192 only) or the extended (P-192 + P-256) HCI response into the mgmt
 * reply, shrinking the reply when only P-192 data is present.
 */
3542 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3543 u16 opcode, struct sk_buff *skb)
3545 struct mgmt_rp_read_local_oob_data mgmt_rp;
3546 size_t rp_size = sizeof(mgmt_rp);
3547 struct mgmt_pending_cmd *cmd;
3549 BT_DBG("%s status %u", hdev->name, status);
3551 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3555 if (status || !skb) {
3556 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3557 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3561 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
3563 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3564 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short HCI response before reading its fields. */
3566 if (skb->len < sizeof(*rp)) {
3567 mgmt_cmd_status(cmd->sk, hdev->id,
3568 MGMT_OP_READ_LOCAL_OOB_DATA,
3569 MGMT_STATUS_FAILED);
3573 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3574 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy response carries no P-256 data; trim it from the reply. */
3576 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
3578 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3580 if (skb->len < sizeof(*rp)) {
3581 mgmt_cmd_status(cmd->sk, hdev->id,
3582 MGMT_OP_READ_LOCAL_OOB_DATA,
3583 MGMT_STATUS_FAILED);
3587 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3588 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3590 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3591 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3594 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3595 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3598 mgmt_pending_remove(cmd);
/* MGMT Read Local OOB Data command handler: issue the extended HCI read
 * when Secure Connections is enabled over BR/EDR, else the legacy read,
 * tracked by a pending command resolved in read_local_oob_data_complete().
 */
3601 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3602 void *data, u16 data_len)
3604 struct mgmt_pending_cmd *cmd;
3605 struct hci_request req;
3608 BT_DBG("%s", hdev->name);
3612 if (!hdev_is_powered(hdev)) {
3613 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3614 MGMT_STATUS_NOT_POWERED);
3618 if (!lmp_ssp_capable(hdev)) {
3619 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3620 MGMT_STATUS_NOT_SUPPORTED);
/* Only one Read Local OOB Data may be in flight at a time. */
3624 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3625 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3630 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3636 hci_req_init(&req, hdev);
3638 if (bredr_sc_enabled(hdev))
3639 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3641 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3643 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3645 mgmt_pending_remove(cmd);
3648 hci_dev_unlock(hdev);
/* MGMT Add Remote OOB Data command handler.
 *
 * Supports two payload sizes: the basic form with P-192 hash/rand only
 * (BR/EDR addresses only), and the extended form which additionally
 * carries P-256 values; zero-valued keys in the extended form disable the
 * corresponding OOB data set.
 */
3652 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3653 void *data, u16 len)
3655 struct mgmt_addr_info *addr = data;
3658 BT_DBG("%s ", hdev->name);
3660 if (!bdaddr_type_is_valid(addr->type))
3661 return mgmt_cmd_complete(sk, hdev->id,
3662 MGMT_OP_ADD_REMOTE_OOB_DATA,
3663 MGMT_STATUS_INVALID_PARAMS,
3664 addr, sizeof(*addr));
/* Basic (P-192 only) payload. */
3668 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3669 struct mgmt_cp_add_remote_oob_data *cp = data;
3672 if (cp->addr.type != BDADDR_BREDR) {
3673 err = mgmt_cmd_complete(sk, hdev->id,
3674 MGMT_OP_ADD_REMOTE_OOB_DATA,
3675 MGMT_STATUS_INVALID_PARAMS,
3676 &cp->addr, sizeof(cp->addr));
3680 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3681 cp->addr.type, cp->hash,
3682 cp->rand, NULL, NULL);
3684 status = MGMT_STATUS_FAILED;
3686 status = MGMT_STATUS_SUCCESS;
3688 err = mgmt_cmd_complete(sk, hdev->id,
3689 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3690 &cp->addr, sizeof(cp->addr));
3691 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3692 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3693 u8 *rand192, *hash192, *rand256, *hash256;
3696 if (bdaddr_type_is_le(cp->addr.type)) {
3697 /* Enforce zero-valued 192-bit parameters as
3698 * long as legacy SMP OOB isn't implemented.
3700 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3701 memcmp(cp->hash192, ZERO_KEY, 16)) {
3702 err = mgmt_cmd_complete(sk, hdev->id,
3703 MGMT_OP_ADD_REMOTE_OOB_DATA,
3704 MGMT_STATUS_INVALID_PARAMS,
3705 addr, sizeof(*addr));
3712 /* In case one of the P-192 values is set to zero,
3713 * then just disable OOB data for P-192.
3715 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3716 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3720 rand192 = cp->rand192;
3721 hash192 = cp->hash192;
3725 /* In case one of the P-256 values is set to zero, then just
3726 * disable OOB data for P-256.
3728 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3729 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3733 rand256 = cp->rand256;
3734 hash256 = cp->hash256;
3737 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3738 cp->addr.type, hash192, rand192,
3741 status = MGMT_STATUS_FAILED;
3743 status = MGMT_STATUS_SUCCESS;
3745 err = mgmt_cmd_complete(sk, hdev->id,
3746 MGMT_OP_ADD_REMOTE_OOB_DATA,
3747 status, &cp->addr, sizeof(cp->addr));
/* Any other payload size is malformed. */
3749 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
3751 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3752 MGMT_STATUS_INVALID_PARAMS);
3756 hci_dev_unlock(hdev);
/* MGMT Remove Remote OOB Data command handler: remove OOB data for one
 * BR/EDR address, or clear all stored OOB data when BDADDR_ANY is given.
 */
3760 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3761 void *data, u16 len)
3763 struct mgmt_cp_remove_remote_oob_data *cp = data;
3767 BT_DBG("%s", hdev->name);
3769 if (cp->addr.type != BDADDR_BREDR)
3770 return mgmt_cmd_complete(sk, hdev->id,
3771 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3772 MGMT_STATUS_INVALID_PARAMS,
3773 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY wipes the whole OOB data store. */
3777 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3778 hci_remote_oob_data_clear(hdev);
3779 status = MGMT_STATUS_SUCCESS;
3783 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3785 status = MGMT_STATUS_INVALID_PARAMS;
3787 status = MGMT_STATUS_SUCCESS;
3790 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3791 status, &cp->addr, sizeof(cp->addr));
3793 hci_dev_unlock(hdev);
/* Complete whichever discovery-start command is pending (regular, service
 * or limited) with the given HCI status.
 */
3797 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3799 struct mgmt_pending_cmd *cmd;
3801 BT_DBG("status %d", status);
/* Try each discovery-start opcode in turn; only one can be pending. */
3805 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3807 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3810 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3813 cmd->cmd_complete(cmd, mgmt_status(status));
3814 mgmt_pending_remove(cmd);
3817 hci_dev_unlock(hdev);
/* Validate a requested discovery type against controller capabilities.
 * On rejection *mgmt_status holds the status to report; interleaved
 * discovery additionally requires BR/EDR support (fall-through below).
 */
3820 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3821 uint8_t *mgmt_status)
3824 case DISCOV_TYPE_LE:
3825 *mgmt_status = mgmt_le_support(hdev);
3829 case DISCOV_TYPE_INTERLEAVED:
3830 *mgmt_status = mgmt_le_support(hdev);
3833 /* Intentional fall-through */
3834 case DISCOV_TYPE_BREDR:
3835 *mgmt_status = mgmt_bredr_support(hdev);
3840 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for Start Discovery and Start Limited Discovery:
 * validate state and discovery type, reset the filter, record the request
 * as pending and kick the discovery update work.
 */
3847 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
3848 u16 op, void *data, u16 len)
3850 struct mgmt_cp_start_discovery *cp = data;
3851 struct mgmt_pending_cmd *cmd;
3855 BT_DBG("%s", hdev->name);
3859 if (!hdev_is_powered(hdev)) {
3860 err = mgmt_cmd_complete(sk, hdev->id, op,
3861 MGMT_STATUS_NOT_POWERED,
3862 &cp->type, sizeof(cp->type));
/* Busy if discovery is already running or periodic inquiry is on. */
3866 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3867 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3868 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
3869 &cp->type, sizeof(cp->type));
3873 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3874 err = mgmt_cmd_complete(sk, hdev->id, op, status,
3875 &cp->type, sizeof(cp->type));
3879 /* Clear the discovery filter first to free any previously
3880 * allocated memory for the UUID list.
3882 hci_discovery_filter_clear(hdev);
3884 hdev->discovery.type = cp->type;
3885 hdev->discovery.report_invalid_rssi = false;
3886 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
3887 hdev->discovery.limited = true;
3889 hdev->discovery.limited = false;
3891 cmd = mgmt_pending_add(sk, op, hdev, data, len);
3897 cmd->cmd_complete = generic_cmd_complete;
3899 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3900 queue_work(hdev->req_workqueue, &hdev->discov_update);
3904 hci_dev_unlock(hdev);
/* MGMT Start Discovery: delegate to start_discovery_internal(). */
3908 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3909 void *data, u16 len)
3911 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT Start Limited Discovery: delegate to start_discovery_internal(). */
3915 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
3916 void *data, u16 len)
3918 return start_discovery_internal(sk, hdev,
3919 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete hook for Start Service Discovery: reply echoing (part of)
 * the original request parameters. NOTE(review): the reply payload args
 * are on lines missing from this excerpt.
 */
3923 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
3926 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT Start Service Discovery command handler.
 *
 * Like start_discovery_internal() but additionally installs a result
 * filter: an RSSI threshold and an optional list of service UUIDs copied
 * from the variable-length request payload.
 */
3930 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3931 void *data, u16 len)
3933 struct mgmt_cp_start_service_discovery *cp = data;
3934 struct mgmt_pending_cmd *cmd;
/* Upper bound keeps uuid_count * 16 + header within a u16 length. */
3935 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3936 u16 uuid_count, expected_len;
3940 BT_DBG("%s", hdev->name);
3944 if (!hdev_is_powered(hdev)) {
3945 err = mgmt_cmd_complete(sk, hdev->id,
3946 MGMT_OP_START_SERVICE_DISCOVERY,
3947 MGMT_STATUS_NOT_POWERED,
3948 &cp->type, sizeof(cp->type));
3952 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3953 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3954 err = mgmt_cmd_complete(sk, hdev->id,
3955 MGMT_OP_START_SERVICE_DISCOVERY,
3956 MGMT_STATUS_BUSY, &cp->type,
3961 uuid_count = __le16_to_cpu(cp->uuid_count);
3962 if (uuid_count > max_uuid_count) {
3963 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
3965 err = mgmt_cmd_complete(sk, hdev->id,
3966 MGMT_OP_START_SERVICE_DISCOVERY,
3967 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The payload length must match the declared UUID count exactly. */
3972 expected_len = sizeof(*cp) + uuid_count * 16;
3973 if (expected_len != len) {
3974 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
3976 err = mgmt_cmd_complete(sk, hdev->id,
3977 MGMT_OP_START_SERVICE_DISCOVERY,
3978 MGMT_STATUS_INVALID_PARAMS, &cp->type,
3983 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3984 err = mgmt_cmd_complete(sk, hdev->id,
3985 MGMT_OP_START_SERVICE_DISCOVERY,
3986 status, &cp->type, sizeof(cp->type));
3990 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
3997 cmd->cmd_complete = service_discovery_cmd_complete;
3999 /* Clear the discovery filter first to free any previously
4000 * allocated memory for the UUID list.
4002 hci_discovery_filter_clear(hdev);
4004 hdev->discovery.result_filtering = true;
4005 hdev->discovery.type = cp->type;
4006 hdev->discovery.rssi = cp->rssi;
4007 hdev->discovery.uuid_count = uuid_count;
4009 if (uuid_count > 0) {
4010 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4012 if (!hdev->discovery.uuids) {
4013 err = mgmt_cmd_complete(sk, hdev->id,
4014 MGMT_OP_START_SERVICE_DISCOVERY,
4016 &cp->type, sizeof(cp->type));
4017 mgmt_pending_remove(cmd);
4022 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4023 queue_work(hdev->req_workqueue, &hdev->discov_update);
4027 hci_dev_unlock(hdev);
/* Complete the pending Stop Discovery command with the HCI status. */
4031 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4033 struct mgmt_pending_cmd *cmd;
4035 BT_DBG("status %d", status);
4039 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4041 cmd->cmd_complete(cmd, mgmt_status(status));
4042 mgmt_pending_remove(cmd);
4045 hci_dev_unlock(hdev);
/* MGMT Stop Discovery command handler: reject if no discovery is active
 * or the type does not match the running one, otherwise transition to
 * STOPPING and let the discovery work finish the command.
 */
4048 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4051 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4052 struct mgmt_pending_cmd *cmd;
4055 BT_DBG("%s", hdev->name);
4059 if (!hci_discovery_active(hdev)) {
4060 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4061 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4062 sizeof(mgmt_cp->type));
4066 if (hdev->discovery.type != mgmt_cp->type) {
4067 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4068 MGMT_STATUS_INVALID_PARAMS,
4069 &mgmt_cp->type, sizeof(mgmt_cp->type));
4073 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4079 cmd->cmd_complete = generic_cmd_complete;
4081 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4082 queue_work(hdev->req_workqueue, &hdev->discov_update);
4086 hci_dev_unlock(hdev);
/* MGMT Confirm Name command handler: mark an inquiry-cache entry's name as
 * known, or flag it as needing resolution so the cache resolve order is
 * updated.
 */
4090 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4093 struct mgmt_cp_confirm_name *cp = data;
4094 struct inquiry_entry *e;
4097 BT_DBG("%s", hdev->name);
4101 if (!hci_discovery_active(hdev)) {
4102 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4103 MGMT_STATUS_FAILED, &cp->addr,
4108 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4110 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4111 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4116 if (cp->name_known) {
4117 e->name_state = NAME_KNOWN;
4120 e->name_state = NAME_NEEDED;
4121 hci_inquiry_cache_update_resolve(hdev, e);
4124 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4125 &cp->addr, sizeof(cp->addr));
4128 hci_dev_unlock(hdev);
/* MGMT Block Device command handler: add the address to the blacklist and
 * broadcast a Device Blocked event on success.
 */
4132 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4135 struct mgmt_cp_block_device *cp = data;
4139 BT_DBG("%s", hdev->name);
4141 if (!bdaddr_type_is_valid(cp->addr.type))
4142 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4143 MGMT_STATUS_INVALID_PARAMS,
4144 &cp->addr, sizeof(cp->addr));
4148 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4151 status = MGMT_STATUS_FAILED;
4155 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4157 status = MGMT_STATUS_SUCCESS;
4160 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4161 &cp->addr, sizeof(cp->addr));
4163 hci_dev_unlock(hdev);
/* MGMT Unblock Device command handler: remove the address from the
 * blacklist and broadcast a Device Unblocked event on success.
 */
4168 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4171 struct mgmt_cp_unblock_device *cp = data;
4175 BT_DBG("%s", hdev->name);
4177 if (!bdaddr_type_is_valid(cp->addr.type))
4178 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4179 MGMT_STATUS_INVALID_PARAMS,
4180 &cp->addr, sizeof(cp->addr));
4184 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
/* Deleting an entry that was never blocked is a parameter error. */
4187 status = MGMT_STATUS_INVALID_PARAMS;
4191 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4193 status = MGMT_STATUS_SUCCESS;
4196 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4197 &cp->addr, sizeof(cp->addr));
4199 hci_dev_unlock(hdev);
/* MGMT Set Device ID command handler: store the DI (source/vendor/
 * product/version) record and refresh the EIR data, which carries the
 * Device ID.
 */
4204 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4207 struct mgmt_cp_set_device_id *cp = data;
4208 struct hci_request req;
4212 BT_DBG("%s", hdev->name);
4214 source = __le16_to_cpu(cp->source);
/* Valid sources: 0 = disabled, 1 = Bluetooth SIG, 2 = USB IF. */
4216 if (source > 0x0002)
4217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4218 MGMT_STATUS_INVALID_PARAMS);
4222 hdev->devid_source = source;
4223 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4224 hdev->devid_product = __le16_to_cpu(cp->product);
4225 hdev->devid_version = __le16_to_cpu(cp->version);
4227 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4230 hci_req_init(&req, hdev);
4231 __hci_req_update_eir(&req);
4232 hci_req_run(&req, NULL);
4234 hci_dev_unlock(hdev);
/* Completion callback used when re-enabling instance advertising from
 * set_advertising_complete(); only logs the status.
 */
4239 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4242 BT_DBG("status %d", status);
/* Completion callback for Set Advertising: on error answer all pending
 * Set Advertising commands with the status; on success sync the
 * HCI_ADVERTISING flag, notify settings listeners, and re-enable
 * multi-instance advertising if it was configured earlier.
 */
4245 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4248 struct cmd_lookup match = { NULL, hdev };
4249 struct hci_request req;
4251 struct adv_info *adv_instance;
4257 u8 mgmt_err = mgmt_status(status);
4259 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4260 cmd_status_rsp, &mgmt_err);
4264 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4265 hci_dev_set_flag(hdev, HCI_ADVERTISING)
4267 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4269 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4272 new_settings(hdev, match.sk);
4277 /* If "Set Advertising" was just disabled and instance advertising was
4278 * set up earlier, then re-enable multi-instance advertising.
4280 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4281 list_empty(&hdev->adv_instances))
4284 instance = hdev->cur_adv_instance;
4286 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
4287 struct adv_info, list);
4291 instance = adv_instance->instance;
4294 hci_req_init(&req, hdev);
4296 err = __hci_req_schedule_adv_instance(&req, instance, true);
4299 err = hci_req_run(&req, enable_advertising_instance);
4302 bt_dev_err(hdev, "failed to re-configure advertising");
4305 hci_dev_unlock(hdev);
4308 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4311 struct mgmt_mode *cp = data;
4312 struct mgmt_pending_cmd *cmd;
4313 struct hci_request req;
4317 BT_DBG("request for %s", hdev->name);
4319 status = mgmt_le_support(hdev);
4321 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4324 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4325 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4326 MGMT_STATUS_INVALID_PARAMS);
4332 /* The following conditions are ones which mean that we should
4333 * not do any HCI communication but directly send a mgmt
4334 * response to user space (after toggling the flag if
4337 if (!hdev_is_powered(hdev) ||
4338 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4339 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4340 hci_conn_num(hdev, LE_LINK) > 0 ||
4341 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4342 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4346 hdev->cur_adv_instance = 0x00;
4347 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4348 if (cp->val == 0x02)
4349 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4351 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4353 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4354 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4357 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4362 err = new_settings(hdev, sk);
4367 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4368 pending_find(MGMT_OP_SET_LE, hdev)) {
4369 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4374 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4380 hci_req_init(&req, hdev);
4382 if (cp->val == 0x02)
4383 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4385 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4387 cancel_adv_timeout(hdev);
4390 /* Switch to instance "0" for the Set Advertising setting.
4391 * We cannot use update_[adv|scan_rsp]_data() here as the
4392 * HCI_ADVERTISING flag is not yet set.
4394 hdev->cur_adv_instance = 0x00;
4396 if (ext_adv_capable(hdev)) {
4397 __hci_req_start_ext_adv(&req, 0x00);
4399 __hci_req_update_adv_data(&req, 0x00);
4400 __hci_req_update_scan_rsp_data(&req, 0x00);
4401 __hci_req_enable_advertising(&req);
4404 __hci_req_disable_advertising(&req);
4407 err = hci_req_run(&req, set_advertising_complete);
4409 mgmt_pending_remove(cmd);
4412 hci_dev_unlock(hdev);
4416 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4417 void *data, u16 len)
4419 struct mgmt_cp_set_static_address *cp = data;
4422 BT_DBG("%s", hdev->name);
4424 if (!lmp_le_capable(hdev))
4425 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4426 MGMT_STATUS_NOT_SUPPORTED);
4428 if (hdev_is_powered(hdev))
4429 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4430 MGMT_STATUS_REJECTED);
4432 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4433 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4434 return mgmt_cmd_status(sk, hdev->id,
4435 MGMT_OP_SET_STATIC_ADDRESS,
4436 MGMT_STATUS_INVALID_PARAMS);
4438 /* Two most significant bits shall be set */
4439 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4440 return mgmt_cmd_status(sk, hdev->id,
4441 MGMT_OP_SET_STATIC_ADDRESS,
4442 MGMT_STATUS_INVALID_PARAMS);
4447 bacpy(&hdev->static_addr, &cp->bdaddr);
4449 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4453 err = new_settings(hdev, sk);
4456 hci_dev_unlock(hdev);
4460 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4461 void *data, u16 len)
4463 struct mgmt_cp_set_scan_params *cp = data;
4464 __u16 interval, window;
4467 BT_DBG("%s", hdev->name);
4469 if (!lmp_le_capable(hdev))
4470 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4471 MGMT_STATUS_NOT_SUPPORTED);
4473 interval = __le16_to_cpu(cp->interval);
4475 if (interval < 0x0004 || interval > 0x4000)
4476 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4477 MGMT_STATUS_INVALID_PARAMS);
4479 window = __le16_to_cpu(cp->window);
4481 if (window < 0x0004 || window > 0x4000)
4482 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4483 MGMT_STATUS_INVALID_PARAMS);
4485 if (window > interval)
4486 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4487 MGMT_STATUS_INVALID_PARAMS);
4491 hdev->le_scan_interval = interval;
4492 hdev->le_scan_window = window;
4494 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4497 /* If background scan is running, restart it so new parameters are
4500 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4501 hdev->discovery.state == DISCOVERY_STOPPED) {
4502 struct hci_request req;
4504 hci_req_init(&req, hdev);
4506 hci_req_add_le_scan_disable(&req);
4507 hci_req_add_le_passive_scan(&req);
4509 hci_req_run(&req, NULL);
4512 hci_dev_unlock(hdev);
4517 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4520 struct mgmt_pending_cmd *cmd;
4522 BT_DBG("status 0x%02x", status);
4526 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4531 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4532 mgmt_status(status));
4534 struct mgmt_mode *cp = cmd->param;
4537 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4539 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4541 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4542 new_settings(hdev, cmd->sk);
4545 mgmt_pending_remove(cmd);
4548 hci_dev_unlock(hdev);
4551 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4552 void *data, u16 len)
4554 struct mgmt_mode *cp = data;
4555 struct mgmt_pending_cmd *cmd;
4556 struct hci_request req;
4559 BT_DBG("%s", hdev->name);
4561 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4562 hdev->hci_ver < BLUETOOTH_VER_1_2)
4563 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4564 MGMT_STATUS_NOT_SUPPORTED);
4566 if (cp->val != 0x00 && cp->val != 0x01)
4567 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4568 MGMT_STATUS_INVALID_PARAMS);
4572 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4573 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4578 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4579 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4584 if (!hdev_is_powered(hdev)) {
4585 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4586 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4588 new_settings(hdev, sk);
4592 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4599 hci_req_init(&req, hdev);
4601 __hci_req_write_fast_connectable(&req, cp->val);
4603 err = hci_req_run(&req, fast_connectable_complete);
4605 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4606 MGMT_STATUS_FAILED);
4607 mgmt_pending_remove(cmd);
4611 hci_dev_unlock(hdev);
4616 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4618 struct mgmt_pending_cmd *cmd;
4620 BT_DBG("status 0x%02x", status);
4624 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4629 u8 mgmt_err = mgmt_status(status);
4631 /* We need to restore the flag if related HCI commands
4634 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4636 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4638 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4639 new_settings(hdev, cmd->sk);
4642 mgmt_pending_remove(cmd);
4645 hci_dev_unlock(hdev);
4648 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4650 struct mgmt_mode *cp = data;
4651 struct mgmt_pending_cmd *cmd;
4652 struct hci_request req;
4655 BT_DBG("request for %s", hdev->name);
4657 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4658 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4659 MGMT_STATUS_NOT_SUPPORTED);
4661 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4662 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4663 MGMT_STATUS_REJECTED);
4665 if (cp->val != 0x00 && cp->val != 0x01)
4666 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4667 MGMT_STATUS_INVALID_PARAMS);
4671 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4672 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4676 if (!hdev_is_powered(hdev)) {
4678 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4679 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4680 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4681 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4682 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4685 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4687 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4691 err = new_settings(hdev, sk);
4695 /* Reject disabling when powered on */
4697 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4698 MGMT_STATUS_REJECTED);
4701 /* When configuring a dual-mode controller to operate
4702 * with LE only and using a static address, then switching
4703 * BR/EDR back on is not allowed.
4705 * Dual-mode controllers shall operate with the public
4706 * address as its identity address for BR/EDR and LE. So
4707 * reject the attempt to create an invalid configuration.
4709 * The same restrictions applies when secure connections
4710 * has been enabled. For BR/EDR this is a controller feature
4711 * while for LE it is a host stack feature. This means that
4712 * switching BR/EDR back on when secure connections has been
4713 * enabled is not a supported transaction.
4715 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4716 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4717 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4718 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4719 MGMT_STATUS_REJECTED);
4724 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
4725 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4730 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4736 /* We need to flip the bit already here so that
4737 * hci_req_update_adv_data generates the correct flags.
4739 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4741 hci_req_init(&req, hdev);
4743 __hci_req_write_fast_connectable(&req, false);
4744 __hci_req_update_scan(&req);
4746 /* Since only the advertising data flags will change, there
4747 * is no need to update the scan response data.
4749 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
4751 err = hci_req_run(&req, set_bredr_complete);
4753 mgmt_pending_remove(cmd);
4756 hci_dev_unlock(hdev);
4760 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4762 struct mgmt_pending_cmd *cmd;
4763 struct mgmt_mode *cp;
4765 BT_DBG("%s status %u", hdev->name, status);
4769 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4774 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4775 mgmt_status(status));
4783 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4784 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4787 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4788 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4791 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4792 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4796 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4797 new_settings(hdev, cmd->sk);
4800 mgmt_pending_remove(cmd);
4802 hci_dev_unlock(hdev);
4805 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4806 void *data, u16 len)
4808 struct mgmt_mode *cp = data;
4809 struct mgmt_pending_cmd *cmd;
4810 struct hci_request req;
4814 BT_DBG("request for %s", hdev->name);
4816 if (!lmp_sc_capable(hdev) &&
4817 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4818 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4819 MGMT_STATUS_NOT_SUPPORTED);
4821 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4822 lmp_sc_capable(hdev) &&
4823 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4824 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4825 MGMT_STATUS_REJECTED);
4827 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4828 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4829 MGMT_STATUS_INVALID_PARAMS);
4833 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4834 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4838 changed = !hci_dev_test_and_set_flag(hdev,
4840 if (cp->val == 0x02)
4841 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4843 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4845 changed = hci_dev_test_and_clear_flag(hdev,
4847 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4850 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4855 err = new_settings(hdev, sk);
4860 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4861 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4868 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4869 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4870 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4874 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4880 hci_req_init(&req, hdev);
4881 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4882 err = hci_req_run(&req, sc_enable_complete);
4884 mgmt_pending_remove(cmd);
4889 hci_dev_unlock(hdev);
4893 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4894 void *data, u16 len)
4896 struct mgmt_mode *cp = data;
4897 bool changed, use_changed;
4900 BT_DBG("request for %s", hdev->name);
4902 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4903 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4904 MGMT_STATUS_INVALID_PARAMS);
4909 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4911 changed = hci_dev_test_and_clear_flag(hdev,
4912 HCI_KEEP_DEBUG_KEYS);
4914 if (cp->val == 0x02)
4915 use_changed = !hci_dev_test_and_set_flag(hdev,
4916 HCI_USE_DEBUG_KEYS);
4918 use_changed = hci_dev_test_and_clear_flag(hdev,
4919 HCI_USE_DEBUG_KEYS);
4921 if (hdev_is_powered(hdev) && use_changed &&
4922 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4923 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4924 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4925 sizeof(mode), &mode);
4928 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4933 err = new_settings(hdev, sk);
4936 hci_dev_unlock(hdev);
4940 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4943 struct mgmt_cp_set_privacy *cp = cp_data;
4947 BT_DBG("request for %s", hdev->name);
4949 if (!lmp_le_capable(hdev))
4950 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4951 MGMT_STATUS_NOT_SUPPORTED);
4953 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
4954 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4955 MGMT_STATUS_INVALID_PARAMS);
4957 if (hdev_is_powered(hdev))
4958 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4959 MGMT_STATUS_REJECTED);
4963 /* If user space supports this command it is also expected to
4964 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4966 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
4969 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
4970 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4971 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4972 hci_adv_instances_set_rpa_expired(hdev, true);
4973 if (cp->privacy == 0x02)
4974 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
4976 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4978 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
4979 memset(hdev->irk, 0, sizeof(hdev->irk));
4980 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
4981 hci_adv_instances_set_rpa_expired(hdev, false);
4982 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4985 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4990 err = new_settings(hdev, sk);
4993 hci_dev_unlock(hdev);
4997 static bool irk_is_valid(struct mgmt_irk_info *irk)
4999 switch (irk->addr.type) {
5000 case BDADDR_LE_PUBLIC:
5003 case BDADDR_LE_RANDOM:
5004 /* Two most significant bits shall be set */
5005 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5013 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5016 struct mgmt_cp_load_irks *cp = cp_data;
5017 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5018 sizeof(struct mgmt_irk_info));
5019 u16 irk_count, expected_len;
5022 BT_DBG("request for %s", hdev->name);
5024 if (!lmp_le_capable(hdev))
5025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5026 MGMT_STATUS_NOT_SUPPORTED);
5028 irk_count = __le16_to_cpu(cp->irk_count);
5029 if (irk_count > max_irk_count) {
5030 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5032 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5033 MGMT_STATUS_INVALID_PARAMS);
5036 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5037 if (expected_len != len) {
5038 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5040 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5041 MGMT_STATUS_INVALID_PARAMS);
5044 BT_DBG("%s irk_count %u", hdev->name, irk_count);
5046 for (i = 0; i < irk_count; i++) {
5047 struct mgmt_irk_info *key = &cp->irks[i];
5049 if (!irk_is_valid(key))
5050 return mgmt_cmd_status(sk, hdev->id,
5052 MGMT_STATUS_INVALID_PARAMS);
5057 hci_smp_irks_clear(hdev);
5059 for (i = 0; i < irk_count; i++) {
5060 struct mgmt_irk_info *irk = &cp->irks[i];
5062 hci_add_irk(hdev, &irk->addr.bdaddr,
5063 le_addr_type(irk->addr.type), irk->val,
5067 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5069 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5071 hci_dev_unlock(hdev);
5077 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
5078 void *data, u16 len)
5080 struct mgmt_cp_set_advertising_params *cp = data;
5085 BT_DBG("%s", hdev->name);
5087 if (!lmp_le_capable(hdev))
5088 return mgmt_cmd_status(sk, hdev->id,
5089 MGMT_OP_SET_ADVERTISING_PARAMS,
5090 MGMT_STATUS_NOT_SUPPORTED);
5092 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
5093 return mgmt_cmd_status(sk, hdev->id,
5094 MGMT_OP_SET_ADVERTISING_PARAMS,
5097 min_interval = __le16_to_cpu(cp->interval_min);
5098 max_interval = __le16_to_cpu(cp->interval_max);
5100 if (min_interval > max_interval ||
5101 min_interval < 0x0020 || max_interval > 0x4000)
5102 return mgmt_cmd_status(sk, hdev->id,
5103 MGMT_OP_SET_ADVERTISING_PARAMS,
5104 MGMT_STATUS_INVALID_PARAMS);
5108 hdev->le_adv_min_interval = min_interval;
5109 hdev->le_adv_max_interval = max_interval;
5110 hdev->adv_filter_policy = cp->filter_policy;
5111 hdev->adv_type = cp->type;
5113 err = mgmt_cmd_complete(sk, hdev->id,
5114 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
5116 hci_dev_unlock(hdev);
5121 static void set_advertising_data_complete(struct hci_dev *hdev,
5122 u8 status, u16 opcode)
5124 struct mgmt_cp_set_advertising_data *cp;
5125 struct mgmt_pending_cmd *cmd;
5127 BT_DBG("status 0x%02x", status);
5131 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
5138 mgmt_cmd_status(cmd->sk, hdev->id,
5139 MGMT_OP_SET_ADVERTISING_DATA,
5140 mgmt_status(status));
5142 mgmt_cmd_complete(cmd->sk, hdev->id,
5143 MGMT_OP_SET_ADVERTISING_DATA, 0,
5146 mgmt_pending_remove(cmd);
5149 hci_dev_unlock(hdev);
5152 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
5153 void *data, u16 len)
5155 struct mgmt_pending_cmd *cmd;
5156 struct hci_request req;
5157 struct mgmt_cp_set_advertising_data *cp = data;
5158 struct hci_cp_le_set_adv_data adv;
5161 BT_DBG("%s", hdev->name);
5163 if (!lmp_le_capable(hdev)) {
5164 return mgmt_cmd_status(sk, hdev->id,
5165 MGMT_OP_SET_ADVERTISING_DATA,
5166 MGMT_STATUS_NOT_SUPPORTED);
5171 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
5172 err = mgmt_cmd_status(sk, hdev->id,
5173 MGMT_OP_SET_ADVERTISING_DATA,
5178 if (len > HCI_MAX_AD_LENGTH) {
5179 err = mgmt_cmd_status(sk, hdev->id,
5180 MGMT_OP_SET_ADVERTISING_DATA,
5181 MGMT_STATUS_INVALID_PARAMS);
5185 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
5192 hci_req_init(&req, hdev);
5194 memset(&adv, 0, sizeof(adv));
5195 memcpy(adv.data, cp->data, len);
5198 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
5200 err = hci_req_run(&req, set_advertising_data_complete);
5202 mgmt_pending_remove(cmd);
5205 hci_dev_unlock(hdev);
5210 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
5213 struct mgmt_cp_set_scan_rsp_data *cp;
5214 struct mgmt_pending_cmd *cmd;
5216 BT_DBG("status 0x%02x", status);
5220 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
5227 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
5228 mgmt_status(status));
5230 mgmt_cmd_complete(cmd->sk, hdev->id,
5231 MGMT_OP_SET_SCAN_RSP_DATA, 0,
5234 mgmt_pending_remove(cmd);
5237 hci_dev_unlock(hdev);
5240 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
5243 struct mgmt_pending_cmd *cmd;
5244 struct hci_request req;
5245 struct mgmt_cp_set_scan_rsp_data *cp = data;
5246 struct hci_cp_le_set_scan_rsp_data rsp;
5249 BT_DBG("%s", hdev->name);
5251 if (!lmp_le_capable(hdev))
5252 return mgmt_cmd_status(sk, hdev->id,
5253 MGMT_OP_SET_SCAN_RSP_DATA,
5254 MGMT_STATUS_NOT_SUPPORTED);
5258 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
5259 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
5264 if (len > HCI_MAX_AD_LENGTH) {
5265 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
5266 MGMT_STATUS_INVALID_PARAMS);
5270 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
5276 hci_req_init(&req, hdev);
5278 memset(&rsp, 0, sizeof(rsp));
5279 memcpy(rsp.data, cp->data, len);
5282 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
5284 err = hci_req_run(&req, set_scan_rsp_data_complete);
5286 mgmt_pending_remove(cmd);
5289 hci_dev_unlock(hdev);
5294 /* Adv White List feature */
5295 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5297 struct mgmt_cp_add_dev_white_list *cp;
5298 struct mgmt_pending_cmd *cmd;
5300 BT_DBG("status 0x%02x", status);
5304 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
5311 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5312 mgmt_status(status));
5314 mgmt_cmd_complete(cmd->sk, hdev->id,
5315 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
5317 mgmt_pending_remove(cmd);
5320 hci_dev_unlock(hdev);
5323 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
5324 void *data, u16 len)
5326 struct mgmt_pending_cmd *cmd;
5327 struct mgmt_cp_add_dev_white_list *cp = data;
5328 struct hci_request req;
5331 BT_DBG("%s", hdev->name);
5333 if (!lmp_le_capable(hdev))
5334 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5335 MGMT_STATUS_NOT_SUPPORTED);
5337 if (!hdev_is_powered(hdev))
5338 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5339 MGMT_STATUS_REJECTED);
5343 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
5344 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5349 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
5355 hci_req_init(&req, hdev);
5357 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
5359 err = hci_req_run(&req, add_white_list_complete);
5361 mgmt_pending_remove(cmd);
5366 hci_dev_unlock(hdev);
5371 static void remove_from_white_list_complete(struct hci_dev *hdev,
5372 u8 status, u16 opcode)
5374 struct mgmt_cp_remove_dev_from_white_list *cp;
5375 struct mgmt_pending_cmd *cmd;
5377 BT_DBG("status 0x%02x", status);
5381 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
5388 mgmt_cmd_status(cmd->sk, hdev->id,
5389 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5390 mgmt_status(status));
5392 mgmt_cmd_complete(cmd->sk, hdev->id,
5393 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
5396 mgmt_pending_remove(cmd);
5399 hci_dev_unlock(hdev);
5402 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
5403 void *data, u16 len)
5405 struct mgmt_pending_cmd *cmd;
5406 struct mgmt_cp_remove_dev_from_white_list *cp = data;
5407 struct hci_request req;
5410 BT_DBG("%s", hdev->name);
5412 if (!lmp_le_capable(hdev))
5413 return mgmt_cmd_status(sk, hdev->id,
5414 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5415 MGMT_STATUS_NOT_SUPPORTED);
5417 if (!hdev_is_powered(hdev))
5418 return mgmt_cmd_status(sk, hdev->id,
5419 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5420 MGMT_STATUS_REJECTED);
5424 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
5425 err = mgmt_cmd_status(sk, hdev->id,
5426 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5431 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5438 hci_req_init(&req, hdev);
5440 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
5442 err = hci_req_run(&req, remove_from_white_list_complete);
5444 mgmt_pending_remove(cmd);
5449 hci_dev_unlock(hdev);
5454 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
5457 struct mgmt_pending_cmd *cmd;
5459 BT_DBG("status 0x%02x", status);
5463 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
5468 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
5469 mgmt_status(status));
5471 mgmt_cmd_complete(cmd->sk, hdev->id,
5472 MGMT_OP_CLEAR_DEV_WHITE_LIST,
5475 mgmt_pending_remove(cmd);
5478 hci_dev_unlock(hdev);
5481 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
5482 void *data, u16 len)
5484 struct mgmt_pending_cmd *cmd;
5485 struct hci_request req;
5488 BT_DBG("%s", hdev->name);
5490 if (!lmp_le_capable(hdev))
5491 return mgmt_cmd_status(sk, hdev->id,
5492 MGMT_OP_CLEAR_DEV_WHITE_LIST,
5493 MGMT_STATUS_NOT_SUPPORTED);
5495 if (!hdev_is_powered(hdev))
5496 return mgmt_cmd_status(sk, hdev->id,
5497 MGMT_OP_CLEAR_DEV_WHITE_LIST,
5498 MGMT_STATUS_REJECTED);
5502 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
5503 err = mgmt_cmd_status(sk, hdev->id,
5504 MGMT_OP_CLEAR_DEV_WHITE_LIST,
5509 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
5516 hci_req_init(&req, hdev);
5518 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
5520 err = hci_req_run(&req, clear_white_list_complete);
5522 mgmt_pending_remove(cmd);
5527 hci_dev_unlock(hdev);
5532 static void set_rssi_threshold_complete(struct hci_dev *hdev,
5533 u8 status, u16 opcode)
5535 struct mgmt_pending_cmd *cmd;
5537 BT_DBG("status 0x%02x", status);
5541 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5546 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5547 mgmt_status(status));
5549 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
5552 mgmt_pending_remove(cmd);
5555 hci_dev_unlock(hdev);
5558 static void set_rssi_disable_complete(struct hci_dev *hdev,
5559 u8 status, u16 opcode)
5561 struct mgmt_pending_cmd *cmd;
5563 BT_DBG("status 0x%02x", status);
5567 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5572 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5573 mgmt_status(status));
5575 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5578 mgmt_pending_remove(cmd);
5581 hci_dev_unlock(hdev);
5584 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
5585 void *data, u16 len)
5588 struct hci_cp_set_rssi_threshold th = { 0, };
5589 struct mgmt_cp_set_enable_rssi *cp = data;
5590 struct hci_conn *conn;
5591 struct mgmt_pending_cmd *cmd;
5592 struct hci_request req;
5597 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5599 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5600 MGMT_STATUS_FAILED);
5604 if (!lmp_le_capable(hdev)) {
5605 mgmt_pending_remove(cmd);
5606 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5607 MGMT_STATUS_NOT_SUPPORTED);
5611 if (!hdev_is_powered(hdev)) {
5612 BT_DBG("%s", hdev->name);
5613 mgmt_pending_remove(cmd);
5614 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5615 MGMT_STATUS_NOT_POWERED);
5619 if (cp->link_type == 0x01)
5620 dest_type = LE_LINK;
5622 dest_type = ACL_LINK;
5624 /* Get LE/ACL link handle info */
5625 conn = hci_conn_hash_lookup_ba(hdev,
5626 dest_type, &cp->bdaddr);
5629 err = mgmt_cmd_complete(sk, hdev->id,
5630 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
5631 mgmt_pending_remove(cmd);
5635 hci_req_init(&req, hdev);
5637 th.hci_le_ext_opcode = 0x0B;
5639 th.conn_handle = conn->handle;
5640 th.alert_mask = 0x07;
5641 th.low_th = cp->low_th;
5642 th.in_range_th = cp->in_range_th;
5643 th.high_th = cp->high_th;
5645 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
5646 err = hci_req_run(&req, set_rssi_threshold_complete);
5649 mgmt_pending_remove(cmd);
5650 BT_ERR("Error in requesting hci_req_run");
5655 hci_dev_unlock(hdev);
5659 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
5660 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
5662 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
5663 struct mgmt_cp_set_enable_rssi *cp = data;
5664 struct mgmt_pending_cmd *cmd;
5669 mgmt_rp.status = rp->status;
5670 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
5671 mgmt_rp.bt_address = cp->bdaddr;
5672 mgmt_rp.link_type = cp->link_type;
5674 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5675 MGMT_STATUS_SUCCESS, &mgmt_rp,
5676 sizeof(struct mgmt_cc_rsp_enable_rssi));
5678 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
5679 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
5681 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
5682 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
5683 &mgmt_rp.bt_address, true);
5687 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5689 mgmt_pending_remove(cmd);
5691 hci_dev_unlock(hdev);
/* Tizen RSSI monitoring: finalizes a successful RSSI-disable. Builds the
 * mgmt response from the vendor HCI reply plus the original request's
 * address/link type, completes MGMT_OP_SET_RSSI_DISABLE, emits the
 * MGMT_EV_RSSI_DISABLED event, clears the monitored state of the target
 * link and removes the pending command.
 *
 * Fix: the reply buffer is a struct mgmt_cc_rp_disable_rssi, but the
 * sizes passed to mgmt_cmd_complete()/mgmt_event() were taken from the
 * unrelated *enable* response struct (mgmt_cc_rsp_enable_rssi) — a
 * copy-paste error that over- or under-reads mgmt_rp whenever the two
 * structs differ in size. Use sizeof(mgmt_rp) so the size always matches
 * the buffer actually sent.
 */
5694 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
5695 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
5697 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
5698 struct mgmt_cp_disable_rssi *cp = data;
5699 struct mgmt_pending_cmd *cmd;
5704 mgmt_rp.status = rp->status;
5705 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
5706 mgmt_rp.bt_address = cp->bdaddr;
5707 mgmt_rp.link_type = cp->link_type;
5709 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5710 MGMT_STATUS_SUCCESS, &mgmt_rp,
5711 sizeof(mgmt_rp));
5713 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
5714 sizeof(mgmt_rp), NULL);
5716 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
5717 &mgmt_rp.bt_address, false);
5721 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5723 mgmt_pending_remove(cmd);
5725 hci_dev_unlock(hdev);
5728 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
5729 void *data, u16 len)
5731 struct mgmt_pending_cmd *cmd;
5732 struct hci_request req;
5733 struct hci_cp_set_enable_rssi cp_en = { 0, };
5736 BT_DBG("Set Disable RSSI.");
5738 cp_en.hci_le_ext_opcode = 0x01;
5739 cp_en.le_enable_cs_Features = 0x00;
5740 cp_en.data[0] = 0x00;
5741 cp_en.data[1] = 0x00;
5742 cp_en.data[2] = 0x00;
5746 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5748 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5749 MGMT_STATUS_FAILED);
5753 if (!lmp_le_capable(hdev)) {
5754 mgmt_pending_remove(cmd);
5755 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5756 MGMT_STATUS_NOT_SUPPORTED);
5760 if (!hdev_is_powered(hdev)) {
5761 BT_DBG("%s", hdev->name);
5762 mgmt_pending_remove(cmd);
5763 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5764 MGMT_STATUS_NOT_POWERED);
5768 hci_req_init(&req, hdev);
5770 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
5771 sizeof(struct hci_cp_set_enable_rssi),
5772 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
5773 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
5775 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
5776 err = hci_req_run(&req, set_rssi_disable_complete);
5779 mgmt_pending_remove(cmd);
5780 BT_ERR("Error in requesting hci_req_run");
5785 hci_dev_unlock(hdev);
/* Command-complete handler for the vendor RSSI enable/disable HCI command.
 * Routes the controller response to whichever mgmt operation is pending:
 * - enable pending: on success either set thresholds next or report success;
 * - disable pending: report success, or fully disable RSSI when this was
 *   the last monitored link (hci_conn_hash_lookup_rssi_count() <= 1).
 * NOTE(review): extract is missing lines (case labels/breaks); comments only.
 */
5789 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
5791 struct hci_cc_rsp_enable_rssi *rp = response;
5792 struct mgmt_pending_cmd *cmd_enable = NULL;
5793 struct mgmt_pending_cmd *cmd_disable = NULL;
5794 struct mgmt_cp_set_enable_rssi *cp_en;
5795 struct mgmt_cp_disable_rssi *cp_dis;
5798 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5799 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5800 hci_dev_unlock(hdev);
5803 BT_DBG("Enable Request");
5806 BT_DBG("Disable Request");
5809 cp_en = cmd_enable->param;
/* Dispatch on the vendor sub-opcode echoed back by the controller. */
5814 switch (rp->le_ext_opcode) {
5816 BT_DBG("RSSI enabled.. Setting Threshold...");
5817 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
5818 cp_en, sizeof(*cp_en));
5822 BT_DBG("Sending RSSI enable success");
5823 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
5824 cp_en, rp, rp->status);
5828 } else if (cmd_disable) {
5829 cp_dis = cmd_disable->param;
5834 switch (rp->le_ext_opcode) {
5836 BT_DBG("Sending RSSI disable success");
5837 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
5838 cp_dis, rp, rp->status);
/*
 * Only unset RSSI Threshold values for the Link if
 * RSSI is monitored for other BREDR or LE Links
 */
5846 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
5847 BT_DBG("Unset Threshold. Other links being monitored");
5848 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
5849 cp_dis, rp, rp->status);
5851 BT_DBG("Unset Threshold. Disabling...");
5852 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
5853 cp_dis, sizeof(*cp_dis));
/* hci_request completion callback for the RSSI-enable request: resolve the
 * pending MGMT_OP_SET_RSSI_ENABLE command with status or success.
 */
5860 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
5863 struct mgmt_pending_cmd *cmd;
5865 BT_DBG("status 0x%02x", status);
5869 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
/* Non-zero HCI status -> report mapped mgmt error, else complete OK. */
5874 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5875 mgmt_status(status));
5877 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
5880 mgmt_pending_remove(cmd);
5883 hci_dev_unlock(hdev);
/* MGMT_OP_SET_RSSI_ENABLE handler: enable vendor RSSI monitoring.
 * Validates LE capability / power state / no duplicate pending command,
 * then either sets thresholds directly (RSSI already enabled on some link)
 * or issues the vendor enable command (feature bits 0x04).
 * NOTE(review): extract is missing lines (braces/gotos); comments only.
 */
5886 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
5887 void *data, u16 len)
5889 struct mgmt_pending_cmd *cmd;
5890 struct hci_request req;
5891 struct mgmt_cp_set_enable_rssi *cp = data;
5892 struct hci_cp_set_enable_rssi cp_en = { 0, };
5895 BT_DBG("Set Enable RSSI.");
/* Vendor sub-opcode 0x01, feature byte 0x04 => enable RSSI monitoring. */
5897 cp_en.hci_le_ext_opcode = 0x01;
5898 cp_en.le_enable_cs_Features = 0x04;
5899 cp_en.data[0] = 0x00;
5900 cp_en.data[1] = 0x00;
5901 cp_en.data[2] = 0x00;
5905 if (!lmp_le_capable(hdev)) {
5906 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5907 MGMT_STATUS_NOT_SUPPORTED);
5911 if (!hdev_is_powered(hdev)) {
5912 BT_DBG("%s", hdev->name);
5913 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5914 MGMT_STATUS_NOT_POWERED);
/* Reject if an enable is already in flight. */
5918 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
5919 BT_DBG("%s", hdev->name);
5920 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5925 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
5928 BT_DBG("%s", hdev->name);
/* If RSSI is already enabled directly set Threshold values */
5934 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
5935 hci_dev_unlock(hdev);
5936 BT_DBG("RSSI Enabled. Directly set Threshold");
5937 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
5941 hci_req_init(&req, hdev);
5943 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
5944 sizeof(struct hci_cp_set_enable_rssi),
5945 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
5946 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
5948 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
5949 err = hci_req_run(&req, set_rssi_enable_complete);
5952 mgmt_pending_remove(cmd);
5953 BT_ERR("Error in requesting hci_req_run");
5958 hci_dev_unlock(hdev);
/* hci_request completion callback for the raw-RSSI read: acknowledge the
 * pending MGMT_OP_GET_RAW_RSSI with the one-byte HCI status. The actual
 * RSSI value is delivered separately via mgmt_raw_rssi_response().
 */
5963 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5965 struct mgmt_pending_cmd *cmd;
5967 BT_DBG("status 0x%02x", status);
5971 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
5975 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
5976 MGMT_STATUS_SUCCESS, &status, 1);
5978 mgmt_pending_remove(cmd);
5981 hci_dev_unlock(hdev);
/* MGMT_OP_GET_RAW_RSSI handler: look up the connection for the requested
 * address (link_type 0x01 = LE, else BR/EDR ACL) and queue the vendor
 * HCI_OP_GET_RAW_RSSI command for its handle.
 * NOTE(review): extract is missing lines (braces/gotos); comments only.
 */
5984 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
5987 struct mgmt_pending_cmd *cmd;
5988 struct hci_request req;
5989 struct mgmt_cp_get_raw_rssi *cp = data;
5990 struct hci_cp_get_raw_rssi hci_cp;
5992 struct hci_conn *conn;
5996 BT_DBG("Get Raw RSSI.");
6000 if (!lmp_le_capable(hdev)) {
6001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6002 MGMT_STATUS_NOT_SUPPORTED);
/* Map the mgmt link_type byte onto the kernel link-type constant. */
6006 if (cp->link_type == 0x01)
6007 dest_type = LE_LINK;
6009 dest_type = ACL_LINK;
/* Get LE/BREDR link handle info */
6012 conn = hci_conn_hash_lookup_ba(hdev,
6013 dest_type, &cp->bt_address);
6015 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6016 MGMT_STATUS_NOT_CONNECTED);
6019 hci_cp.conn_handle = conn->handle;
6021 if (!hdev_is_powered(hdev)) {
6022 BT_DBG("%s", hdev->name);
6023 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6024 MGMT_STATUS_NOT_POWERED);
6028 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
6029 BT_DBG("%s", hdev->name);
6030 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6035 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
6037 BT_DBG("%s", hdev->name);
6042 hci_req_init(&req, hdev);
6044 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
6045 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
6046 err = hci_req_run(&req, get_raw_rssi_complete);
6049 mgmt_pending_remove(cmd);
6050 BT_ERR("Error in requesting hci_req_run");
6054 hci_dev_unlock(hdev);
/* Translate a raw-RSSI HCI completion into the MGMT_EV_RAW_RSSI event:
 * resolve the connection handle back to an address and link type, then
 * broadcast status + RSSI (dBm) to mgmt listeners.
 */
6059 void mgmt_raw_rssi_response(struct hci_dev *hdev,
6060 struct hci_cc_rp_get_raw_rssi *rp, int success)
6062 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
6063 struct hci_conn *conn;
6065 mgmt_rp.status = rp->status;
6066 mgmt_rp.rssi_dbm = rp->rssi_dbm;
6068 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
6072 bacpy(&mgmt_rp.bt_address, &conn->dst);
/* link_type 0x01 = LE, 0x00 = BR/EDR (mirrors the request encoding). */
6073 if (conn->type == LE_LINK)
6074 mgmt_rp.link_type = 0x01;
6076 mgmt_rp.link_type = 0x00;
6078 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
6079 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* hci_request completion callback for the threshold-clear request: complete
 * the pending MGMT_OP_SET_RSSI_DISABLE command with the HCI status byte.
 */
6082 static void set_disable_threshold_complete(struct hci_dev *hdev,
6083 u8 status, u16 opcode)
6085 struct mgmt_pending_cmd *cmd;
6087 BT_DBG("status 0x%02x", status);
6091 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6095 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6096 MGMT_STATUS_SUCCESS, &status, 1);
6098 mgmt_pending_remove(cmd);
6101 hci_dev_unlock(hdev);
/* Removes RSSI monitoring for a single link: clears the vendor RSSI
 * threshold/alert mask (sub-opcode 0x0B) for the connection handle matching
 * the given address. If the device is not connected, the disable request is
 * completed immediately as a no-op.
 * NOTE(review): extract is missing lines (braces/gotos); comments only.
 */
6105 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
6106 void *data, u16 len)
6109 struct hci_cp_set_rssi_threshold th = { 0, };
6110 struct mgmt_cp_disable_rssi *cp = data;
6111 struct hci_conn *conn;
6112 struct mgmt_pending_cmd *cmd;
6113 struct hci_request req;
6116 BT_DBG("Set Disable RSSI.");
6120 if (!lmp_le_capable(hdev)) {
6121 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6122 MGMT_STATUS_NOT_SUPPORTED);
/* Get LE/ACL link handle info */
6127 if (cp->link_type == 0x01)
6128 dest_type = LE_LINK;
6130 dest_type = ACL_LINK;
6132 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
/* Not connected: nothing to disable, complete right away. */
6134 err = mgmt_cmd_complete(sk, hdev->id,
6135 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
/* Sub-opcode 0x0B with zero mask/threshold clears monitoring for handle. */
6139 th.hci_le_ext_opcode = 0x0B;
6141 th.conn_handle = conn->handle;
6142 th.alert_mask = 0x00;
6144 th.in_range_th = 0x00;
6147 if (!hdev_is_powered(hdev)) {
6148 BT_DBG("%s", hdev->name);
6149 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6154 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
6155 BT_DBG("%s", hdev->name);
6156 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6161 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
6164 BT_DBG("%s", hdev->name);
6169 hci_req_init(&req, hdev);
6171 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6172 err = hci_req_run(&req, set_disable_threshold_complete);
6174 mgmt_pending_remove(cmd);
6175 BT_ERR("Error in requesting hci_req_run");
6180 hci_dev_unlock(hdev);
/* Forward a vendor-specific RSSI-alert HCI event to userspace as
 * MGMT_EV_RSSI_ALERT, translating the connection handle into an address
 * and link type. Drops the event (with an error log) if the handle does
 * not map to a known connection.
 */
6185 void mgmt_rssi_alert_evt(struct hci_dev *hdev, struct sk_buff *skb)
6187 struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
6188 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
6189 struct hci_conn *conn;
6191 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
6192 ev->conn_handle, ev->alert_type, ev->rssi_dbm);
6194 conn = hci_conn_hash_lookup_handle(hdev, ev->conn_handle);
6197 BT_ERR("RSSI alert Error: Device not found for handle");
6200 bacpy(&mgmt_ev.bdaddr, &conn->dst);
/* link_type 0x01 = LE, 0x00 = BR/EDR. */
6202 if (conn->type == LE_LINK)
6203 mgmt_ev.link_type = 0x01;
6205 mgmt_ev.link_type = 0x00;
6207 mgmt_ev.alert_type = ev->alert_type;
6208 mgmt_ev.rssi_dbm = ev->rssi_dbm;
6210 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
6211 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
/* Fail path for Start LE Discovery: reset LE discovery state to STOPPED and
 * complete the pending MGMT_OP_START_LE_DISCOVERY with the mapped status,
 * echoing the discovery type back to the caller.
 */
6215 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
6217 struct mgmt_pending_cmd *cmd;
6221 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
6223 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
6227 type = hdev->le_discovery.type;
6229 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
6230 mgmt_status(status), &type, sizeof(type));
6231 mgmt_pending_remove(cmd);
/* hci_request completion callback for Start LE Discovery: on error, unwind
 * via mgmt_start_le_discovery_failed(); on success, move to FINDING and arm
 * the delayed le_scan_disable work to bound the scan duration.
 */
6236 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
6239 unsigned long timeout = 0;
6241 BT_DBG("status %d", status);
6245 mgmt_start_le_discovery_failed(hdev, status);
6246 hci_dev_unlock(hdev);
6251 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
6252 hci_dev_unlock(hdev);
/* Only DISCOV_TYPE_LE is expected here; anything else is a logic error. */
6254 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
6255 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
6260 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_LE_DISCOVERY handler: validate power/state/type, then build
 * a single hci_request that (optionally) stops background scanning, picks an
 * own-address type, programs scan parameters and enables LE scanning.
 * State moves to DISCOVERY_STARTING once the request is queued.
 * NOTE(review): extract is missing lines (braces/gotos); comments only.
 */
6263 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
6264 void *data, u16 len)
6266 struct mgmt_cp_start_le_discovery *cp = data;
6267 struct mgmt_pending_cmd *cmd;
6268 struct hci_cp_le_set_scan_param param_cp;
6269 struct hci_cp_le_set_scan_enable enable_cp;
6270 struct hci_request req;
6271 u8 status, own_addr_type;
6274 BT_DBG("%s", hdev->name);
6278 if (!hdev_is_powered(hdev)) {
6279 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6280 MGMT_STATUS_NOT_POWERED);
6284 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
6285 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6290 if (cp->type != DISCOV_TYPE_LE) {
6291 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6292 MGMT_STATUS_INVALID_PARAMS);
6296 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
6302 hdev->le_discovery.type = cp->type;
6304 hci_req_init(&req, hdev);
6306 status = mgmt_le_support(hdev);
6308 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6310 mgmt_pending_remove(cmd);
/* If controller is scanning, it means the background scanning
 * is running. Thus, we should temporarily stop it in order to
 * set the discovery scanning parameters.
 */
6318 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
6319 hci_req_add_le_scan_disable(&req);
6321 memset(&param_cp, 0, sizeof(param_cp));
/* All active scans will be done with either a resolvable
 * private address (when privacy feature has been enabled)
 * or unresolvable private address.
 */
6327 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
6329 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6330 MGMT_STATUS_FAILED);
6331 mgmt_pending_remove(cmd);
/* Scan parameters come from the hdev defaults (see le_set_scan_params). */
6335 param_cp.type = hdev->le_scan_type;
6336 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
6337 param_cp.window = cpu_to_le16(hdev->le_scan_window);
6338 param_cp.own_address_type = own_addr_type;
6339 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
6342 memset(&enable_cp, 0, sizeof(enable_cp));
6343 enable_cp.enable = LE_SCAN_ENABLE;
6344 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
6346 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
6349 err = hci_req_run(&req, start_le_discovery_complete);
6351 mgmt_pending_remove(cmd);
6353 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
6356 hci_dev_unlock(hdev);
/* Fail path for Stop LE Discovery: complete the pending
 * MGMT_OP_STOP_LE_DISCOVERY with the mapped status and the current
 * LE discovery type as response payload.
 */
6360 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
6362 struct mgmt_pending_cmd *cmd;
6365 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
6369 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
6370 mgmt_status(status), &hdev->le_discovery.type,
6371 sizeof(hdev->le_discovery.type));
6372 mgmt_pending_remove(cmd);
/* hci_request completion callback for Stop LE Discovery: on error report
 * failure via mgmt_stop_le_discovery_failed(), otherwise move the LE
 * discovery state machine to STOPPED.
 */
6377 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
6380 BT_DBG("status %d", status);
6385 mgmt_stop_le_discovery_failed(hdev, status);
6389 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
6392 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_LE_DISCOVERY handler: reject if no LE discovery is active or
 * the type mismatches; otherwise cancel the scan-timeout work, queue an LE
 * scan-disable request and move state to DISCOVERY_STOPPING.
 * NOTE(review): extract is missing lines (braces/gotos); comments only.
 */
6395 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
6396 void *data, u16 len)
6398 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
6399 struct mgmt_pending_cmd *cmd;
6400 struct hci_request req;
6403 BT_DBG("%s", hdev->name);
6407 if (!hci_le_discovery_active(hdev)) {
6408 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
6409 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6410 sizeof(mgmt_cp->type));
6414 if (hdev->le_discovery.type != mgmt_cp->type) {
6415 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
6416 MGMT_STATUS_INVALID_PARAMS,
6417 &mgmt_cp->type, sizeof(mgmt_cp->type));
6421 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
6427 hci_req_init(&req, hdev);
/* Stopping only makes sense from FINDING; anything else is unexpected. */
6429 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
6430 BT_DBG("unknown le discovery state %u",
6431 hdev->le_discovery.state);
6433 mgmt_pending_remove(cmd);
6434 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
6435 MGMT_STATUS_FAILED, &mgmt_cp->type,
6436 sizeof(mgmt_cp->type));
6440 cancel_delayed_work(&hdev->le_scan_disable);
6441 hci_req_add_le_scan_disable(&req);
6443 err = hci_req_run(&req, stop_le_discovery_complete);
6445 mgmt_pending_remove(cmd);
6447 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
6450 hci_dev_unlock(hdev);
/* Separate LE discovery */
/* Notify userspace of LE discovering-state changes: complete whichever
 * start/stop LE discovery command is pending (echoing the discovery type),
 * then broadcast MGMT_EV_DISCOVERING with the new state.
 */
6455 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
6457 struct mgmt_ev_discovering ev;
6458 struct mgmt_pending_cmd *cmd;
6460 BT_DBG("%s le discovering %u", hdev->name, discovering);
6463 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
6465 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
6468 u8 type = hdev->le_discovery.type;
6470 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6472 mgmt_pending_remove(cmd);
6475 memset(&ev, 0, sizeof(ev));
6476 ev.type = hdev->le_discovery.type;
6477 ev.discovering = discovering;
6479 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Cancel an in-progress LE auto connection by sending
 * HCI_OP_LE_CREATE_CONN_CANCEL; failures are only logged.
 */
6482 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
6483 void *data, u16 len)
6487 BT_DBG("%s", hdev->name);
6491 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
6493 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
6495 hci_dev_unlock(hdev);
/* Validate LE connection parameter update values against the ranges from
 * the Bluetooth Core spec: interval 6..3200 (min <= max), supervision
 * timeout 10..3200, latency <= 499 and small enough that the link cannot
 * outlive the supervision timeout (max_latency derived from timeout/interval).
 */
6500 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
6505 if (min > max || min < 6 || max > 3200)
6508 if (to_multiplier < 10 || to_multiplier > 3200)
/* timeout must exceed the interval; units differ by a factor of 8. */
6511 if (max >= to_multiplier * 8)
6514 max_latency = (to_multiplier * 8 / max) - 1;
6516 if (latency > 499 || latency > max_latency)
/* MGMT_OP_LE_CONN_UPDATE handler: validate the requested connection
 * parameters, look up the LE connection by address and issue
 * hci_le_conn_update() with the new interval/latency/timeout.
 * NOTE(review): extract is missing some lines; comments only.
 */
6522 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
6525 struct mgmt_cp_le_conn_update *cp = data;
6527 struct hci_conn *conn;
6528 u16 min, max, latency, supervision_timeout;
6531 if (!hdev_is_powered(hdev))
6532 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
6533 MGMT_STATUS_NOT_POWERED);
/* Command parameters arrive little-endian on the wire. */
6535 min = __le16_to_cpu(cp->conn_interval_min);
6536 max = __le16_to_cpu(cp->conn_interval_max);
6537 latency = __le16_to_cpu(cp->conn_latency);
6538 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
6540 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
6541 min, max, latency, supervision_timeout);
6543 err = check_le_conn_update_param(min, max, latency,
6544 supervision_timeout);
6547 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
6548 MGMT_STATUS_INVALID_PARAMS);
6552 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
6554 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
6555 MGMT_STATUS_NOT_CONNECTED);
6556 hci_dev_unlock(hdev);
6560 hci_dev_unlock(hdev);
6562 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
6564 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
/* hci_request completion callback for Set Manufacturer Data: resolve the
 * pending MGMT_OP_SET_MANUFACTURER_DATA command with mapped error status
 * or success.
 */
6568 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
6571 struct mgmt_cp_set_manufacturer_data *cp;
6572 struct mgmt_pending_cmd *cmd;
6574 BT_DBG("status 0x%02x", status);
6578 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
6585 mgmt_cmd_status(cmd->sk, hdev->id,
6586 MGMT_OP_SET_MANUFACTURER_DATA,
6587 mgmt_status(status));
6589 mgmt_cmd_complete(cmd->sk, hdev->id,
6590 MGMT_OP_SET_MANUFACTURER_DATA, 0,
6593 mgmt_pending_remove(cmd);
6596 hci_dev_unlock(hdev);
/* MGMT_OP_SET_MANUFACTURER_DATA handler: store new manufacturer-specific
 * EIR data on hdev and refresh the EIR. Payload layout (from the checks
 * below): data[0] = length incl. type byte, data[1] = EIR type (must be
 * 0xFF, manufacturer specific), data[2..] = payload. Keeps a copy of the
 * old data so it can be restored if the HCI request fails.
 * NOTE(review): extract is missing lines (braces/gotos); comments only.
 */
6599 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
6600 void *data, u16 len)
6602 struct mgmt_pending_cmd *cmd;
6603 struct hci_request req;
6604 struct mgmt_cp_set_manufacturer_data *cp = data;
6605 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
6609 BT_DBG("%s", hdev->name);
6611 if (!lmp_bredr_capable(hdev))
6612 return mgmt_cmd_status(sk, hdev->id,
6613 MGMT_OP_SET_MANUFACTURER_DATA,
6614 MGMT_STATUS_NOT_SUPPORTED);
/* Length byte must be non-zero and fit the hdev buffer. */
6616 if (cp->data[0] == 0 ||
6617 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
6618 return mgmt_cmd_status(sk, hdev->id,
6619 MGMT_OP_SET_MANUFACTURER_DATA,
6620 MGMT_STATUS_INVALID_PARAMS);
/* Only EIR type 0xFF (manufacturer specific data) is accepted. */
6622 if (cp->data[1] != 0xFF)
6623 return mgmt_cmd_status(sk, hdev->id,
6624 MGMT_OP_SET_MANUFACTURER_DATA,
6625 MGMT_STATUS_NOT_SUPPORTED);
6629 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
6630 err = mgmt_cmd_status(sk, hdev->id,
6631 MGMT_OP_SET_MANUFACTURER_DATA,
6636 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
6643 hci_req_init(&req, hdev);
/* If new data is same as previous data then return command
 * complete immediately without touching the controller.
 */
6648 if (hdev->manufacturer_len == cp->data[0] - 1 &&
6649 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
6650 mgmt_pending_remove(cmd);
6651 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
6652 0, cp, sizeof(*cp));
/* Preserve current data so a failed request can roll back below. */
6657 old_len = hdev->manufacturer_len;
6659 memcpy(old_data, hdev->manufacturer_data, old_len);
6661 hdev->manufacturer_len = cp->data[0] - 1;
6662 if (hdev->manufacturer_len > 0)
6663 memcpy(hdev->manufacturer_data, cp->data + 2,
6664 hdev->manufacturer_len);
6666 __hci_req_update_eir(&req);
6668 err = hci_req_run(&req, set_manufacturer_data_complete);
6670 mgmt_pending_remove(cmd);
6675 hci_dev_unlock(hdev);
/* Rollback path: restore the previous manufacturer data. */
6680 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
6681 hdev->manufacturer_len = old_len;
6682 if (hdev->manufacturer_len > 0)
6683 memcpy(hdev->manufacturer_data, old_data,
6684 hdev->manufacturer_len);
6685 hci_dev_unlock(hdev);
/* MGMT_OP_LE_SET_SCAN_PARAMS handler: validate and store LE scan type,
 * interval and window on hdev (interval/window 0x0004..0x4000, window <=
 * interval, per Core spec), then restart passive background scanning if
 * it is currently running so the new parameters take effect.
 */
6689 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
6690 void *data, u16 len)
6692 struct mgmt_cp_le_set_scan_params *cp = data;
6693 __u16 interval, window;
6696 BT_DBG("%s", hdev->name);
6698 if (!lmp_le_capable(hdev))
6699 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
6700 MGMT_STATUS_NOT_SUPPORTED);
6702 interval = __le16_to_cpu(cp->interval);
6704 if (interval < 0x0004 || interval > 0x4000)
6705 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
6706 MGMT_STATUS_INVALID_PARAMS);
6708 window = __le16_to_cpu(cp->window);
6710 if (window < 0x0004 || window > 0x4000)
6711 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
6712 MGMT_STATUS_INVALID_PARAMS);
6714 if (window > interval)
6715 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
6716 MGMT_STATUS_INVALID_PARAMS);
6720 hdev->le_scan_type = cp->type;
6721 hdev->le_scan_interval = interval;
6722 hdev->le_scan_window = window;
6724 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
/* If background scan is running, restart it so new parameters are
 * loaded into the controller.
 */
6730 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6731 hdev->discovery.state == DISCOVERY_STOPPED) {
6732 struct hci_request req;
6734 hci_req_init(&req, hdev);
6736 hci_req_add_le_scan_disable(&req);
6737 hci_req_add_le_passive_scan(&req);
6739 hci_req_run(&req, NULL);
6742 hci_dev_unlock(hdev);
/* Broadcast a controller hardware-error code to userspace as
 * MGMT_EV_HARDWARE_ERROR.
 */
6747 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
6749 struct mgmt_ev_hardware_error ev;
6751 ev.error_code = err_code;
6752 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
6754 #endif /* TIZEN_BT */
/* Sanity-check a userspace-supplied LTK entry: master flag must be 0/1 and
 * the address type must be LE public, or LE random with the two most
 * significant address bits set (static random address).
 */
6756 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6758 if (key->master != 0x00 && key->master != 0x01)
6761 switch (key->addr.type) {
6762 case BDADDR_LE_PUBLIC:
6765 case BDADDR_LE_RANDOM:
/* Two most significant bits shall be set */
6767 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: validate the key count against the
 * message length (guarding the u16 multiplication via max_key_count),
 * validate each entry with ltk_is_valid(), then atomically replace the
 * SMP LTK store: clear it and re-add each key with the SMP type/auth level
 * derived from the mgmt key type.
 * NOTE(review): extract is missing lines (break statements etc.); comments only.
 */
6775 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6776 void *cp_data, u16 len)
6778 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound keeps expected_len arithmetic from overflowing u16. */
6779 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6780 sizeof(struct mgmt_ltk_info));
6781 u16 key_count, expected_len;
6784 BT_DBG("request for %s", hdev->name);
6786 if (!lmp_le_capable(hdev))
6787 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6788 MGMT_STATUS_NOT_SUPPORTED);
6790 key_count = __le16_to_cpu(cp->key_count);
6791 if (key_count > max_key_count) {
6792 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6794 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6795 MGMT_STATUS_INVALID_PARAMS);
6798 expected_len = sizeof(*cp) + key_count *
6799 sizeof(struct mgmt_ltk_info);
6800 if (expected_len != len) {
6801 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6803 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6804 MGMT_STATUS_INVALID_PARAMS);
6807 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every entry before touching the existing key store. */
6809 for (i = 0; i < key_count; i++) {
6810 struct mgmt_ltk_info *key = &cp->keys[i];
6812 if (!ltk_is_valid(key))
6813 return mgmt_cmd_status(sk, hdev->id,
6814 MGMT_OP_LOAD_LONG_TERM_KEYS,
6815 MGMT_STATUS_INVALID_PARAMS);
6820 hci_smp_ltks_clear(hdev);
6822 for (i = 0; i < key_count; i++) {
6823 struct mgmt_ltk_info *key = &cp->keys[i];
6824 u8 type, authenticated;
/* Map mgmt key type -> SMP key type + authenticated flag. */
6826 switch (key->type) {
6827 case MGMT_LTK_UNAUTHENTICATED:
6828 authenticated = 0x00;
6829 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6831 case MGMT_LTK_AUTHENTICATED:
6832 authenticated = 0x01;
6833 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6835 case MGMT_LTK_P256_UNAUTH:
6836 authenticated = 0x00;
6837 type = SMP_LTK_P256;
6839 case MGMT_LTK_P256_AUTH:
6840 authenticated = 0x01;
6841 type = SMP_LTK_P256;
6843 case MGMT_LTK_P256_DEBUG:
6844 authenticated = 0x00;
6845 type = SMP_LTK_P256_DEBUG;
6851 hci_add_ltk(hdev, &key->addr.bdaddr,
6852 le_addr_type(key->addr.type), type, authenticated,
6853 key->val, key->enc_size, key->ediv, key->rand);
6856 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6859 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Conn Info: build the response from the
 * cached hci_conn values (RSSI / TX power / max TX power) on success, or
 * invalid sentinels on failure, then drop the connection reference taken
 * when the command was queued.
 */
6864 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6866 struct hci_conn *conn = cmd->user_data;
6867 struct mgmt_rp_get_conn_info rp;
6870 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6872 if (status == MGMT_STATUS_SUCCESS) {
6873 rp.rssi = conn->rssi;
6874 rp.tx_power = conn->tx_power;
6875 rp.max_tx_power = conn->max_tx_power;
6877 rp.rssi = HCI_RSSI_INVALID;
6878 rp.tx_power = HCI_TX_POWER_INVALID;
6879 rp.max_tx_power = HCI_TX_POWER_INVALID;
6882 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6883 status, &rp, sizeof(rp));
6885 hci_conn_drop(conn);
/* hci_request completion callback for the conn-info refresh request
 * (Read RSSI, optionally Read TX Power): recover the connection handle
 * from the last-sent command and finish the matching pending
 * MGMT_OP_GET_CONN_INFO command.
 */
6891 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
6894 struct hci_cp_read_rssi *cp;
6895 struct mgmt_pending_cmd *cmd;
6896 struct hci_conn *conn;
6900 BT_DBG("status 0x%02x", hci_status);
/* Commands sent in request are either Read RSSI or Read Transmit Power
 * Level so we check which one was last sent to retrieve connection
 * handle. Both commands have handle as first parameter so it's safe to
 * cast data on the same command struct.
 *
 * First command sent is always Read RSSI and we fail only if it fails.
 * In other case we simply override error to indicate success as we
 * already remembered if TX power value is actually valid.
 */
6913 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
6915 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
6916 status = MGMT_STATUS_SUCCESS;
6918 status = mgmt_status(hci_status);
6922 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
6926 handle = __le16_to_cpu(cp->handle);
6927 conn = hci_conn_hash_lookup_handle(hdev, handle);
6929 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
6934 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
6938 cmd->cmd_complete(cmd, status);
6939 mgmt_pending_remove(cmd);
6942 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: return RSSI / TX power for a connection.
 * Serves cached values from hci_conn when fresh enough; otherwise builds a
 * request (Read RSSI, plus Read TX Power where still unknown) and defers
 * the reply to conn_info_refresh_complete / conn_info_cmd_complete.
 * NOTE(review): extract is missing lines (braces/gotos); comments only.
 */
6945 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6948 struct mgmt_cp_get_conn_info *cp = data;
6949 struct mgmt_rp_get_conn_info rp;
6950 struct hci_conn *conn;
6951 unsigned long conn_info_age;
6954 BT_DBG("%s", hdev->name);
6956 memset(&rp, 0, sizeof(rp));
6957 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6958 rp.addr.type = cp->addr.type;
6960 if (!bdaddr_type_is_valid(cp->addr.type))
6961 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6962 MGMT_STATUS_INVALID_PARAMS,
6967 if (!hdev_is_powered(hdev)) {
6968 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6969 MGMT_STATUS_NOT_POWERED, &rp,
6974 if (cp->addr.type == BDADDR_BREDR)
6975 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6978 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6980 if (!conn || conn->state != BT_CONNECTED) {
6981 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6982 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one Get Conn Info may be in flight per connection. */
6987 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
6988 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6989 MGMT_STATUS_BUSY, &rp, sizeof(rp));
/* To avoid client trying to guess when to poll again for information we
 * calculate conn info age as random value between min/max set in hdev.
 */
6996 conn_info_age = hdev->conn_info_min_age +
6997 prandom_u32_max(hdev->conn_info_max_age -
6998 hdev->conn_info_min_age);
/* Query controller to refresh cached values if they are too old or were
 * never read.
 */
7003 if (time_after(jiffies, conn->conn_info_timestamp +
7004 msecs_to_jiffies(conn_info_age)) ||
7005 !conn->conn_info_timestamp) {
7006 struct hci_request req;
7007 struct hci_cp_read_tx_power req_txp_cp;
7008 struct hci_cp_read_rssi req_rssi_cp;
7009 struct mgmt_pending_cmd *cmd;
7011 hci_req_init(&req, hdev);
7012 req_rssi_cp.handle = cpu_to_le16(conn->handle);
7013 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
/* For LE links TX power does not change thus we don't need to
 * query for it once value is known.
 */
7019 if (!bdaddr_type_is_le(cp->addr.type) ||
7020 conn->tx_power == HCI_TX_POWER_INVALID) {
7021 req_txp_cp.handle = cpu_to_le16(conn->handle);
7022 req_txp_cp.type = 0x00;
7023 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7024 sizeof(req_txp_cp), &req_txp_cp);
/* Max TX power needs to be read only once per connection */
7028 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
7029 req_txp_cp.handle = cpu_to_le16(conn->handle);
7030 req_txp_cp.type = 0x01;
7031 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7032 sizeof(req_txp_cp), &req_txp_cp);
7035 err = hci_req_run(&req, conn_info_refresh_complete);
7039 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold conn for the duration of the pending command; dropped in
 * conn_info_cmd_complete.
 */
7046 hci_conn_hold(conn);
7047 cmd->user_data = hci_conn_get(conn);
7048 cmd->cmd_complete = conn_info_cmd_complete;
7050 conn->conn_info_timestamp = jiffies;
/* Cache is valid, just reply with values cached in hci_conn */
7053 rp.rssi = conn->rssi;
7054 rp.tx_power = conn->tx_power;
7055 rp.max_tx_power = conn->max_tx_power;
7057 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7058 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7062 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Clock Info: fill in the local clock from
 * hdev and, when a connection is associated, the piconet clock and accuracy
 * from hci_conn; then send the mgmt response and drop the conn reference.
 */
7066 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
7068 struct hci_conn *conn = cmd->user_data;
7069 struct mgmt_rp_get_clock_info rp;
7070 struct hci_dev *hdev;
7073 memset(&rp, 0, sizeof(rp));
7074 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
7079 hdev = hci_dev_get(cmd->index);
7081 rp.local_clock = cpu_to_le32(hdev->clock);
7086 rp.piconet_clock = cpu_to_le32(conn->clock);
7087 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7091 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7095 hci_conn_drop(conn);
/* hci_request completion callback for HCI_OP_READ_CLOCK: if the sent
 * command asked for the piconet clock (which != 0), resolve its handle to
 * the connection, then finish the matching pending MGMT_OP_GET_CLOCK_INFO.
 */
7102 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7104 struct hci_cp_read_clock *hci_cp;
7105 struct mgmt_pending_cmd *cmd;
7106 struct hci_conn *conn;
7108 BT_DBG("%s status %u", hdev->name, status);
7112 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
7116 if (hci_cp->which) {
7117 u16 handle = __le16_to_cpu(hci_cp->handle);
7118 conn = hci_conn_hash_lookup_handle(hdev, handle);
7123 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
7127 cmd->cmd_complete(cmd, mgmt_status(status));
7128 mgmt_pending_remove(cmd);
7131 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only): queue Read Clock for the
 * local clock and, when a peer address was given and connected, a second
 * Read Clock (which = 0x01) for the piconet clock. Reply is deferred to
 * get_clock_info_complete / clock_info_cmd_complete.
 * NOTE(review): extract is missing lines (braces/gotos); comments only.
 */
7134 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7137 struct mgmt_cp_get_clock_info *cp = data;
7138 struct mgmt_rp_get_clock_info rp;
7139 struct hci_cp_read_clock hci_cp;
7140 struct mgmt_pending_cmd *cmd;
7141 struct hci_request req;
7142 struct hci_conn *conn;
7145 BT_DBG("%s", hdev->name);
7147 memset(&rp, 0, sizeof(rp));
7148 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7149 rp.addr.type = cp->addr.type;
7151 if (cp->addr.type != BDADDR_BREDR)
7152 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7153 MGMT_STATUS_INVALID_PARAMS,
7158 if (!hdev_is_powered(hdev)) {
7159 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7160 MGMT_STATUS_NOT_POWERED, &rp,
/* Non-ANY address: require a live ACL connection to the peer. */
7165 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7166 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7168 if (!conn || conn->state != BT_CONNECTED) {
7169 err = mgmt_cmd_complete(sk, hdev->id,
7170 MGMT_OP_GET_CLOCK_INFO,
7171 MGMT_STATUS_NOT_CONNECTED,
7179 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7185 cmd->cmd_complete = clock_info_cmd_complete;
7187 hci_req_init(&req, hdev);
/* First Read Clock (zeroed cp) reads the local clock. */
7189 memset(&hci_cp, 0, sizeof(hci_cp));
7190 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold conn for the pending command; dropped in clock_info_cmd_complete. */
7193 hci_conn_hold(conn);
7194 cmd->user_data = hci_conn_get(conn);
7196 hci_cp.handle = cpu_to_le16(conn->handle);
7197 hci_cp.which = 0x01; /* Piconet clock */
7198 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
7201 err = hci_req_run(&req, get_clock_info_complete);
7203 mgmt_pending_remove(cmd);
7206 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address exists, matches the
 * given destination address type, and is in BT_CONNECTED state.
 */
7210 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7212 struct hci_conn *conn;
7214 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7218 if (conn->dst_type != type)
7221 if (conn->state != BT_CONNECTED)
7228 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7229 u8 addr_type, u8 auto_connect)
7231 struct hci_conn_params *params;
7233 params = hci_conn_params_add(hdev, addr, addr_type);
7237 if (params->auto_connect == auto_connect)
7240 list_del_init(¶ms->action);
7242 switch (auto_connect) {
7243 case HCI_AUTO_CONN_DISABLED:
7244 case HCI_AUTO_CONN_LINK_LOSS:
7245 /* If auto connect is being disabled when we're trying to
7246 * connect to device, keep connecting.
7248 if (params->explicit_connect)
7249 list_add(¶ms->action, &hdev->pend_le_conns);
7251 case HCI_AUTO_CONN_REPORT:
7252 if (params->explicit_connect)
7253 list_add(¶ms->action, &hdev->pend_le_conns);
7255 list_add(¶ms->action, &hdev->pend_le_reports);
7257 case HCI_AUTO_CONN_DIRECT:
7258 case HCI_AUTO_CONN_ALWAYS:
7259 if (!is_connected(hdev, addr, addr_type))
7260 list_add(¶ms->action, &hdev->pend_le_conns);
7264 params->auto_connect = auto_connect;
7266 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit the MGMT_EV_DEVICE_ADDED event to management sockets, with @sk
 * as the skipped (originating) socket.
 * NOTE(review): the line assigning ev.action appears to be missing
 * from this extract -- confirm against full source.
 */
7272 static void device_added(struct sock *sk, struct hci_dev *hdev,
7273 bdaddr_t *bdaddr, u8 type, u8 action)
7275 struct mgmt_ev_device_added ev;
7277 bacpy(&ev.addr.bdaddr, bdaddr);
7278 ev.addr.type = type;
7281 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler: register a device for background
 * connection handling. For BR/EDR addresses only action 0x01 (allow
 * incoming connection / whitelist) is supported; for LE addresses the
 * action selects the auto-connect policy stored via hci_conn_params_set().
 * NOTE(review): several lines (braces, goto labels, returns) are missing
 * from this extract.
 */
7284 static int add_device(struct sock *sk, struct hci_dev *hdev,
7285 void *data, u16 len)
7287 struct mgmt_cp_add_device *cp = data;
7288 u8 auto_conn, addr_type;
7291 BT_DBG("%s", hdev->name);
/* Reject invalid address types and the all-zero BDADDR_ANY address. */
7293 if (!bdaddr_type_is_valid(cp->addr.type) ||
7294 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7295 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7296 MGMT_STATUS_INVALID_PARAMS,
7297 &cp->addr, sizeof(cp->addr));
/* Only actions 0x00 (report), 0x01 (direct), 0x02 (always) are valid. */
7299 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7300 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7301 MGMT_STATUS_INVALID_PARAMS,
7302 &cp->addr, sizeof(cp->addr));
7306 if (cp->addr.type == BDADDR_BREDR) {
7307 /* Only incoming connections action is supported for now */
7308 if (cp->action != 0x01) {
7309 err = mgmt_cmd_complete(sk, hdev->id,
7311 MGMT_STATUS_INVALID_PARAMS,
7312 &cp->addr, sizeof(cp->addr));
7316 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* Re-evaluate page scan now that the whitelist changed. */
7321 hci_req_update_scan(hdev);
7326 addr_type = le_addr_type(cp->addr.type);
/* Map the mgmt action byte to the internal auto-connect policy. */
7328 if (cp->action == 0x02)
7329 auto_conn = HCI_AUTO_CONN_ALWAYS;
7330 else if (cp->action == 0x01)
7331 auto_conn = HCI_AUTO_CONN_DIRECT;
7333 auto_conn = HCI_AUTO_CONN_REPORT;
7335 /* Kernel internally uses conn_params with resolvable private
7336 * address, but Add Device allows only identity addresses.
7337 * Make sure it is enforced before calling
7338 * hci_conn_params_lookup.
7340 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7341 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7342 MGMT_STATUS_INVALID_PARAMS,
7343 &cp->addr, sizeof(cp->addr));
7347 /* If the connection parameters don't exist for this device,
7348 * they will be created and configured with defaults.
7350 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7352 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7353 MGMT_STATUS_FAILED, &cp->addr,
/* Restart passive scanning to pick up the new pending entry. */
7358 hci_update_background_scan(hdev);
7361 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7363 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7364 MGMT_STATUS_SUCCESS, &cp->addr,
7368 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_REMOVED event to management sockets, with
 * @sk as the skipped (originating) socket.
 */
7372 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7373 bdaddr_t *bdaddr, u8 type)
7375 struct mgmt_ev_device_removed ev;
7377 bacpy(&ev.addr.bdaddr, bdaddr);
7378 ev.addr.type = type;
7380 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_REMOVE_DEVICE handler: remove one device (specific address)
 * or all devices (BDADDR_ANY) from background-connection handling.
 * BR/EDR entries are removed from the whitelist; LE entries have their
 * stored conn_params deleted (explicit-connect entries are preserved).
 *
 * Fix: repaired mis-decoded "&para;" mojibake -- "¶ms" restored to
 * "&params" in the two list_del() calls. No other change.
 * NOTE(review): several structural lines are missing from this extract.
 */
7383 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7384 void *data, u16 len)
7386 struct mgmt_cp_remove_device *cp = data;
7389 BT_DBG("%s", hdev->name);
/* Non-zero address: remove a single device. */
7393 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7394 struct hci_conn_params *params;
7397 if (!bdaddr_type_is_valid(cp->addr.type)) {
7398 err = mgmt_cmd_complete(sk, hdev->id,
7399 MGMT_OP_REMOVE_DEVICE,
7400 MGMT_STATUS_INVALID_PARAMS,
7401 &cp->addr, sizeof(cp->addr));
7405 if (cp->addr.type == BDADDR_BREDR) {
7406 err = hci_bdaddr_list_del(&hdev->whitelist,
7410 err = mgmt_cmd_complete(sk, hdev->id,
7411 MGMT_OP_REMOVE_DEVICE,
7412 MGMT_STATUS_INVALID_PARAMS,
7418 hci_req_update_scan(hdev);
7420 device_removed(sk, hdev, &cp->addr.bdaddr,
7425 addr_type = le_addr_type(cp->addr.type);
7427 /* Kernel internally uses conn_params with resolvable private
7428 * address, but Remove Device allows only identity addresses.
7429 * Make sure it is enforced before calling
7430 * hci_conn_params_lookup.
7432 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7433 err = mgmt_cmd_complete(sk, hdev->id,
7434 MGMT_OP_REMOVE_DEVICE,
7435 MGMT_STATUS_INVALID_PARAMS,
7436 &cp->addr, sizeof(cp->addr));
7440 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7443 err = mgmt_cmd_complete(sk, hdev->id,
7444 MGMT_OP_REMOVE_DEVICE,
7445 MGMT_STATUS_INVALID_PARAMS,
7446 &cp->addr, sizeof(cp->addr));
/* Disabled/explicit-connect entries were not added via Add Device. */
7450 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7451 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7452 err = mgmt_cmd_complete(sk, hdev->id,
7453 MGMT_OP_REMOVE_DEVICE,
7454 MGMT_STATUS_INVALID_PARAMS,
7455 &cp->addr, sizeof(cp->addr));
7459 list_del(&params->action);
7460 list_del(&params->list);
7462 hci_update_background_scan(hdev);
7464 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: remove everything. */
7466 struct hci_conn_params *p, *tmp;
7467 struct bdaddr_list *b, *btmp;
7469 if (cp->addr.type) {
7470 err = mgmt_cmd_complete(sk, hdev->id,
7471 MGMT_OP_REMOVE_DEVICE,
7472 MGMT_STATUS_INVALID_PARAMS,
7473 &cp->addr, sizeof(cp->addr));
7477 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
7478 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7483 hci_req_update_scan(hdev);
7485 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7486 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7488 device_removed(sk, hdev, &p->addr, p->addr_type);
7489 if (p->explicit_connect) {
/* Keep entries with a pending explicit connect; just downgrade them. */
7490 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7493 list_del(&p->action);
7498 BT_DBG("All LE connection parameters were removed");
7500 hci_update_background_scan(hdev);
7504 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7505 MGMT_STATUS_SUCCESS, &cp->addr,
7508 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: replace the stored LE connection
 * parameters with the list supplied by userspace. The parameter count
 * is bounded so the expected_len computation cannot overflow u16, and
 * each entry is validated with hci_check_conn_params() before being
 * stored; invalid entries are logged and skipped rather than failing
 * the whole command.
 *
 * Fix: repaired mis-decoded "&para;" mojibake -- "¶m" restored to
 * "&param" in the BT_DBG() and hci_conn_params_add() calls. No other
 * change.
 * NOTE(review): several structural lines are missing from this extract.
 */
7512 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7515 struct mgmt_cp_load_conn_param *cp = data;
7516 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7517 sizeof(struct mgmt_conn_param));
7518 u16 param_count, expected_len;
7521 if (!lmp_le_capable(hdev))
7522 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7523 MGMT_STATUS_NOT_SUPPORTED);
7525 param_count = __le16_to_cpu(cp->param_count);
7526 if (param_count > max_param_count) {
7527 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7529 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7530 MGMT_STATUS_INVALID_PARAMS);
/* The command length must exactly match the declared entry count. */
7533 expected_len = sizeof(*cp) + param_count *
7534 sizeof(struct mgmt_conn_param);
7535 if (expected_len != len) {
7536 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7538 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7539 MGMT_STATUS_INVALID_PARAMS);
7542 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop previously-stored disabled entries before loading the new set. */
7546 hci_conn_params_clear_disabled(hdev);
7548 for (i = 0; i < param_count; i++) {
7549 struct mgmt_conn_param *param = &cp->params[i];
7550 struct hci_conn_params *hci_param;
7551 u16 min, max, latency, timeout;
7554 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
7557 if (param->addr.type == BDADDR_LE_PUBLIC) {
7558 addr_type = ADDR_LE_DEV_PUBLIC;
7559 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7560 addr_type = ADDR_LE_DEV_RANDOM;
7562 bt_dev_err(hdev, "ignoring invalid connection parameters");
7566 min = le16_to_cpu(param->min_interval);
7567 max = le16_to_cpu(param->max_interval);
7568 latency = le16_to_cpu(param->latency);
7569 timeout = le16_to_cpu(param->timeout);
7571 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7572 min, max, latency, timeout);
7574 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7575 bt_dev_err(hdev, "ignoring invalid connection parameters");
7579 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7582 bt_dev_err(hdev, "failed to add connection parameters");
7586 hci_param->conn_min_interval = min;
7587 hci_param->conn_max_interval = max;
7588 hci_param->conn_latency = latency;
7589 hci_param->supervision_timeout = timeout;
7592 hci_dev_unlock(hdev);
7594 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggle the HCI_EXT_CONFIGURED
 * flag on an unpowered controller that has the EXTERNAL_CONFIG quirk,
 * and transition the controller between the configured and
 * unconfigured index lists when its configured state changes.
 * NOTE(review): lines between some statements are missing from this
 * extract (e.g. the condition selecting the set vs clear branch).
 */
7598 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7599 void *data, u16 len)
7601 struct mgmt_cp_set_external_config *cp = data;
7605 BT_DBG("%s", hdev->name);
/* Configuration may only change while the controller is powered off. */
7607 if (hdev_is_powered(hdev))
7608 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7609 MGMT_STATUS_REJECTED);
7611 if (cp->config != 0x00 && cp->config != 0x01)
7612 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7613 MGMT_STATUS_INVALID_PARAMS);
7615 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7616 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7617 MGMT_STATUS_NOT_SUPPORTED);
7622 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7624 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7626 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7633 err = new_options(hdev, sk);
/* If configured state flipped, move the index between lists. */
7635 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7636 mgmt_index_removed(hdev);
7638 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7639 hci_dev_set_flag(hdev, HCI_CONFIG);
7640 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7642 queue_work(hdev->req_workqueue, &hdev->power_on);
7644 set_bit(HCI_RAW, &hdev->flags);
7645 mgmt_index_added(hdev);
7650 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store a public address for a
 * controller that requires one (has a set_bdaddr driver callback),
 * while powered off. If the controller becomes fully configured as a
 * result, move it to the configured index list and power it on.
 */
7654 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7655 void *data, u16 len)
7657 struct mgmt_cp_set_public_address *cp = data;
7661 BT_DBG("%s", hdev->name);
7663 if (hdev_is_powered(hdev))
7664 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7665 MGMT_STATUS_REJECTED);
/* The all-zero address is not a valid public address. */
7667 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7668 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7669 MGMT_STATUS_INVALID_PARAMS);
/* Only meaningful if the driver can program a bdaddr. */
7671 if (!hdev->set_bdaddr)
7672 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7673 MGMT_STATUS_NOT_SUPPORTED);
7677 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7678 bacpy(&hdev->public_addr, &cp->bdaddr);
7680 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7687 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7688 err = new_options(hdev, sk);
7690 if (is_configured(hdev)) {
7691 mgmt_index_removed(hdev);
7693 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7695 hci_dev_set_flag(hdev, HCI_CONFIG);
7696 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7698 queue_work(hdev->req_workqueue, &hdev->power_on);
7702 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_NAME_UPDATE for a BR/EDR device, packing @name
 * into the event's EIR block as an EIR_NAME_COMPLETE field.
 * NOTE(review): the buf declaration and memset lines appear to be
 * missing from this extract.
 */
7707 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
7711 struct mgmt_ev_device_name_update *ev = (void *)buf;
7717 bacpy(&ev->addr.bdaddr, bdaddr);
7718 ev->addr.type = BDADDR_BREDR;
7720 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7723 ev->eir_len = cpu_to_le16(eir_len);
7725 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
7726 sizeof(*ev) + eir_len, NULL);
/* Emit MGMT_EV_CONN_UPDATE_FAILED (Tizen extension) reporting that an
 * LE connection parameter update failed with @status.
 */
7729 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7730 u8 link_type, u8 addr_type, u8 status)
7732 struct mgmt_ev_conn_update_failed ev;
7734 bacpy(&ev.addr.bdaddr, bdaddr);
7735 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7738 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
7739 &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_CONN_UPDATED (Tizen extension) carrying the new LE
 * connection interval, latency and supervision timeout (little-endian
 * on the wire).
 */
7742 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
7743 u8 link_type, u8 addr_type, u16 conn_interval,
7744 u16 conn_latency, u16 supervision_timeout)
7746 struct mgmt_ev_conn_updated ev;
7748 bacpy(&ev.addr.bdaddr, bdaddr);
7749 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7750 ev.conn_interval = cpu_to_le16(conn_interval);
7751 ev.conn_latency = cpu_to_le16(conn_latency);
7752 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
7754 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
7755 &ev, sizeof(ev), NULL);
/* Completion callback for the Read Local OOB (Extended) Data HCI
 * request issued by read_local_ssp_oob_req(). Builds the mgmt reply
 * containing class-of-device plus the P-192 and (when secure
 * connections data is present) P-256 hash/randomizer EIR fields, sends
 * it to the requester and broadcasts MGMT_EV_LOCAL_OOB_DATA_UPDATED.
 * NOTE(review): several lines (h192/r192/h256/r256 assignments, goto
 * labels, braces) are missing from this extract.
 */
7759 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
7760 u16 opcode, struct sk_buff *skb)
7762 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7763 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7764 u8 *h192, *r192, *h256, *r256;
7765 struct mgmt_pending_cmd *cmd;
7769 BT_DBG("%s status %u", hdev->name, status);
7771 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
7775 mgmt_cp = cmd->param;
7778 status = mgmt_status(status);
/* Legacy (P-192 only) OOB data response. */
7785 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
7786 struct hci_rp_read_local_oob_data *rp;
7788 if (skb->len != sizeof(*rp)) {
7789 status = MGMT_STATUS_FAILED;
7792 status = MGMT_STATUS_SUCCESS;
7793 rp = (void *)skb->data;
/* 5 bytes class-of-dev EIR + 18-byte hash + 18-byte randomizer. */
7795 eir_len = 5 + 18 + 18;
/* Extended (P-192 + P-256) OOB data response. */
7802 struct hci_rp_read_local_oob_ext_data *rp;
7804 if (skb->len != sizeof(*rp)) {
7805 status = MGMT_STATUS_FAILED;
7808 status = MGMT_STATUS_SUCCESS;
7809 rp = (void *)skb->data;
7811 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7812 eir_len = 5 + 18 + 18;
7816 eir_len = 5 + 18 + 18 + 18 + 18;
7826 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7833 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7834 hdev->dev_class, 3);
7837 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7838 EIR_SSP_HASH_C192, h192, 16);
7839 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7840 EIR_SSP_RAND_R192, r192, 16);
7844 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7845 EIR_SSP_HASH_C256, h256, 16);
7846 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7847 EIR_SSP_RAND_R256, r256, 16);
7851 mgmt_rp->type = mgmt_cp->type;
7852 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7854 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7855 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7856 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7857 if (err < 0 || status)
/* Clear the flag so the requester doesn't receive its own broadcast. */
7860 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7862 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7863 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7864 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7867 mgmt_pending_remove(cmd);
/* Queue the HCI request that reads the controller's local SSP OOB data,
 * using the extended variant when BR/EDR secure connections are
 * enabled. Completion is handled by read_local_oob_ext_data_complete().
 */
7870 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7871 struct mgmt_cp_read_local_oob_ext_data *cp)
7873 struct mgmt_pending_cmd *cmd;
7874 struct hci_request req;
7877 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7882 hci_req_init(&req, hdev);
7884 if (bredr_sc_enabled(hdev))
7885 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7887 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7889 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
/* Drop the pending command if the request could not be started. */
7891 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler. For BR/EDR with SSP enabled
 * the data comes asynchronously from the controller via
 * read_local_ssp_oob_req(); for LE the reply is built synchronously
 * from the local address, role, SC OOB values and advertising flags.
 * Replies to the requester and broadcasts the same payload as
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED to other subscribed sockets.
 * NOTE(review): several lines (goto labels, braces, some assignments)
 * are missing from this extract.
 */
7898 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7899 void *data, u16 data_len)
7901 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7902 struct mgmt_rp_read_local_oob_ext_data *rp;
7905 u8 status, flags, role, addr[7], hash[16], rand[16];
7908 BT_DBG("%s", hdev->name);
/* Only certain address-type bitmasks are valid, and only when powered. */
7910 if (hdev_is_powered(hdev)) {
7912 case BIT(BDADDR_BREDR):
7913 status = mgmt_bredr_support(hdev);
7919 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7920 status = mgmt_le_support(hdev);
/* addr(9) + role(3) + SC hash(18) + SC rand(18) + flags(3). */
7924 eir_len = 9 + 3 + 18 + 18 + 3;
7927 status = MGMT_STATUS_INVALID_PARAMS;
7932 status = MGMT_STATUS_NOT_POWERED;
7936 rp_len = sizeof(*rp) + eir_len;
7937 rp = kmalloc(rp_len, GFP_ATOMIC);
7948 case BIT(BDADDR_BREDR):
7949 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
/* Asynchronous path: reply comes from the completion callback. */
7950 err = read_local_ssp_oob_req(hdev, sk, cp);
7951 hci_dev_unlock(hdev);
7955 status = MGMT_STATUS_FAILED;
7958 eir_len = eir_append_data(rp->eir, eir_len,
7960 hdev->dev_class, 3);
7963 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7964 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7965 smp_generate_oob(hdev, hash, rand) < 0) {
7966 hci_dev_unlock(hdev);
7967 status = MGMT_STATUS_FAILED;
7971 /* This should return the active RPA, but since the RPA
7972 * is only programmed on demand, it is really hard to fill
7973 * this in at the moment. For now disallow retrieving
7974 * local out-of-band data when privacy is in use.
7976 * Returning the identity address will not help here since
7977 * pairing happens before the identity resolving key is
7978 * known and thus the connection establishment happens
7979 * based on the RPA and not the identity address.
7981 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7982 hci_dev_unlock(hdev);
7983 status = MGMT_STATUS_REJECTED;
/* Pick the static or public address the controller will use. */
7987 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7988 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7989 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7990 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7991 memcpy(addr, &hdev->static_addr, 6);
7994 memcpy(addr, &hdev->bdaddr, 6);
7998 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7999 addr, sizeof(addr));
8001 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8006 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8007 &role, sizeof(role));
8009 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8010 eir_len = eir_append_data(rp->eir, eir_len,
8012 hash, sizeof(hash));
8014 eir_len = eir_append_data(rp->eir, eir_len,
8016 rand, sizeof(rand));
8019 flags = mgmt_get_adv_discov_flags(hdev);
8021 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8022 flags |= LE_AD_NO_BREDR;
8024 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8025 &flags, sizeof(flags));
8029 hci_dev_unlock(hdev);
8031 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8033 status = MGMT_STATUS_SUCCESS;
8036 rp->type = cp->type;
8037 rp->eir_len = cpu_to_le16(eir_len);
8039 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8040 status, rp, sizeof(*rp) + eir_len);
8041 if (err < 0 || status)
8044 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8045 rp, sizeof(*rp) + eir_len,
8046 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Return the bitmask of MGMT_ADV_FLAG_* advertising flags this
 * controller supports. TX power is advertised when the controller
 * reported a valid adv TX power or supports extended advertising, and
 * the secondary-PHY flags depend on the LE feature bits.
 */
8054 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8058 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8059 flags |= MGMT_ADV_FLAG_DISCOV;
8060 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8061 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8062 flags |= MGMT_ADV_FLAG_APPEARANCE;
8063 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8065 /* In extended adv TX_POWER returned from Set Adv Param
8066 * will be always valid.
8068 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
8069 ext_adv_capable(hdev))
8070 flags |= MGMT_ADV_FLAG_TX_POWER;
8072 if (ext_adv_capable(hdev)) {
8073 flags |= MGMT_ADV_FLAG_SEC_1M;
8075 if (hdev->le_features[1] & HCI_LE_PHY_2M)
8076 flags |= MGMT_ADV_FLAG_SEC_2M;
8078 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8079 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler: report the supported advertising
 * flags, data-length limits, maximum instance count and the list of
 * currently registered advertising instance IDs.
 */
8085 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8086 void *data, u16 data_len)
8088 struct mgmt_rp_read_adv_features *rp;
8091 struct adv_info *adv_instance;
8092 u32 supported_flags;
8095 BT_DBG("%s", hdev->name);
8097 if (!lmp_le_capable(hdev))
8098 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8099 MGMT_STATUS_REJECTED);
/* One trailing byte per registered instance ID. */
8103 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8104 rp = kmalloc(rp_len, GFP_ATOMIC);
8106 hci_dev_unlock(hdev);
8110 supported_flags = get_supported_adv_flags(hdev);
8112 rp->supported_flags = cpu_to_le32(supported_flags);
8113 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
8114 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
8115 rp->max_instances = HCI_MAX_ADV_INSTANCES;
8116 rp->num_instances = hdev->adv_instance_cnt;
/* Copy each registered instance ID into the variable-length tail. */
8118 instance = rp->instance;
8119 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8120 *instance = adv_instance->instance;
8124 hci_dev_unlock(hdev);
8126 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8127 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Return the number of bytes the local name occupies when appended as
 * an EIR field (computed by writing it into a scratch buffer).
 */
8134 static u8 calculate_name_len(struct hci_dev *hdev)
8136 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
8138 return append_local_name(hdev, buf, 0);
/* Return the maximum TLV payload space left for user-supplied
 * advertising (or scan-response) data after subtracting the room
 * consumed by kernel-managed fields selected in @adv_flags.
 */
8141 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8144 u8 max_len = HCI_MAX_AD_LENGTH;
/* Kernel-managed flags field consumes space in the adv data. */
8147 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8148 MGMT_ADV_FLAG_LIMITED_DISCOV |
8149 MGMT_ADV_FLAG_MANAGED_FLAGS))
8152 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8155 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8156 max_len -= calculate_name_len(hdev);
8158 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel manages the EIR_FLAGS field for these adv flags. */
8165 static bool flags_managed(u32 adv_flags)
8167 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8168 MGMT_ADV_FLAG_LIMITED_DISCOV |
8169 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel manages the EIR_TX_POWER field for these flags. */
8172 static bool tx_power_managed(u32 adv_flags)
8174 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel manages the local-name EIR fields for these flags. */
8177 static bool name_managed(u32 adv_flags)
8179 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel manages the EIR_APPEARANCE field for these flags. */
8182 static bool appearance_managed(u32 adv_flags)
8184 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising/scan-response TLV data: it must
 * fit in the space left by kernel-managed fields and must not contain
 * EIR types the kernel manages itself (flags, TX power, names,
 * appearance, depending on @adv_flags).
 * NOTE(review): the cur_len assignment and return statements are
 * missing from this extract.
 */
8187 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8188 u8 len, bool is_adv_data)
8193 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8198 /* Make sure that the data is correctly formatted. */
8199 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* Reject fields the kernel generates itself. */
8202 if (data[i + 1] == EIR_FLAGS &&
8203 (!is_adv_data || flags_managed(adv_flags)))
8206 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8209 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8212 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8215 if (data[i + 1] == EIR_APPEARANCE &&
8216 appearance_managed(adv_flags))
8219 /* If the current field length would exceed the total data
8220 * length, then it's invalid.
8222 if (i + cur_len >= len)
/* HCI request callback for Add Advertising. On failure, every
 * still-pending advertising instance is removed (cancelling the adv
 * timeout if the current instance is among them) and an
 * Advertising Removed event is emitted; the pending mgmt command, if
 * any, is completed with the mapped status.
 */
8229 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
8232 struct mgmt_pending_cmd *cmd;
8233 struct mgmt_cp_add_advertising *cp;
8234 struct mgmt_rp_add_advertising rp;
8235 struct adv_info *adv_instance, *n;
8238 BT_DBG("status %d", status);
8242 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
8244 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
8245 if (!adv_instance->pending)
/* On success the instance simply stops being "pending". */
8249 adv_instance->pending = false;
8253 instance = adv_instance->instance;
8255 if (hdev->cur_adv_instance == instance)
8256 cancel_adv_timeout(hdev);
8258 hci_remove_adv_instance(hdev, instance);
8259 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
8266 rp.instance = cp->instance;
8269 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8270 mgmt_status(status));
8272 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8273 mgmt_status(status), &rp, sizeof(rp));
8275 mgmt_pending_remove(cmd);
8278 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_ADVERTISING handler: validate flags and TLV payloads,
 * register (or replace) the advertising instance, and schedule it via
 * an HCI request when the controller is powered and legacy
 * HCI_ADVERTISING mode is not active; otherwise reply immediately.
 * NOTE(review): some lines (goto labels, braces, closing of calls) are
 * missing from this extract.
 */
8281 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8282 void *data, u16 data_len)
8284 struct mgmt_cp_add_advertising *cp = data;
8285 struct mgmt_rp_add_advertising rp;
8287 u32 supported_flags, phy_flags;
8289 u16 timeout, duration;
8290 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
8291 u8 schedule_instance = 0;
8292 struct adv_info *next_instance;
8294 struct mgmt_pending_cmd *cmd;
8295 struct hci_request req;
8297 BT_DBG("%s", hdev->name);
8299 status = mgmt_le_support(hdev);
8301 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8304 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
8305 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8306 MGMT_STATUS_INVALID_PARAMS);
/* Command length must match the declared adv + scan-rsp data sizes. */
8308 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8309 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8310 MGMT_STATUS_INVALID_PARAMS);
8312 flags = __le32_to_cpu(cp->flags);
8313 timeout = __le16_to_cpu(cp->timeout);
8314 duration = __le16_to_cpu(cp->duration);
8316 /* The current implementation only supports a subset of the specified
8317 * flags. Also need to check mutual exclusiveness of sec flags.
8319 supported_flags = get_supported_adv_flags(hdev);
8320 phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more than one
 * secondary-PHY bit is set, which is invalid. */
8321 if (flags & ~supported_flags ||
8322 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8323 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8324 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs the power-off timer, so require a powered device. */
8328 if (timeout && !hdev_is_powered(hdev)) {
8329 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8330 MGMT_STATUS_REJECTED);
/* Serialize against other in-flight advertising/LE state changes. */
8334 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8335 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8336 pending_find(MGMT_OP_SET_LE, hdev)) {
8337 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8342 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8343 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8344 cp->scan_rsp_len, false)) {
8345 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8346 MGMT_STATUS_INVALID_PARAMS);
8350 err = hci_add_adv_instance(hdev, cp->instance, flags,
8351 cp->adv_data_len, cp->data,
8353 cp->data + cp->adv_data_len,
8356 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8357 MGMT_STATUS_FAILED);
8361 /* Only trigger an advertising added event if a new instance was
8364 if (hdev->adv_instance_cnt > prev_instance_cnt)
8365 mgmt_advertising_added(sk, hdev, cp->instance);
8367 if (hdev->cur_adv_instance == cp->instance) {
8368 /* If the currently advertised instance is being changed then
8369 * cancel the current advertising and schedule the next
8370 * instance. If there is only one instance then the overridden
8371 * advertising data will be visible right away.
8373 cancel_adv_timeout(hdev);
8375 next_instance = hci_get_next_instance(hdev, cp->instance);
8377 schedule_instance = next_instance->instance;
8378 } else if (!hdev->adv_instance_timeout) {
8379 /* Immediately advertise the new instance if no other
8380 * instance is currently being advertised.
8382 schedule_instance = cp->instance;
8385 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8386 * there is no instance to be advertised then we have no HCI
8387 * communication to make. Simply return.
8389 if (!hdev_is_powered(hdev) ||
8390 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8391 !schedule_instance) {
8392 rp.instance = cp->instance;
8393 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8394 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8398 /* We're good to go, update advertising data, parameters, and start
8401 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8408 hci_req_init(&req, hdev);
8410 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
8413 err = hci_req_run(&req, add_advertising_complete);
8416 mgmt_pending_remove(cmd);
8419 hci_dev_unlock(hdev);
/* HCI request callback for Remove Advertising: complete the pending
 * mgmt command. Always reports success, because by this point the
 * instance itself has already been removed (a failure status only
 * means disabling advertising failed).
 */
8424 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
8427 struct mgmt_pending_cmd *cmd;
8428 struct mgmt_cp_remove_advertising *cp;
8429 struct mgmt_rp_remove_advertising rp;
8431 BT_DBG("status %d", status);
8435 /* A failure status here only means that we failed to disable
8436 * advertising. Otherwise, the advertising instance has been removed,
8437 * so report success.
8439 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
8444 rp.instance = cp->instance;
8446 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
8448 mgmt_pending_remove(cmd);
8451 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler: remove one advertising instance
 * (or all, when instance is 0), disabling advertising if no instances
 * remain. Replies immediately when no HCI traffic is required.
 */
8454 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8455 void *data, u16 data_len)
8457 struct mgmt_cp_remove_advertising *cp = data;
8458 struct mgmt_rp_remove_advertising rp;
8459 struct mgmt_pending_cmd *cmd;
8460 struct hci_request req;
8463 BT_DBG("%s", hdev->name);
/* A non-zero instance must actually exist. */
8467 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8468 err = mgmt_cmd_status(sk, hdev->id,
8469 MGMT_OP_REMOVE_ADVERTISING,
8470 MGMT_STATUS_INVALID_PARAMS);
/* Serialize against other in-flight advertising/LE state changes. */
8474 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8475 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8476 pending_find(MGMT_OP_SET_LE, hdev)) {
8477 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8482 if (list_empty(&hdev->adv_instances)) {
8483 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8484 MGMT_STATUS_INVALID_PARAMS);
8488 hci_req_init(&req, hdev);
8490 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8492 if (list_empty(&hdev->adv_instances))
8493 __hci_req_disable_advertising(&req);
8495 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
8496 * flag is set or the device isn't powered then we have no HCI
8497 * communication to make. Simply return.
8499 if (skb_queue_empty(&req.cmd_q) ||
8500 !hdev_is_powered(hdev) ||
8501 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8502 hci_req_purge(&req);
8503 rp.instance = cp->instance;
8504 err = mgmt_cmd_complete(sk, hdev->id,
8505 MGMT_OP_REMOVE_ADVERTISING,
8506 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8510 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8517 err = hci_req_run(&req, remove_advertising_complete);
8519 mgmt_pending_remove(cmd);
8522 hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_SIZE_INFO handler: report, for a given instance and
 * flag combination, how many bytes of advertising and scan-response
 * data would remain available to userspace.
 */
8527 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8528 void *data, u16 data_len)
8530 struct mgmt_cp_get_adv_size_info *cp = data;
8531 struct mgmt_rp_get_adv_size_info rp;
8532 u32 flags, supported_flags;
8535 BT_DBG("%s", hdev->name);
8537 if (!lmp_le_capable(hdev))
8538 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8539 MGMT_STATUS_REJECTED);
8541 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
8542 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8543 MGMT_STATUS_INVALID_PARAMS);
8545 flags = __le32_to_cpu(cp->flags);
8547 /* The current implementation only supports a subset of the specified
8550 supported_flags = get_supported_adv_flags(hdev);
8551 if (flags & ~supported_flags)
8552 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8553 MGMT_STATUS_INVALID_PARAMS);
8555 rp.instance = cp->instance;
8556 rp.flags = cp->flags;
8557 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8558 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8560 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8561 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for standard mgmt opcodes, indexed by opcode value.
 * Each entry pairs a handler with its minimum command size; flags mark
 * variable-length commands (HCI_MGMT_VAR_LEN), commands permitted on
 * unconfigured controllers (HCI_MGMT_UNCONFIGURED) and commands open
 * to untrusted sockets (HCI_MGMT_UNTRUSTED). Entry order must match
 * the MGMT_OP_* opcode numbering.
 */
8566 static const struct hci_mgmt_handler mgmt_handlers[] = {
8567 { NULL }, /* 0x0000 (no command) */
8568 { read_version, MGMT_READ_VERSION_SIZE,
8570 HCI_MGMT_UNTRUSTED },
8571 { read_commands, MGMT_READ_COMMANDS_SIZE,
8573 HCI_MGMT_UNTRUSTED },
8574 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8576 HCI_MGMT_UNTRUSTED },
8577 { read_controller_info, MGMT_READ_INFO_SIZE,
8578 HCI_MGMT_UNTRUSTED },
8579 { set_powered, MGMT_SETTING_SIZE },
8580 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8581 { set_connectable, MGMT_SETTING_SIZE },
8582 { set_fast_connectable, MGMT_SETTING_SIZE },
8583 { set_bondable, MGMT_SETTING_SIZE },
8584 { set_link_security, MGMT_SETTING_SIZE },
8585 { set_ssp, MGMT_SETTING_SIZE },
8586 { set_hs, MGMT_SETTING_SIZE },
8587 { set_le, MGMT_SETTING_SIZE },
8588 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8589 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8590 { add_uuid, MGMT_ADD_UUID_SIZE },
8591 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8592 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8594 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8596 { disconnect, MGMT_DISCONNECT_SIZE },
8597 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8598 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8599 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8600 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8601 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8602 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8603 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8604 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8605 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8606 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8607 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8608 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8609 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8611 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8612 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8613 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8614 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8615 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8616 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8617 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8618 { set_advertising, MGMT_SETTING_SIZE },
8619 { set_bredr, MGMT_SETTING_SIZE },
8620 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8621 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8622 { set_secure_conn, MGMT_SETTING_SIZE },
8623 { set_debug_keys, MGMT_SETTING_SIZE },
8624 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8625 { load_irks, MGMT_LOAD_IRKS_SIZE,
8627 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8628 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8629 { add_device, MGMT_ADD_DEVICE_SIZE },
8630 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8631 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8633 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8635 HCI_MGMT_UNTRUSTED },
8636 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8637 HCI_MGMT_UNCONFIGURED |
8638 HCI_MGMT_UNTRUSTED },
8639 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8640 HCI_MGMT_UNCONFIGURED },
8641 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8642 HCI_MGMT_UNCONFIGURED },
8643 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8645 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8646 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8648 HCI_MGMT_UNTRUSTED },
8649 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8650 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8652 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8653 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8654 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8655 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8656 HCI_MGMT_UNTRUSTED },
8657 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8658 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8659 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8663 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
8664 { NULL }, /* 0x0000 (no command) */
8665 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
8666 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
8668 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
8670 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
8671 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
8672 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
8673 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
8674 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
8675 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
8676 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
8677 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
8678 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
8679 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
8680 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
8681 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
8685 void mgmt_index_added(struct hci_dev *hdev)
8687 struct mgmt_ev_ext_index ev;
8689 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8692 switch (hdev->dev_type) {
8694 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8695 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8696 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8699 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8700 HCI_MGMT_INDEX_EVENTS);
8713 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8714 HCI_MGMT_EXT_INDEX_EVENTS);
8717 void mgmt_index_removed(struct hci_dev *hdev)
8719 struct mgmt_ev_ext_index ev;
8720 u8 status = MGMT_STATUS_INVALID_INDEX;
8722 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8725 switch (hdev->dev_type) {
8727 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8729 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8730 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8731 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8734 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8735 HCI_MGMT_INDEX_EVENTS);
8748 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8749 HCI_MGMT_EXT_INDEX_EVENTS);
8752 /* This function requires the caller holds hdev->lock */
8753 static void restart_le_actions(struct hci_dev *hdev)
8755 struct hci_conn_params *p;
8757 list_for_each_entry(p, &hdev->le_conn_params, list) {
8758 /* Needed for AUTO_OFF case where might not "really"
8759 * have been powered off.
8761 list_del_init(&p->action);
8763 switch (p->auto_connect) {
8764 case HCI_AUTO_CONN_DIRECT:
8765 case HCI_AUTO_CONN_ALWAYS:
8766 list_add(&p->action, &hdev->pend_le_conns);
8768 case HCI_AUTO_CONN_REPORT:
8769 list_add(&p->action, &hdev->pend_le_reports);
8777 void mgmt_power_on(struct hci_dev *hdev, int err)
8779 struct cmd_lookup match = { NULL, hdev };
8781 BT_DBG("err %d", err);
8786 restart_le_actions(hdev);
8787 hci_update_background_scan(hdev);
8790 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8792 new_settings(hdev, match.sk);
8797 hci_dev_unlock(hdev);
8800 void __mgmt_power_off(struct hci_dev *hdev)
8802 struct cmd_lookup match = { NULL, hdev };
8803 u8 status, zero_cod[] = { 0, 0, 0 };
8805 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8807 /* If the power off is because of hdev unregistration let
8808 * use the appropriate INVALID_INDEX status. Otherwise use
8809 * NOT_POWERED. We cover both scenarios here since later in
8810 * mgmt_index_removed() any hci_conn callbacks will have already
8811 * been triggered, potentially causing misleading DISCONNECTED
8814 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8815 status = MGMT_STATUS_INVALID_INDEX;
8817 status = MGMT_STATUS_NOT_POWERED;
8819 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8821 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8822 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8823 zero_cod, sizeof(zero_cod),
8824 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8825 ext_info_changed(hdev, NULL);
8828 new_settings(hdev, match.sk);
8834 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8836 struct mgmt_pending_cmd *cmd;
8839 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8843 if (err == -ERFKILL)
8844 status = MGMT_STATUS_RFKILLED;
8846 status = MGMT_STATUS_FAILED;
8848 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8850 mgmt_pending_remove(cmd);
8853 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8856 struct mgmt_ev_new_link_key ev;
8858 memset(&ev, 0, sizeof(ev));
8860 ev.store_hint = persistent;
8861 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8862 ev.key.addr.type = BDADDR_BREDR;
8863 ev.key.type = key->type;
8864 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8865 ev.key.pin_len = key->pin_len;
8867 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8870 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8872 switch (ltk->type) {
8875 if (ltk->authenticated)
8876 return MGMT_LTK_AUTHENTICATED;
8877 return MGMT_LTK_UNAUTHENTICATED;
8879 if (ltk->authenticated)
8880 return MGMT_LTK_P256_AUTH;
8881 return MGMT_LTK_P256_UNAUTH;
8882 case SMP_LTK_P256_DEBUG:
8883 return MGMT_LTK_P256_DEBUG;
8886 return MGMT_LTK_UNAUTHENTICATED;
8889 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8891 struct mgmt_ev_new_long_term_key ev;
8893 memset(&ev, 0, sizeof(ev));
8895 /* Devices using resolvable or non-resolvable random addresses
8896 * without providing an identity resolving key don't require
8897 * to store long term keys. Their addresses will change the
8900 * Only when a remote device provides an identity address
8901 * make sure the long term key is stored. If the remote
8902 * identity is known, the long term keys are internally
8903 * mapped to the identity address. So allow static random
8904 * and public addresses here.
8906 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8907 (key->bdaddr.b[5] & 0xc0) != 0xc0)
8908 ev.store_hint = 0x00;
8910 ev.store_hint = persistent;
8912 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8913 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8914 ev.key.type = mgmt_ltk_type(key);
8915 ev.key.enc_size = key->enc_size;
8916 ev.key.ediv = key->ediv;
8917 ev.key.rand = key->rand;
8919 if (key->type == SMP_LTK)
8922 /* Make sure we copy only the significant bytes based on the
8923 * encryption key size, and set the rest of the value to zeroes.
8925 memcpy(ev.key.val, key->val, key->enc_size);
8926 memset(ev.key.val + key->enc_size, 0,
8927 sizeof(ev.key.val) - key->enc_size);
8929 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
8932 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8934 struct mgmt_ev_new_irk ev;
8936 memset(&ev, 0, sizeof(ev));
8938 ev.store_hint = persistent;
8940 bacpy(&ev.rpa, &irk->rpa);
8941 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8942 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8943 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8945 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
8948 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8951 struct mgmt_ev_new_csrk ev;
8953 memset(&ev, 0, sizeof(ev));
8955 /* Devices using resolvable or non-resolvable random addresses
8956 * without providing an identity resolving key don't require
8957 * to store signature resolving keys. Their addresses will change
8958 * the next time around.
8960 * Only when a remote device provides an identity address
8961 * make sure the signature resolving key is stored. So allow
8962 * static random and public addresses here.
8964 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8965 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
8966 ev.store_hint = 0x00;
8968 ev.store_hint = persistent;
8970 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8971 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
8972 ev.key.type = csrk->type;
8973 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8975 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
8978 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8979 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8980 u16 max_interval, u16 latency, u16 timeout)
8982 struct mgmt_ev_new_conn_param ev;
8984 if (!hci_is_identity_address(bdaddr, bdaddr_type))
8987 memset(&ev, 0, sizeof(ev));
8988 bacpy(&ev.addr.bdaddr, bdaddr);
8989 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8990 ev.store_hint = store_hint;
8991 ev.min_interval = cpu_to_le16(min_interval);
8992 ev.max_interval = cpu_to_le16(max_interval);
8993 ev.latency = cpu_to_le16(latency);
8994 ev.timeout = cpu_to_le16(timeout);
8996 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
8999 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9000 u32 flags, u8 *name, u8 name_len)
9003 struct mgmt_ev_device_connected *ev = (void *) buf;
9006 bacpy(&ev->addr.bdaddr, &conn->dst);
9007 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9009 ev->flags = __cpu_to_le32(flags);
9011 /* We must ensure that the EIR Data fields are ordered and
9012 * unique. Keep it simple for now and avoid the problem by not
9013 * adding any BR/EDR data to the LE adv.
9015 if (conn->le_adv_data_len > 0) {
9016 memcpy(&ev->eir[eir_len],
9017 conn->le_adv_data, conn->le_adv_data_len);
9018 eir_len = conn->le_adv_data_len;
9021 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
9024 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
9025 eir_len = eir_append_data(ev->eir, eir_len,
9027 conn->dev_class, 3);
9030 ev->eir_len = cpu_to_le16(eir_len);
9032 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
9033 sizeof(*ev) + eir_len, NULL);
9036 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9038 struct sock **sk = data;
9040 cmd->cmd_complete(cmd, 0);
9045 mgmt_pending_remove(cmd);
9048 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9050 struct hci_dev *hdev = data;
9051 struct mgmt_cp_unpair_device *cp = cmd->param;
9053 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9055 cmd->cmd_complete(cmd, 0);
9056 mgmt_pending_remove(cmd);
9059 bool mgmt_powering_down(struct hci_dev *hdev)
9061 struct mgmt_pending_cmd *cmd;
9062 struct mgmt_mode *cp;
9064 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9075 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9076 u8 link_type, u8 addr_type, u8 reason,
9077 bool mgmt_connected)
9079 struct mgmt_ev_device_disconnected ev;
9080 struct sock *sk = NULL;
9082 /* The connection is still in hci_conn_hash so test for 1
9083 * instead of 0 to know if this is the last one.
9085 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9086 cancel_delayed_work(&hdev->power_off);
9087 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9090 if (!mgmt_connected)
9093 if (link_type != ACL_LINK && link_type != LE_LINK)
9096 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9098 bacpy(&ev.addr.bdaddr, bdaddr);
9099 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9102 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9107 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9111 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9112 u8 link_type, u8 addr_type, u8 status)
9114 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9115 struct mgmt_cp_disconnect *cp;
9116 struct mgmt_pending_cmd *cmd;
9118 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9121 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9127 if (bacmp(bdaddr, &cp->addr.bdaddr))
9130 if (cp->addr.type != bdaddr_type)
9133 cmd->cmd_complete(cmd, mgmt_status(status));
9134 mgmt_pending_remove(cmd);
9137 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9138 u8 addr_type, u8 status)
9140 struct mgmt_ev_connect_failed ev;
9142 /* The connection is still in hci_conn_hash so test for 1
9143 * instead of 0 to know if this is the last one.
9145 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9146 cancel_delayed_work(&hdev->power_off);
9147 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9150 bacpy(&ev.addr.bdaddr, bdaddr);
9151 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9152 ev.status = mgmt_status(status);
9154 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9157 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9159 struct mgmt_ev_pin_code_request ev;
9161 bacpy(&ev.addr.bdaddr, bdaddr);
9162 ev.addr.type = BDADDR_BREDR;
9165 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9168 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9171 struct mgmt_pending_cmd *cmd;
9173 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9177 cmd->cmd_complete(cmd, mgmt_status(status));
9178 mgmt_pending_remove(cmd);
9181 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9184 struct mgmt_pending_cmd *cmd;
9186 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9190 cmd->cmd_complete(cmd, mgmt_status(status));
9191 mgmt_pending_remove(cmd);
9194 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9195 u8 link_type, u8 addr_type, u32 value,
9198 struct mgmt_ev_user_confirm_request ev;
9200 BT_DBG("%s", hdev->name);
9202 bacpy(&ev.addr.bdaddr, bdaddr);
9203 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9204 ev.confirm_hint = confirm_hint;
9205 ev.value = cpu_to_le32(value);
9207 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9211 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9212 u8 link_type, u8 addr_type)
9214 struct mgmt_ev_user_passkey_request ev;
9216 BT_DBG("%s", hdev->name);
9218 bacpy(&ev.addr.bdaddr, bdaddr);
9219 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9221 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9225 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9226 u8 link_type, u8 addr_type, u8 status,
9229 struct mgmt_pending_cmd *cmd;
9231 cmd = pending_find(opcode, hdev);
9235 cmd->cmd_complete(cmd, mgmt_status(status));
9236 mgmt_pending_remove(cmd);
9241 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9242 u8 link_type, u8 addr_type, u8 status)
9244 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9245 status, MGMT_OP_USER_CONFIRM_REPLY);
9248 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9249 u8 link_type, u8 addr_type, u8 status)
9251 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9253 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9256 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9257 u8 link_type, u8 addr_type, u8 status)
9259 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9260 status, MGMT_OP_USER_PASSKEY_REPLY);
9263 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9264 u8 link_type, u8 addr_type, u8 status)
9266 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9268 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9271 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9272 u8 link_type, u8 addr_type, u32 passkey,
9275 struct mgmt_ev_passkey_notify ev;
9277 BT_DBG("%s", hdev->name);
9279 bacpy(&ev.addr.bdaddr, bdaddr);
9280 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9281 ev.passkey = __cpu_to_le32(passkey);
9282 ev.entered = entered;
9284 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9287 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9289 struct mgmt_ev_auth_failed ev;
9290 struct mgmt_pending_cmd *cmd;
9291 u8 status = mgmt_status(hci_status);
9293 bacpy(&ev.addr.bdaddr, &conn->dst);
9294 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9297 cmd = find_pairing(conn);
9299 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9300 cmd ? cmd->sk : NULL);
9303 cmd->cmd_complete(cmd, status);
9304 mgmt_pending_remove(cmd);
9308 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9310 struct cmd_lookup match = { NULL, hdev };
9314 u8 mgmt_err = mgmt_status(status);
9315 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9316 cmd_status_rsp, &mgmt_err);
9320 if (test_bit(HCI_AUTH, &hdev->flags))
9321 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9323 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9325 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9329 new_settings(hdev, match.sk);
9335 static void clear_eir(struct hci_request *req)
9337 struct hci_dev *hdev = req->hdev;
9338 struct hci_cp_write_eir cp;
9340 if (!lmp_ext_inq_capable(hdev))
9343 memset(hdev->eir, 0, sizeof(hdev->eir));
9345 memset(&cp, 0, sizeof(cp));
9347 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
9350 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
9352 struct cmd_lookup match = { NULL, hdev };
9353 struct hci_request req;
9354 bool changed = false;
9357 u8 mgmt_err = mgmt_status(status);
9359 if (enable && hci_dev_test_and_clear_flag(hdev,
9361 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9362 new_settings(hdev, NULL);
9365 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
9371 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
9373 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
9375 changed = hci_dev_test_and_clear_flag(hdev,
9378 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9381 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
9384 new_settings(hdev, match.sk);
9389 hci_req_init(&req, hdev);
9391 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9392 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
9393 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
9394 sizeof(enable), &enable);
9395 __hci_req_update_eir(&req);
9400 hci_req_run(&req, NULL);
9403 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9405 struct cmd_lookup *match = data;
9407 if (match->sk == NULL) {
9408 match->sk = cmd->sk;
9409 sock_hold(match->sk);
9413 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9416 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9418 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9419 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9420 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9423 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9424 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9425 ext_info_changed(hdev, NULL);
9432 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9434 struct mgmt_cp_set_local_name ev;
9435 struct mgmt_pending_cmd *cmd;
9440 memset(&ev, 0, sizeof(ev));
9441 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9442 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9444 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9446 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9448 /* If this is a HCI command related to powering on the
9449 * HCI dev don't send any mgmt signals.
9451 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9455 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9456 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9457 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
9460 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9464 for (i = 0; i < uuid_count; i++) {
9465 if (!memcmp(uuid, uuids[i], 16))
9472 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9476 while (parsed < eir_len) {
9477 u8 field_len = eir[0];
9484 if (eir_len - parsed < field_len + 1)
9488 case EIR_UUID16_ALL:
9489 case EIR_UUID16_SOME:
9490 for (i = 0; i + 3 <= field_len; i += 2) {
9491 memcpy(uuid, bluetooth_base_uuid, 16);
9492 uuid[13] = eir[i + 3];
9493 uuid[12] = eir[i + 2];
9494 if (has_uuid(uuid, uuid_count, uuids))
9498 case EIR_UUID32_ALL:
9499 case EIR_UUID32_SOME:
9500 for (i = 0; i + 5 <= field_len; i += 4) {
9501 memcpy(uuid, bluetooth_base_uuid, 16);
9502 uuid[15] = eir[i + 5];
9503 uuid[14] = eir[i + 4];
9504 uuid[13] = eir[i + 3];
9505 uuid[12] = eir[i + 2];
9506 if (has_uuid(uuid, uuid_count, uuids))
9510 case EIR_UUID128_ALL:
9511 case EIR_UUID128_SOME:
9512 for (i = 0; i + 17 <= field_len; i += 16) {
9513 memcpy(uuid, eir + i + 2, 16);
9514 if (has_uuid(uuid, uuid_count, uuids))
9520 parsed += field_len + 1;
9521 eir += field_len + 1;
9527 static void restart_le_scan(struct hci_dev *hdev)
9529 /* If controller is not scanning we are done. */
9530 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9533 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9534 hdev->discovery.scan_start +
9535 hdev->discovery.scan_duration))
9538 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9539 DISCOV_LE_RESTART_DELAY);
9542 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9543 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9545 /* If a RSSI threshold has been specified, and
9546 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9547 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9548 * is set, let it through for further processing, as we might need to
9551 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9552 * the results are also dropped.
9554 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9555 (rssi == HCI_RSSI_INVALID ||
9556 (rssi < hdev->discovery.rssi &&
9557 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9560 if (hdev->discovery.uuid_count != 0) {
9561 /* If a list of UUIDs is provided in filter, results with no
9562 * matching UUID should be dropped.
9564 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9565 hdev->discovery.uuids) &&
9566 !eir_has_uuids(scan_rsp, scan_rsp_len,
9567 hdev->discovery.uuid_count,
9568 hdev->discovery.uuids))
9572 /* If duplicate filtering does not report RSSI changes, then restart
9573 * scanning to ensure updated result with updated RSSI values.
9575 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9576 restart_le_scan(hdev);
9578 /* Validate RSSI value against the RSSI threshold once more. */
9579 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9580 rssi < hdev->discovery.rssi)
9587 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9588 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9589 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9592 struct mgmt_ev_device_found *ev = (void *)buf;
9595 /* Don't send events for a non-kernel initiated discovery. With
9596 * LE one exception is if we have pend_le_reports > 0 in which
9597 * case we're doing passive scanning and want these events.
9599 if (!hci_discovery_active(hdev)) {
9600 if (link_type == ACL_LINK)
9602 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
9606 if (hdev->discovery.result_filtering) {
9607 /* We are using service discovery */
9608 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9613 if (hdev->discovery.limited) {
9614 /* Check for limited discoverable bit */
9616 if (!(dev_class[1] & 0x20))
9619 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9620 if (!flags || !(flags[0] & LE_AD_LIMITED))
9625 /* Make sure that the buffer is big enough. The 5 extra bytes
9626 * are for the potential CoD field.
9628 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9631 memset(buf, 0, sizeof(buf));
9633 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9634 * RSSI value was reported as 0 when not available. This behavior
9635 * is kept when using device discovery. This is required for full
9636 * backwards compatibility with the API.
9638 * However when using service discovery, the value 127 will be
9639 * returned when the RSSI is not available.
9641 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9642 link_type == ACL_LINK)
9645 bacpy(&ev->addr.bdaddr, bdaddr);
9646 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9648 ev->flags = cpu_to_le32(flags);
9651 /* Copy EIR or advertising data into event */
9652 memcpy(ev->eir, eir, eir_len);
9654 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9656 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9659 if (scan_rsp_len > 0)
9660 /* Append scan response data to event */
9661 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9663 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9664 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9666 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9669 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9670 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9672 struct mgmt_ev_device_found *ev;
9673 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9676 ev = (struct mgmt_ev_device_found *) buf;
9678 memset(buf, 0, sizeof(buf));
9680 bacpy(&ev->addr.bdaddr, bdaddr);
9681 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9684 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9687 ev->eir_len = cpu_to_le16(eir_len);
9689 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9692 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9694 struct mgmt_ev_discovering ev;
9696 BT_DBG("%s discovering %u", hdev->name, discovering);
9698 memset(&ev, 0, sizeof(ev));
9699 ev.type = hdev->discovery.type;
9700 ev.discovering = discovering;
9702 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9705 static struct hci_mgmt_chan chan = {
9706 .channel = HCI_CHANNEL_CONTROL,
9707 .handler_count = ARRAY_SIZE(mgmt_handlers),
9708 .handlers = mgmt_handlers,
9710 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
9711 .tizen_handlers = tizen_mgmt_handlers,
9713 .hdev_init = mgmt_init_hdev,
9718 return hci_mgmt_chan_register(&chan);
9721 void mgmt_exit(void)
9723 hci_mgmt_chan_unregister(&chan);