2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
39 #include "hci_request.h"
41 #include "mgmt_util.h"
43 #define MGMT_VERSION 1
44 #define MGMT_REVISION 14
/* Opcodes of all mgmt commands implemented here; reported verbatim to
 * trusted sockets by MGMT_OP_READ_COMMANDS (see read_commands() below).
 * NOTE(review): extraction dropped some entries and the closing brace
 * of this array - verify against the full source.
 */
46 static const u16 mgmt_commands[] = {
47 MGMT_OP_READ_INDEX_LIST,
50 MGMT_OP_SET_DISCOVERABLE,
51 MGMT_OP_SET_CONNECTABLE,
52 MGMT_OP_SET_FAST_CONNECTABLE,
54 MGMT_OP_SET_LINK_SECURITY,
58 MGMT_OP_SET_DEV_CLASS,
59 MGMT_OP_SET_LOCAL_NAME,
62 MGMT_OP_LOAD_LINK_KEYS,
63 MGMT_OP_LOAD_LONG_TERM_KEYS,
65 MGMT_OP_GET_CONNECTIONS,
66 MGMT_OP_PIN_CODE_REPLY,
67 MGMT_OP_PIN_CODE_NEG_REPLY,
68 MGMT_OP_SET_IO_CAPABILITY,
70 MGMT_OP_CANCEL_PAIR_DEVICE,
71 MGMT_OP_UNPAIR_DEVICE,
72 MGMT_OP_USER_CONFIRM_REPLY,
73 MGMT_OP_USER_CONFIRM_NEG_REPLY,
74 MGMT_OP_USER_PASSKEY_REPLY,
75 MGMT_OP_USER_PASSKEY_NEG_REPLY,
76 MGMT_OP_READ_LOCAL_OOB_DATA,
77 MGMT_OP_ADD_REMOTE_OOB_DATA,
78 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
79 MGMT_OP_START_DISCOVERY,
80 MGMT_OP_STOP_DISCOVERY,
83 MGMT_OP_UNBLOCK_DEVICE,
84 MGMT_OP_SET_DEVICE_ID,
85 MGMT_OP_SET_ADVERTISING,
87 MGMT_OP_SET_STATIC_ADDRESS,
88 MGMT_OP_SET_SCAN_PARAMS,
89 MGMT_OP_SET_SECURE_CONN,
90 MGMT_OP_SET_DEBUG_KEYS,
93 MGMT_OP_GET_CONN_INFO,
94 MGMT_OP_GET_CLOCK_INFO,
96 MGMT_OP_REMOVE_DEVICE,
97 MGMT_OP_LOAD_CONN_PARAM,
98 MGMT_OP_READ_UNCONF_INDEX_LIST,
99 MGMT_OP_READ_CONFIG_INFO,
100 MGMT_OP_SET_EXTERNAL_CONFIG,
101 MGMT_OP_SET_PUBLIC_ADDRESS,
102 MGMT_OP_START_SERVICE_DISCOVERY,
103 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
104 MGMT_OP_READ_EXT_INDEX_LIST,
105 MGMT_OP_READ_ADV_FEATURES,
106 MGMT_OP_ADD_ADVERTISING,
107 MGMT_OP_REMOVE_ADVERTISING,
108 MGMT_OP_GET_ADV_SIZE_INFO,
109 MGMT_OP_START_LIMITED_DISCOVERY,
110 MGMT_OP_READ_EXT_INFO,
111 MGMT_OP_SET_APPEARANCE,
/* Events that may be delivered to trusted sockets; reported by
 * MGMT_OP_READ_COMMANDS alongside mgmt_commands[].
 */
114 static const u16 mgmt_events[] = {
115 MGMT_EV_CONTROLLER_ERROR,
117 MGMT_EV_INDEX_REMOVED,
118 MGMT_EV_NEW_SETTINGS,
119 MGMT_EV_CLASS_OF_DEV_CHANGED,
120 MGMT_EV_LOCAL_NAME_CHANGED,
121 MGMT_EV_NEW_LINK_KEY,
122 MGMT_EV_NEW_LONG_TERM_KEY,
123 MGMT_EV_DEVICE_CONNECTED,
124 MGMT_EV_DEVICE_DISCONNECTED,
125 MGMT_EV_CONNECT_FAILED,
126 MGMT_EV_PIN_CODE_REQUEST,
127 MGMT_EV_USER_CONFIRM_REQUEST,
128 MGMT_EV_USER_PASSKEY_REQUEST,
130 MGMT_EV_DEVICE_FOUND,
132 MGMT_EV_DEVICE_BLOCKED,
133 MGMT_EV_DEVICE_UNBLOCKED,
134 MGMT_EV_DEVICE_UNPAIRED,
135 MGMT_EV_PASSKEY_NOTIFY,
138 MGMT_EV_DEVICE_ADDED,
139 MGMT_EV_DEVICE_REMOVED,
140 MGMT_EV_NEW_CONN_PARAM,
141 MGMT_EV_UNCONF_INDEX_ADDED,
142 MGMT_EV_UNCONF_INDEX_REMOVED,
143 MGMT_EV_NEW_CONFIG_OPTIONS,
144 MGMT_EV_EXT_INDEX_ADDED,
145 MGMT_EV_EXT_INDEX_REMOVED,
146 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
147 MGMT_EV_ADVERTISING_ADDED,
148 MGMT_EV_ADVERTISING_REMOVED,
149 MGMT_EV_EXT_INFO_CHANGED,
/* Read-only subset of commands permitted on untrusted (non-privileged)
 * sockets - see the HCI_SOCK_TRUSTED checks in read_commands().
 */
152 static const u16 mgmt_untrusted_commands[] = {
153 MGMT_OP_READ_INDEX_LIST,
155 MGMT_OP_READ_UNCONF_INDEX_LIST,
156 MGMT_OP_READ_CONFIG_INFO,
157 MGMT_OP_READ_EXT_INDEX_LIST,
158 MGMT_OP_READ_EXT_INFO,
/* Subset of events delivered to untrusted sockets: index/configuration
 * and name/class change notifications only - nothing security sensitive.
 */
161 static const u16 mgmt_untrusted_events[] = {
163 MGMT_EV_INDEX_REMOVED,
164 MGMT_EV_NEW_SETTINGS,
165 MGMT_EV_CLASS_OF_DEV_CHANGED,
166 MGMT_EV_LOCAL_NAME_CHANGED,
167 MGMT_EV_UNCONF_INDEX_ADDED,
168 MGMT_EV_UNCONF_INDEX_REMOVED,
169 MGMT_EV_NEW_CONFIG_OPTIONS,
170 MGMT_EV_EXT_INDEX_ADDED,
171 MGMT_EV_EXT_INDEX_REMOVED,
172 MGMT_EV_EXT_INFO_CHANGED,
/* Lifetime (in jiffies) of the cached EIR/class state - 2 seconds. */
175 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* 16-byte all-zero key used to recognise blank/unset link keys. */
177 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
178 "\x00\x00\x00\x00\x00\x00\x00\x00"
180 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte; mgmt_status() bounds-checks
 * the index and falls back to MGMT_STATUS_FAILED for unknown codes.
 */
181 static u8 mgmt_status_table[] = {
183 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
184 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
185 MGMT_STATUS_FAILED, /* Hardware Failure */
186 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
187 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
188 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
189 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
190 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
191 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
192 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
193 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
194 MGMT_STATUS_BUSY, /* Command Disallowed */
195 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
196 MGMT_STATUS_REJECTED, /* Rejected Security */
197 MGMT_STATUS_REJECTED, /* Rejected Personal */
198 MGMT_STATUS_TIMEOUT, /* Host Timeout */
199 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
200 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
201 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
202 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
203 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
204 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
205 MGMT_STATUS_BUSY, /* Repeated Attempts */
206 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
207 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
208 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
209 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
210 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
211 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
212 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
213 MGMT_STATUS_FAILED, /* Unspecified Error */
214 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
215 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
216 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
217 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
218 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
219 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
220 MGMT_STATUS_FAILED, /* Unit Link Key Used */
221 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
222 MGMT_STATUS_TIMEOUT, /* Instant Passed */
223 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
224 MGMT_STATUS_FAILED, /* Transaction Collision */
225 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
226 MGMT_STATUS_REJECTED, /* QoS Rejected */
227 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
228 MGMT_STATUS_REJECTED, /* Insufficient Security */
229 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
230 MGMT_STATUS_BUSY, /* Role Switch Pending */
231 MGMT_STATUS_FAILED, /* Slot Violation */
232 MGMT_STATUS_FAILED, /* Role Switch Failed */
233 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
234 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
235 MGMT_STATUS_BUSY, /* Host Busy Pairing */
236 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
237 MGMT_STATUS_BUSY, /* Controller Busy */
238 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
239 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
240 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
241 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
242 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Translate an HCI status byte into a MGMT_STATUS_* code using the
 * table above; out-of-range values map to MGMT_STATUS_FAILED.
 */
245 static u8 mgmt_status(u8 hci_status)
247 if (hci_status < ARRAY_SIZE(mgmt_status_table))
248 return mgmt_status_table[hci_status];
250 return MGMT_STATUS_FAILED;
/* Send an index-related event on the control channel with no skip
 * socket. NOTE(review): the continuation lines of the parameter list
 * and of the mgmt_send_event() call were lost in extraction - verify
 * against the full source.
 */
253 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
256 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Send an event only to sockets that have the given flag set,
 * optionally skipping @skip_sk (typically the command originator).
 * NOTE(review): the trailing arguments of the mgmt_send_event() call
 * were lost in extraction.
 */
260 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
261 u16 len, int flag, struct sock *skip_sk)
263 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Send an event to all trusted control-channel sockets, skipping
 * @skip_sk (usually the socket whose command triggered the event).
 */
267 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
268 struct sock *skip_sk)
270 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
271 HCI_SOCK_TRUSTED, skip_sk);
/* Map a mgmt address type (BDADDR_LE_*) to the HCI LE address type;
 * anything other than public is treated as random.
 */
274 static u8 le_addr_type(u8 mgmt_addr_type)
276 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
277 return ADDR_LE_DEV_PUBLIC;
279 return ADDR_LE_DEV_RANDOM;
/* Fill a mgmt_rp_read_version buffer with this interface's version
 * (MGMT_VERSION) and little-endian revision (MGMT_REVISION).
 */
282 void mgmt_fill_version_info(void *ver)
284 struct mgmt_rp_read_version *rp = ver;
286 rp->version = MGMT_VERSION;
287 rp->revision = cpu_to_le16(MGMT_REVISION);
/* MGMT_OP_READ_VERSION handler: reply with the interface version.
 * No controller index is needed (MGMT_INDEX_NONE).
 */
290 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
293 struct mgmt_rp_read_version rp;
295 BT_DBG("sock %p", sk);
297 mgmt_fill_version_info(&rp);
299 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: reply with the supported command and
 * event opcode lists. Trusted sockets get the full tables; untrusted
 * sockets get only the mgmt_untrusted_* subsets. Opcodes are packed
 * little-endian after the counts via put_unaligned_le16().
 * NOTE(review): the kmalloc() NULL check and the kfree()/return tail
 * were lost in extraction - verify against the full source.
 */
303 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
306 struct mgmt_rp_read_commands *rp;
307 u16 num_commands, num_events;
311 BT_DBG("sock %p", sk);
313 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
314 num_commands = ARRAY_SIZE(mgmt_commands);
315 num_events = ARRAY_SIZE(mgmt_events);
317 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
318 num_events = ARRAY_SIZE(mgmt_untrusted_events);
321 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
323 rp = kmalloc(rp_size, GFP_KERNEL);
327 rp->num_commands = cpu_to_le16(num_commands);
328 rp->num_events = cpu_to_le16(num_events);
330 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
331 __le16 *opcode = rp->opcodes;
333 for (i = 0; i < num_commands; i++, opcode++)
334 put_unaligned_le16(mgmt_commands[i], opcode);
336 for (i = 0; i < num_events; i++, opcode++)
337 put_unaligned_le16(mgmt_events[i], opcode);
339 __le16 *opcode = rp->opcodes;
341 for (i = 0; i < num_commands; i++, opcode++)
342 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
344 for (i = 0; i < num_events; i++, opcode++)
345 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
348 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all
 * configured primary controllers. Counts first under hci_dev_list_lock,
 * allocates with GFP_ATOMIC (lock held), then fills, skipping devices
 * still in SETUP/CONFIG, user-channel devices and raw-only devices.
 * rp_len is recomputed after the fill since the list can shrink
 * between the count pass and the fill pass.
 */
355 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
358 struct mgmt_rp_read_index_list *rp;
364 BT_DBG("sock %p", sk);
366 read_lock(&hci_dev_list_lock);
369 list_for_each_entry(d, &hci_dev_list, list) {
370 if (d->dev_type == HCI_PRIMARY &&
371 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
375 rp_len = sizeof(*rp) + (2 * count);
376 rp = kmalloc(rp_len, GFP_ATOMIC);
378 read_unlock(&hci_dev_list_lock);
383 list_for_each_entry(d, &hci_dev_list, list) {
384 if (hci_dev_test_flag(d, HCI_SETUP) ||
385 hci_dev_test_flag(d, HCI_CONFIG) ||
386 hci_dev_test_flag(d, HCI_USER_CHANNEL))
389 /* Devices marked as raw-only are neither configured
390 * nor unconfigured controllers.
392 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
395 if (d->dev_type == HCI_PRIMARY &&
396 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
397 rp->index[count++] = cpu_to_le16(d->id);
398 BT_DBG("Added hci%u", d->id);
402 rp->num_controllers = cpu_to_le16(count);
403 rp_len = sizeof(*rp) + (2 * count);
405 read_unlock(&hci_dev_list_lock);
407 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: identical structure to
 * read_index_list(), but selects primary controllers that still have
 * HCI_UNCONFIGURED set (awaiting external configuration).
 */
415 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
416 void *data, u16 data_len)
418 struct mgmt_rp_read_unconf_index_list *rp;
424 BT_DBG("sock %p", sk);
426 read_lock(&hci_dev_list_lock);
429 list_for_each_entry(d, &hci_dev_list, list) {
430 if (d->dev_type == HCI_PRIMARY &&
431 hci_dev_test_flag(d, HCI_UNCONFIGURED))
435 rp_len = sizeof(*rp) + (2 * count);
436 rp = kmalloc(rp_len, GFP_ATOMIC);
438 read_unlock(&hci_dev_list_lock);
443 list_for_each_entry(d, &hci_dev_list, list) {
444 if (hci_dev_test_flag(d, HCI_SETUP) ||
445 hci_dev_test_flag(d, HCI_CONFIG) ||
446 hci_dev_test_flag(d, HCI_USER_CHANNEL))
449 /* Devices marked as raw-only are neither configured
450 * nor unconfigured controllers.
452 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
455 if (d->dev_type == HCI_PRIMARY &&
456 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
457 rp->index[count++] = cpu_to_le16(d->id);
458 BT_DBG("Added hci%u", d->id);
462 rp->num_controllers = cpu_to_le16(count);
463 rp_len = sizeof(*rp) + (2 * count);
465 read_unlock(&hci_dev_list_lock);
467 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
468 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list all controllers with a
 * per-entry type (0x00 configured primary, 0x01 unconfigured primary,
 * 0x02 AMP) and bus. As a side effect, switches this socket to
 * extended index events only.
 */
475 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
476 void *data, u16 data_len)
478 struct mgmt_rp_read_ext_index_list *rp;
483 BT_DBG("sock %p", sk);
485 read_lock(&hci_dev_list_lock);
488 list_for_each_entry(d, &hci_dev_list, list) {
489 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
493 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
495 read_unlock(&hci_dev_list_lock);
500 list_for_each_entry(d, &hci_dev_list, list) {
501 if (hci_dev_test_flag(d, HCI_SETUP) ||
502 hci_dev_test_flag(d, HCI_CONFIG) ||
503 hci_dev_test_flag(d, HCI_USER_CHANNEL))
506 /* Devices marked as raw-only are neither configured
507 * nor unconfigured controllers.
509 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
512 if (d->dev_type == HCI_PRIMARY) {
513 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
514 rp->entry[count].type = 0x01;
516 rp->entry[count].type = 0x00;
517 } else if (d->dev_type == HCI_AMP) {
518 rp->entry[count].type = 0x02;
523 rp->entry[count].bus = d->bus;
524 rp->entry[count++].index = cpu_to_le16(d->id);
525 BT_DBG("Added hci%u", d->id);
528 rp->num_controllers = cpu_to_le16(count);
530 read_unlock(&hci_dev_list_lock);
532 /* If this command is called at least once, then all the
533 * default index and unconfigured index events are disabled
534 * and from now on only extended index events are used.
536 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
537 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
538 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
540 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
541 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
542 struct_size(rp, entry, count));
/* A controller is considered configured when any required external
 * config has been provided and, if the default address is invalid or a
 * DT/firmware address is expected, a public address has been set.
 * NOTE(review): the early "return false" lines and final return were
 * lost in extraction.
 */
549 static bool is_configured(struct hci_dev *hdev)
551 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
552 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
555 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
556 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
557 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the little-endian bitmask of configuration options still
 * missing before the controller counts as configured: external config
 * and/or a valid public address (mirrors is_configured()).
 */
563 static __le32 get_missing_options(struct hci_dev *hdev)
567 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
568 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
569 options |= MGMT_OPTION_EXTERNAL_CONFIG;
571 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
572 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
573 !bacmp(&hdev->public_addr, BDADDR_ANY))
574 options |= MGMT_OPTION_PUBLIC_ADDRESS;
576 return cpu_to_le32(options);
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-option
 * mask to sockets subscribed to option events, skipping @skip.
 */
579 static int new_options(struct hci_dev *hdev, struct sock *skip)
581 __le32 options = get_missing_options(hdev);
583 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
584 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
/* Complete @opcode successfully with the current missing-option mask
 * as the response payload.
 */
587 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
589 __le32 options = get_missing_options(hdev);
591 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: reply with manufacturer id,
 * supported config options (external config quirk, settable public
 * address) and the currently missing options.
 * NOTE(review): the matching hci_dev_lock() call was lost in
 * extraction - the visible hci_dev_unlock() implies it exists.
 */
595 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
596 void *data, u16 data_len)
598 struct mgmt_rp_read_config_info rp;
601 BT_DBG("sock %p %s", sk, hdev->name);
605 memset(&rp, 0, sizeof(rp));
606 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
608 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
609 options |= MGMT_OPTION_EXTERNAL_CONFIG;
611 if (hdev->set_bdaddr)
612 options |= MGMT_OPTION_PUBLIC_ADDRESS;
614 rp.supported_options = cpu_to_le32(options);
615 rp.missing_options = get_missing_options(hdev);
617 hci_dev_unlock(hdev);
619 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the MGMT_PHY_* bitmask of PHYs this controller can use, from
 * its LMP features (BR/EDR packet/slot support) and LE features
 * (1M always, 2M and Coded when advertised in le_features[1]).
 */
623 static u32 get_supported_phys(struct hci_dev *hdev)
625 u32 supported_phys = 0;
627 if (lmp_bredr_capable(hdev)) {
628 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
630 if (hdev->features[0][0] & LMP_3SLOT)
631 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
633 if (hdev->features[0][0] & LMP_5SLOT)
634 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
636 if (lmp_edr_2m_capable(hdev)) {
637 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
639 if (lmp_edr_3slot_capable(hdev))
640 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
642 if (lmp_edr_5slot_capable(hdev))
643 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
645 if (lmp_edr_3m_capable(hdev)) {
646 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
648 if (lmp_edr_3slot_capable(hdev))
649 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
651 if (lmp_edr_5slot_capable(hdev))
652 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
657 if (lmp_le_capable(hdev)) {
658 supported_phys |= MGMT_PHY_LE_1M_TX;
659 supported_phys |= MGMT_PHY_LE_1M_RX;
661 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
662 supported_phys |= MGMT_PHY_LE_2M_TX;
663 supported_phys |= MGMT_PHY_LE_2M_RX;
666 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
667 supported_phys |= MGMT_PHY_LE_CODED_TX;
668 supported_phys |= MGMT_PHY_LE_CODED_RX;
672 return supported_phys;
/* Build the MGMT_PHY_* bitmask of PHYs currently selected. For BR/EDR
 * the EDR bits in hdev->pkt_type are inverted semantics: a set HCI_2DHx
 * / HCI_3DHx bit means that packet type is *disabled*, hence the
 * negated tests. LE selection comes from le_tx/rx_def_phys.
 */
675 static u32 get_selected_phys(struct hci_dev *hdev)
677 u32 selected_phys = 0;
679 if (lmp_bredr_capable(hdev)) {
680 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
682 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
683 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
685 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
686 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
688 if (lmp_edr_2m_capable(hdev)) {
689 if (!(hdev->pkt_type & HCI_2DH1))
690 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
692 if (lmp_edr_3slot_capable(hdev) &&
693 !(hdev->pkt_type & HCI_2DH3))
694 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
696 if (lmp_edr_5slot_capable(hdev) &&
697 !(hdev->pkt_type & HCI_2DH5))
698 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
700 if (lmp_edr_3m_capable(hdev)) {
701 if (!(hdev->pkt_type & HCI_3DH1))
702 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
704 if (lmp_edr_3slot_capable(hdev) &&
705 !(hdev->pkt_type & HCI_3DH3))
706 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
708 if (lmp_edr_5slot_capable(hdev) &&
709 !(hdev->pkt_type & HCI_3DH5))
710 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
715 if (lmp_le_capable(hdev)) {
716 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
717 selected_phys |= MGMT_PHY_LE_1M_TX;
719 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
720 selected_phys |= MGMT_PHY_LE_1M_RX;
722 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
723 selected_phys |= MGMT_PHY_LE_2M_TX;
725 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
726 selected_phys |= MGMT_PHY_LE_2M_RX;
728 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
729 selected_phys |= MGMT_PHY_LE_CODED_TX;
731 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
732 selected_phys |= MGMT_PHY_LE_CODED_RX;
735 return selected_phys;
/* PHYs that user space may toggle: everything supported except the
 * mandatory BR 1M 1-slot and LE 1M TX/RX PHYs.
 */
738 static u32 get_configurable_phys(struct hci_dev *hdev)
740 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
741 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, derived from its LMP/LE capabilities: base settings always,
 * BR/EDR-specific ones when BR/EDR capable, LE-specific ones when LE
 * capable, plus CONFIGURATION when external setup is possible.
 */
744 static u32 get_supported_settings(struct hci_dev *hdev)
748 settings |= MGMT_SETTING_POWERED;
749 settings |= MGMT_SETTING_BONDABLE;
750 settings |= MGMT_SETTING_DEBUG_KEYS;
751 settings |= MGMT_SETTING_CONNECTABLE;
752 settings |= MGMT_SETTING_DISCOVERABLE;
754 if (lmp_bredr_capable(hdev)) {
755 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
756 settings |= MGMT_SETTING_FAST_CONNECTABLE;
757 settings |= MGMT_SETTING_BREDR;
758 settings |= MGMT_SETTING_LINK_SECURITY;
760 if (lmp_ssp_capable(hdev)) {
761 settings |= MGMT_SETTING_SSP;
762 settings |= MGMT_SETTING_HS;
765 if (lmp_sc_capable(hdev))
766 settings |= MGMT_SETTING_SECURE_CONN;
769 if (lmp_le_capable(hdev)) {
770 settings |= MGMT_SETTING_LE;
771 settings |= MGMT_SETTING_ADVERTISING;
772 settings |= MGMT_SETTING_SECURE_CONN;
773 settings |= MGMT_SETTING_PRIVACY;
774 settings |= MGMT_SETTING_STATIC_ADDRESS;
777 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
779 settings |= MGMT_SETTING_CONFIGURATION;
781 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the MGMT_SETTING_* bitmask of currently active settings from
 * the hdev flags; used for New Settings events and command responses.
 */
786 static u32 get_current_settings(struct hci_dev *hdev)
790 if (hdev_is_powered(hdev))
791 settings |= MGMT_SETTING_POWERED;
793 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
794 settings |= MGMT_SETTING_CONNECTABLE;
796 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
797 settings |= MGMT_SETTING_FAST_CONNECTABLE;
799 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
800 settings |= MGMT_SETTING_DISCOVERABLE;
802 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
803 settings |= MGMT_SETTING_BONDABLE;
805 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
806 settings |= MGMT_SETTING_BREDR;
808 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
809 settings |= MGMT_SETTING_LE;
811 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
812 settings |= MGMT_SETTING_LINK_SECURITY;
814 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
815 settings |= MGMT_SETTING_SSP;
817 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
818 settings |= MGMT_SETTING_HS;
820 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
821 settings |= MGMT_SETTING_ADVERTISING;
823 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
824 settings |= MGMT_SETTING_SECURE_CONN;
826 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
827 settings |= MGMT_SETTING_DEBUG_KEYS;
829 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
830 settings |= MGMT_SETTING_PRIVACY;
832 /* The current setting for static address has two purposes. The
833 * first is to indicate if the static address will be used and
834 * the second is to indicate if it is actually set.
836 * This means if the static address is not configured, this flag
837 * will never be set. If the address is configured, then if the
838 * address is actually used decides if the flag is set or not.
840 * For single mode LE only controllers and dual-mode controllers
841 * with BR/EDR disabled, the existence of the static address will
844 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
845 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
846 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
847 if (bacmp(&hdev->static_addr, BDADDR_ANY))
848 settings |= MGMT_SETTING_STATIC_ADDRESS;
/* Find a pending mgmt command for @opcode on the control channel;
 * returns NULL when no matching command is in flight.
 */
854 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
856 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
/* Like pending_find(), but also match the command's user data pointer. */
859 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
860 struct hci_dev *hdev,
863 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Return the LE advertising discoverability flags (LE_AD_GENERAL /
 * LE_AD_LIMITED, or 0). A pending Set Discoverable command takes
 * precedence over the current hdev flags, since the flags have not yet
 * reached their final value.
 * NOTE(review): the check on cp->val preceding the LE_AD_GENERAL
 * return was lost in extraction - verify against the full source.
 */
866 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
868 struct mgmt_pending_cmd *cmd;
870 /* If there's a pending mgmt command the flags will not yet have
871 * their final values, so check for this first.
873 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
875 struct mgmt_mode *cp = cmd->param;
877 return LE_AD_GENERAL;
878 else if (cp->val == 0x02)
879 return LE_AD_LIMITED;
881 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
882 return LE_AD_LIMITED;
883 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
884 return LE_AD_GENERAL;
/* Return whether the controller should be treated as connectable,
 * preferring the value from a pending Set Connectable command over the
 * current HCI_CONNECTABLE flag.
 * NOTE(review): the return using cp->val inside the pending-command
 * branch was lost in extraction.
 */
890 bool mgmt_get_connectable(struct hci_dev *hdev)
892 struct mgmt_pending_cmd *cmd;
894 /* If there's a pending mgmt command the flag will not yet have
895 * it's final value, so check for this first.
897 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
899 struct mgmt_mode *cp = cmd->param;
904 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Delayed work: when the service cache expires, push the real EIR data
 * and class of device to the controller. Bails out early if
 * HCI_SERVICE_CACHE was already cleared.
 */
907 static void service_cache_off(struct work_struct *work)
909 struct hci_dev *hdev = container_of(work, struct hci_dev,
911 struct hci_request req;
913 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
916 hci_req_init(&req, hdev);
920 __hci_req_update_eir(&req);
921 __hci_req_update_class(&req);
923 hci_dev_unlock(hdev);
925 hci_req_run(&req, NULL);
/* Delayed work: the resolvable private address has expired. Mark it
 * expired and, if advertising is active, restart advertising so a
 * fresh RPA gets generated and programmed by the request helpers.
 */
928 static void rpa_expired(struct work_struct *work)
930 struct hci_dev *hdev = container_of(work, struct hci_dev,
932 struct hci_request req;
936 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
938 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
941 /* The generation of a new RPA and programming it into the
942 * controller happens in the hci_req_enable_advertising()
945 hci_req_init(&req, hdev);
946 if (ext_adv_capable(hdev))
947 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
949 __hci_req_enable_advertising(&req);
950 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialisation, guarded by the HCI_MGMT
 * flag: set up the service-cache and RPA-expiry delayed work and clear
 * the implicit bondable default (mgmt requires explicit enabling).
 */
953 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
955 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
958 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
959 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
961 /* Non-mgmt controlled devices get this bit set
962 * implicitly so that pairing works for them, however
963 * for mgmt we require user-space to explicitly enable
966 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: reply with address, HCI version,
 * manufacturer, supported/current settings, class of device and names.
 * NOTE(review): the matching hci_dev_lock() call was lost in
 * extraction - the visible hci_dev_unlock() implies it exists.
 */
969 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
970 void *data, u16 data_len)
972 struct mgmt_rp_read_info rp;
974 BT_DBG("sock %p %s", sk, hdev->name);
978 memset(&rp, 0, sizeof(rp));
980 bacpy(&rp.bdaddr, &hdev->bdaddr);
982 rp.version = hdev->hci_ver;
983 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
985 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
986 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
988 memcpy(rp.dev_class, hdev->dev_class, 3);
990 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
991 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
993 hci_dev_unlock(hdev);
995 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Append EIR fields for the extended info reply/event into @eir:
 * class of device (BR/EDR enabled), appearance (LE enabled), complete
 * and short local names. Returns the total EIR length written.
 */
999 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1004 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1005 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1006 hdev->dev_class, 3);
1008 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1009 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1012 name_len = strlen(hdev->dev_name);
1013 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1014 hdev->dev_name, name_len);
1016 name_len = strlen(hdev->short_name);
1017 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1018 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but packs
 * class/appearance/names as EIR data. As a side effect, switches this
 * socket from class/name change events to extended-info events only.
 */
1023 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1024 void *data, u16 data_len)
1027 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1030 BT_DBG("sock %p %s", sk, hdev->name);
1032 memset(&buf, 0, sizeof(buf));
1036 bacpy(&rp->bdaddr, &hdev->bdaddr);
1038 rp->version = hdev->hci_ver;
1039 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1041 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1042 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1045 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1046 rp->eir_len = cpu_to_le16(eir_len);
1048 hci_dev_unlock(hdev);
1050 /* If this command is called at least once, then the events
1051 * for class of device and local name changes are disabled
1052 * and only the new extended controller information event
1055 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1056 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1057 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1059 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1060 sizeof(*rp) + eir_len);
/* Broadcast MGMT_EV_EXT_INFO_CHANGED with freshly packed EIR data to
 * sockets subscribed to extended-info events, skipping @skip.
 */
1063 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1066 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1069 memset(buf, 0, sizeof(buf));
1071 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1072 ev->eir_len = cpu_to_le16(eir_len);
1074 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1075 sizeof(*ev) + eir_len,
1076 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Complete @opcode successfully with the current settings bitmask as
 * the response payload.
 */
1079 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1081 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1083 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for clean_up_hci_state(): once all connections
 * are gone, run the power-off work immediately instead of waiting for
 * the delayed timer.
 */
1087 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1089 BT_DBG("%s status 0x%02x", hdev->name, status);
1091 if (hci_conn_count(hdev) == 0) {
1092 cancel_delayed_work(&hdev->power_off);
1093 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Emit MGMT_EV_ADVERTISING_ADDED for @instance, skipping the socket
 * that caused the addition.
 */
1097 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1099 struct mgmt_ev_advertising_added ev;
1101 ev.instance = instance;
1103 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADVERTISING_REMOVED for @instance, skipping the socket
 * that caused the removal.
 */
1106 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1109 struct mgmt_ev_advertising_removed ev;
1111 ev.instance = instance;
1113 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Cancel a pending advertising-instance expiry, if one is armed. */
1116 static void cancel_adv_timeout(struct hci_dev *hdev)
1118 if (hdev->adv_instance_timeout) {
1119 hdev->adv_instance_timeout = 0;
1120 cancel_delayed_work(&hdev->adv_instance_expire);
/* Build and run one HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, clear advertising instances,
 * stop advertising and discovery, and abort every connection (reason
 * 0x15, power off). Returns hci_req_run()'s result; -ENODATA means
 * nothing needed doing.
 */
1124 static int clean_up_hci_state(struct hci_dev *hdev)
1126 struct hci_request req;
1127 struct hci_conn *conn;
1128 bool discov_stopped;
1131 hci_req_init(&req, hdev);
1133 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1134 test_bit(HCI_PSCAN, &hdev->flags)) {
1136 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1139 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1141 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1142 __hci_req_disable_advertising(&req);
1144 discov_stopped = hci_req_stop_discovery(&req);
1146 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1147 /* 0x15 == Terminated due to Power Off */
1148 __hci_abort_conn(&req, conn, 0x15);
1151 err = hci_req_run(&req, clean_up_hci_complete);
1152 if (!err && discov_stopped)
1153 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler. Validates val (0/1), rejects if a
 * Set Powered command is already pending, short-circuits if the power
 * state already matches, then queues power_on work or performs a clean
 * shutdown (with HCI_POWER_OFF_TIMEOUT as a backstop; -ENODATA from
 * clean_up_hci_state() means nothing was queued, so power off now).
 * NOTE(review): the matching hci_dev_lock(), several goto targets and
 * the function tail were lost in extraction.
 */
1158 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1161 struct mgmt_mode *cp = data;
1162 struct mgmt_pending_cmd *cmd;
1165 BT_DBG("request for %s", hdev->name);
1167 if (cp->val != 0x00 && cp->val != 0x01)
1168 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1169 MGMT_STATUS_INVALID_PARAMS);
1173 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1174 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1179 if (!!cp->val == hdev_is_powered(hdev)) {
1180 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1184 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1191 queue_work(hdev->req_workqueue, &hdev->power_on);
1194 /* Disconnect connections, stop scans, etc */
1195 err = clean_up_hci_state(hdev);
1197 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1198 HCI_POWER_OFF_TIMEOUT);
1200 /* ENODATA means there were no HCI commands queued */
1201 if (err == -ENODATA) {
1202 cancel_delayed_work(&hdev->power_off);
1203 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1209 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * sockets subscribed to setting events, skipping @skip.
 */
1213 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1215 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1217 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1218 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast a settings change to every subscriber. */
1221 int mgmt_new_settings(struct hci_dev *hdev)
1223 return new_settings(hdev, NULL);
/* NOTE(review): member of struct cmd_lookup - the struct's opening
 * line and other members were lost in extraction.
 */
1228 struct hci_dev *hdev;
/* mgmt_pending iterator callback: answer each pending command with the
 * current settings, remember the first socket in the cmd_lookup match
 * (taking a reference), and free the pending command.
 */
1232 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1234 struct cmd_lookup *match = data;
1236 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1238 list_del(&cmd->list);
1240 if (match->sk == NULL) {
1241 match->sk = cmd->sk;
1242 sock_hold(match->sk);
1245 mgmt_pending_free(cmd);
/* mgmt_pending iterator callback: fail a pending command with the
 * status passed via @data (a u8 *) and remove it.
 */
1248 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1252 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1253 mgmt_pending_remove(cmd);
/* mgmt_pending iterator callback: prefer the command's own
 * cmd_complete handler when set; otherwise fall back to a plain
 * status response via cmd_status_rsp().
 */
1256 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1258 if (cmd->cmd_complete) {
1261 cmd->cmd_complete(cmd, *status);
1262 mgmt_pending_remove(cmd);
1267 cmd_status_rsp(cmd, data);
/* Default cmd_complete handler: echo back the stored command
 * parameters as the response payload.
 */
1270 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1272 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1273 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
1276 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1278 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1279 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, else OK.
 */
1282 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1284 if (!lmp_bredr_capable(hdev))
1285 return MGMT_STATUS_NOT_SUPPORTED;
1286 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1287 return MGMT_STATUS_REJECTED;
1289 return MGMT_STATUS_SUCCESS;
/* Same mapping as mgmt_bredr_support(), but for the LE transport. */
1292 static u8 mgmt_le_support(struct hci_dev *hdev)
1294 if (!lmp_le_capable(hdev))
1295 return MGMT_STATUS_NOT_SUPPORTED;
1296 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1297 return MGMT_STATUS_REJECTED;
1299 return MGMT_STATUS_SUCCESS;
/* HCI completion handler for Set Discoverable: on error, fail the
 * pending command and drop limited-discoverable state; on success,
 * (re)arm the discoverable timeout, confirm to the requester and
 * broadcast New Settings.
 * NOTE(review): several intermediate lines are elided in this excerpt
 * (locking, error branch structure) — verify against the full source.
 */
1302 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1304 struct mgmt_pending_cmd *cmd;
1306 BT_DBG("status 0x%02x", status);
1310 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1315 u8 mgmt_err = mgmt_status(status);
1316 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1317 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* When a timeout was configured, schedule the deferred discov-off work. */
1321 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1322 hdev->discov_timeout > 0) {
1323 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1324 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1327 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1328 new_settings(hdev, cmd->sk);
1331 mgmt_pending_remove(cmd);
1334 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 *
 * Validates the mode (0x00 off, 0x01 general, 0x02 limited) and the
 * timeout constraints, handles the powered-off and no-change fast
 * paths purely by flag updates, and otherwise queues a pending
 * command plus the discoverable_update work to drive the HCI traffic.
 * NOTE(review): intermediate lines (locking, gotos, else branches)
 * are elided in this excerpt — confirm control flow in the full file.
 */
1337 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1340 struct mgmt_cp_set_discoverable *cp = data;
1341 struct mgmt_pending_cmd *cmd;
1345 BT_DBG("request for %s", hdev->name);
/* Discoverable requires at least one enabled transport. */
1347 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1348 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1349 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1350 MGMT_STATUS_REJECTED);
1352 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1353 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1354 MGMT_STATUS_INVALID_PARAMS);
1356 timeout = __le16_to_cpu(cp->timeout);
1358 /* Disabling discoverable requires that no timeout is set,
1359 * and enabling limited discoverable requires a timeout.
1361 if ((cp->val == 0x00 && timeout > 0) ||
1362 (cp->val == 0x02 && timeout == 0))
1363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1364 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
1368 if (!hdev_is_powered(hdev) && timeout > 0) {
1369 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1370 MGMT_STATUS_NOT_POWERED);
/* Only one discoverable/connectable change may be in flight. */
1374 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1375 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1376 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1381 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1382 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1383 MGMT_STATUS_REJECTED);
/* Powered-off path: just toggle the flag and report settings. */
1387 if (!hdev_is_powered(hdev)) {
1388 bool changed = false;
1390 /* Setting limited discoverable when powered off is
1391 * not a valid operation since it requires a timeout
1392 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1394 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1395 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1399 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1404 err = new_settings(hdev, sk);
1409 /* If the current mode is the same, then just update the timeout
1410 * value with the new value. And if only the timeout gets updated,
1411 * then no need for any HCI transactions.
1413 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1414 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1415 HCI_LIMITED_DISCOVERABLE)) {
1416 cancel_delayed_work(&hdev->discov_off);
1417 hdev->discov_timeout = timeout;
1419 if (cp->val && hdev->discov_timeout > 0) {
1420 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1421 queue_delayed_work(hdev->req_workqueue,
1422 &hdev->discov_off, to);
1425 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1429 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1435 /* Cancel any potential discoverable timeout that might be
1436 * still active and store new timeout value. The arming of
1437 * the timeout happens in the complete handler.
1439 cancel_delayed_work(&hdev->discov_off);
1440 hdev->discov_timeout = timeout;
1443 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1445 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1447 /* Limited discoverable mode */
1448 if (cp->val == 0x02)
1449 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1451 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Actual HCI traffic is driven by the discoverable_update work item. */
1453 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1457 hci_dev_unlock(hdev);
/* HCI completion handler for Set Connectable: fail the pending command
 * on error, otherwise confirm to the requester and broadcast New
 * Settings. NOTE(review): intermediate lines are elided in this excerpt.
 */
1461 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1463 struct mgmt_pending_cmd *cmd;
1465 BT_DBG("status 0x%02x", status);
1469 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1474 u8 mgmt_err = mgmt_status(status);
1475 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1479 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1480 new_settings(hdev, cmd->sk);
1483 mgmt_pending_remove(cmd);
1486 hci_dev_unlock(hdev);
/* Flag-only path of Set Connectable (used e.g. when powered off):
 * toggle HCI_CONNECTABLE, drop HCI_DISCOVERABLE when going
 * non-connectable, reply with settings, and refresh scan state.
 */
1489 static int set_connectable_update_settings(struct hci_dev *hdev,
1490 struct sock *sk, u8 val)
1492 bool changed = false;
1495 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1499 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1501 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Non-connectable implies non-discoverable. */
1502 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1505 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1510 hci_req_update_scan(hdev);
1511 hci_update_background_scan(hdev);
1512 return new_settings(hdev, sk);
/* Handler for MGMT_OP_SET_CONNECTABLE.
 *
 * Validates the mode, takes the flag-only shortcut when powered off,
 * rejects when another connectable/discoverable change is pending,
 * then updates flags and queues the connectable_update work.
 * NOTE(review): intermediate lines are elided in this excerpt.
 */
1518 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1521 struct mgmt_mode *cp = data;
1522 struct mgmt_pending_cmd *cmd;
1525 BT_DBG("request for %s", hdev->name);
1527 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1528 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1529 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1530 MGMT_STATUS_REJECTED);
1532 if (cp->val != 0x00 && cp->val != 0x01)
1533 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1534 MGMT_STATUS_INVALID_PARAMS);
1538 if (!hdev_is_powered(hdev)) {
1539 err = set_connectable_update_settings(hdev, sk, cp->val);
1543 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1544 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1545 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1550 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1557 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Going non-connectable cancels any discoverable timeout and flags. */
1559 if (hdev->discov_timeout > 0)
1560 cancel_delayed_work(&hdev->discov_off);
1562 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1563 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1564 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1567 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1571 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BONDABLE: pure flag toggle, no HCI traffic.
 * In limited-privacy mode a bondable change can affect the advertising
 * address, so the discoverable_update work is re-queued in that case.
 * NOTE(review): intermediate lines are elided in this excerpt.
 */
1575 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1578 struct mgmt_mode *cp = data;
1582 BT_DBG("request for %s", hdev->name);
1584 if (cp->val != 0x00 && cp->val != 0x01)
1585 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1586 MGMT_STATUS_INVALID_PARAMS);
1591 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1593 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1595 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1600 /* In limited privacy mode the change of bondable mode
1601 * may affect the local advertising address.
1603 if (hdev_is_powered(hdev) &&
1604 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1605 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1606 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1607 queue_work(hdev->req_workqueue,
1608 &hdev->discoverable_update);
1610 err = new_settings(hdev, sk);
1614 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LINK_SECURITY: requires BR/EDR. When powered
 * off only the flag is toggled; when powered, sends
 * HCI_OP_WRITE_AUTH_ENABLE unless the setting already matches HCI_AUTH.
 * NOTE(review): intermediate lines are elided in this excerpt.
 */
1618 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1621 struct mgmt_mode *cp = data;
1622 struct mgmt_pending_cmd *cmd;
1626 BT_DBG("request for %s", hdev->name);
1628 status = mgmt_bredr_support(hdev);
1630 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1633 if (cp->val != 0x00 && cp->val != 0x01)
1634 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1635 MGMT_STATUS_INVALID_PARAMS);
1639 if (!hdev_is_powered(hdev)) {
1640 bool changed = false;
1642 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1643 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1647 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1652 err = new_settings(hdev, sk);
1657 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: no HCI round trip. */
1665 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1666 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1670 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1676 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1678 mgmt_pending_remove(cmd);
1683 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing).
 * Requires BR/EDR and SSP-capable hardware; powered-off requests only
 * toggle host flags (disabling SSP also disables HS). When powered,
 * sends HCI_OP_WRITE_SSP_MODE, first clearing SSP debug mode if the
 * request disables SSP while debug keys are in use.
 * NOTE(review): intermediate lines are elided in this excerpt.
 */
1687 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1689 struct mgmt_mode *cp = data;
1690 struct mgmt_pending_cmd *cmd;
1694 BT_DBG("request for %s", hdev->name);
1696 status = mgmt_bredr_support(hdev);
1698 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1700 if (!lmp_ssp_capable(hdev))
1701 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1702 MGMT_STATUS_NOT_SUPPORTED);
1704 if (cp->val != 0x00 && cp->val != 0x01)
1705 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1706 MGMT_STATUS_INVALID_PARAMS);
1710 if (!hdev_is_powered(hdev)) {
1714 changed = !hci_dev_test_and_set_flag(hdev,
1717 changed = hci_dev_test_and_clear_flag(hdev,
1720 changed = hci_dev_test_and_clear_flag(hdev,
1723 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1726 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1731 err = new_settings(hdev, sk);
1736 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1737 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1742 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1743 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1747 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1753 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1754 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1755 sizeof(cp->val), &cp->val);
1757 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1759 mgmt_pending_remove(cmd);
1764 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_HS (High Speed / AMP).
 * Requires BR/EDR, SSP capability and SSP enabled; pure host-flag
 * toggle, but disabling while powered is rejected.
 * NOTE(review): intermediate lines are elided in this excerpt.
 */
1768 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1770 struct mgmt_mode *cp = data;
1775 BT_DBG("request for %s", hdev->name);
1777 status = mgmt_bredr_support(hdev);
1779 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1781 if (!lmp_ssp_capable(hdev))
1782 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1783 MGMT_STATUS_NOT_SUPPORTED);
1785 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1786 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1787 MGMT_STATUS_REJECTED);
1789 if (cp->val != 0x00 && cp->val != 0x01)
1790 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1791 MGMT_STATUS_INVALID_PARAMS);
/* A concurrent SSP change could invalidate the HS decision. */
1795 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1796 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1802 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1804 if (hdev_is_powered(hdev)) {
1805 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1806 MGMT_STATUS_REJECTED);
1810 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1813 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1818 err = new_settings(hdev, sk);
1821 hci_dev_unlock(hdev);
/* HCI completion handler for the Set LE request: answer all pending
 * SET_LE commands (status or settings), broadcast New Settings, and —
 * if LE ended up enabled — refresh advertising data and background
 * scanning. NOTE(review): intermediate lines are elided in this excerpt.
 */
1825 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1827 struct cmd_lookup match = { NULL, hdev };
1832 u8 mgmt_err = mgmt_status(status);
1834 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1839 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1841 new_settings(hdev, match.sk);
1846 /* Make sure the controller has a good default for
1847 * advertising data. Restrict the update to when LE
1848 * has actually been enabled. During power on, the
1849 * update in powered_update_hci will take care of it.
1851 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1852 struct hci_request req;
1853 hci_req_init(&req, hdev);
1854 if (ext_adv_capable(hdev)) {
1857 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1859 __hci_req_update_scan_rsp_data(&req, 0x00);
1861 __hci_req_update_adv_data(&req, 0x00);
1862 __hci_req_update_scan_rsp_data(&req, 0x00);
1864 hci_req_run(&req, NULL);
1865 hci_update_background_scan(hdev);
1869 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LE.
 *
 * Rejects turning LE off on LE-only controllers, takes a flag-only
 * shortcut when powered off or already in the requested host state,
 * and otherwise builds an HCI request writing LE Host Supported
 * (disabling advertising / clearing extended adv sets first when
 * turning LE off), completed by le_enable_complete().
 * NOTE(review): intermediate lines are elided in this excerpt.
 */
1872 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1874 struct mgmt_mode *cp = data;
1875 struct hci_cp_write_le_host_supported hci_cp;
1876 struct mgmt_pending_cmd *cmd;
1877 struct hci_request req;
1881 BT_DBG("request for %s", hdev->name);
1883 if (!lmp_le_capable(hdev))
1884 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1885 MGMT_STATUS_NOT_SUPPORTED);
1887 if (cp->val != 0x00 && cp->val != 0x01)
1888 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1889 MGMT_STATUS_INVALID_PARAMS);
1891 /* Bluetooth single mode LE only controllers or dual-mode
1892 * controllers configured as LE only devices, do not allow
1893 * switching LE off. These have either LE enabled explicitly
1894 * or BR/EDR has been previously switched off.
1896 * When trying to enable an already enabled LE, then gracefully
1897 * send a positive response. Trying to disable it however will
1898 * result into rejection.
1900 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1901 if (cp->val == 0x01)
1902 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1904 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1905 MGMT_STATUS_REJECTED);
1911 enabled = lmp_host_le_capable(hdev);
1914 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
/* Flag-only path: powered off, or host LE support already matches. */
1916 if (!hdev_is_powered(hdev) || val == enabled) {
1917 bool changed = false;
1919 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1920 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1924 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1925 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1929 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1934 err = new_settings(hdev, sk);
1939 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1940 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1941 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1946 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1952 hci_req_init(&req, hdev);
1954 memset(&hci_cp, 0, sizeof(hci_cp));
1958 hci_cp.simul = 0x00;
1960 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1961 __hci_req_disable_advertising(&req);
1963 if (ext_adv_capable(hdev))
1964 __hci_req_clear_ext_adv_sets(&req);
1967 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1970 err = hci_req_run(&req, le_enable_complete);
1972 mgmt_pending_remove(cmd);
1975 hci_dev_unlock(hdev);
1979 /* This is a helper function to test for pending mgmt commands that can
1980 * cause CoD or EIR HCI commands. We can only allow one such pending
1981 * mgmt command at a time since otherwise we cannot easily track what
1982 * the current values are, will be, and based on that calculate if a new
1983 * HCI command needs to be sent and if yes with what value.
1985 static bool pending_eir_or_class(struct hci_dev *hdev)
1987 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that may touch CoD/EIR. */
1989 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1990 switch (cmd->opcode) {
1991 case MGMT_OP_ADD_UUID:
1992 case MGMT_OP_REMOVE_UUID:
1993 case MGMT_OP_SET_DEV_CLASS:
1994 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short UUID forms.
 */
2002 static const u8 bluetooth_base_uuid[] = {
2003 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2004 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID: if its low 12 bytes match the
 * Bluetooth Base UUID, inspect the remaining 32-bit value to decide the
 * short form; otherwise it is a full 128-bit UUID.
 * NOTE(review): the size-returning branches are elided in this excerpt.
 */
2007 static u8 get_uuid_size(const u8 *uuid)
2011 if (memcmp(uuid, bluetooth_base_uuid, 12))
2014 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for the UUID / device-class commands:
 * complete the pending command for @mgmt_op with the current
 * 3-byte device class as payload.
 */
2021 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2023 struct mgmt_pending_cmd *cmd;
2027 cmd = pending_find(mgmt_op, hdev);
2031 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2032 mgmt_status(status), hdev->dev_class, 3);
2034 mgmt_pending_remove(cmd);
2037 hci_dev_unlock(hdev);
/* HCI request callback for Add UUID: defer to the shared class helper. */
2040 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2042 BT_DBG("status 0x%02x", status);
2044 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Handler for MGMT_OP_ADD_UUID: append the UUID to hdev->uuids, then
 * refresh the device class and EIR via an HCI request. -ENODATA from
 * hci_req_run means no HCI commands were needed, so the command is
 * completed immediately; otherwise a pending command awaits completion.
 * NOTE(review): intermediate lines (allocation-failure handling, gotos)
 * are elided in this excerpt.
 */
2047 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2049 struct mgmt_cp_add_uuid *cp = data;
2050 struct mgmt_pending_cmd *cmd;
2051 struct hci_request req;
2052 struct bt_uuid *uuid;
2055 BT_DBG("request for %s", hdev->name);
2059 if (pending_eir_or_class(hdev)) {
2060 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2065 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2071 memcpy(uuid->uuid, cp->uuid, 16);
2072 uuid->svc_hint = cp->svc_hint;
2073 uuid->size = get_uuid_size(cp->uuid);
2075 list_add_tail(&uuid->list, &hdev->uuids);
2077 hci_req_init(&req, hdev);
2079 __hci_req_update_class(&req);
2080 __hci_req_update_eir(&req);
2082 err = hci_req_run(&req, add_uuid_complete);
2084 if (err != -ENODATA)
2087 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2088 hdev->dev_class, 3);
2092 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2101 hci_dev_unlock(hdev);
/* Arm the service cache: when powered, set HCI_SERVICE_CACHE and
 * schedule the service_cache delayed work; returns whether caching
 * was (newly) engaged. NOTE(review): return statements elided here.
 */
2105 static bool enable_service_cache(struct hci_dev *hdev)
2107 if (!hdev_is_powered(hdev))
2110 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2111 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for Remove UUID: defer to the shared helper. */
2119 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2121 BT_DBG("status 0x%02x", status);
2123 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handler for MGMT_OP_REMOVE_UUID: an all-zero UUID clears the whole
 * list (optionally just re-arming the service cache); otherwise every
 * matching entry is unlinked, INVALID_PARAMS if none matched. Class
 * and EIR are then refreshed like in add_uuid().
 * NOTE(review): intermediate lines (found-counter, frees, gotos) are
 * elided in this excerpt.
 */
2126 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2129 struct mgmt_cp_remove_uuid *cp = data;
2130 struct mgmt_pending_cmd *cmd;
2131 struct bt_uuid *match, *tmp;
2132 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2133 struct hci_request req;
2136 BT_DBG("request for %s", hdev->name);
2140 if (pending_eir_or_class(hdev)) {
2141 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard (all-zero) UUID: wipe every registered UUID. */
2146 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2147 hci_uuids_clear(hdev);
2149 if (enable_service_cache(hdev)) {
2150 err = mgmt_cmd_complete(sk, hdev->id,
2151 MGMT_OP_REMOVE_UUID,
2152 0, hdev->dev_class, 3);
2161 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2162 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2165 list_del(&match->list);
2171 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2172 MGMT_STATUS_INVALID_PARAMS);
2177 hci_req_init(&req, hdev);
2179 __hci_req_update_class(&req);
2180 __hci_req_update_eir(&req);
2182 err = hci_req_run(&req, remove_uuid_complete);
2184 if (err != -ENODATA)
2187 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2188 hdev->dev_class, 3);
2192 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2201 hci_dev_unlock(hdev);
/* HCI request callback for Set Device Class: defer to the shared helper. */
2205 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2207 BT_DBG("status 0x%02x", status);
2209 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handler for MGMT_OP_SET_DEV_CLASS: validates reserved bits of
 * major/minor, stores them, and — when powered — flushes any pending
 * service cache and pushes the new class (and possibly EIR) via an HCI
 * request. Powered-off requests complete immediately with the stored
 * class. NOTE(review): intermediate lines are elided in this excerpt;
 * note the temporary hci_dev_unlock around cancel_delayed_work_sync.
 */
2212 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2215 struct mgmt_cp_set_dev_class *cp = data;
2216 struct mgmt_pending_cmd *cmd;
2217 struct hci_request req;
2220 BT_DBG("request for %s", hdev->name);
2222 if (!lmp_bredr_capable(hdev))
2223 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2224 MGMT_STATUS_NOT_SUPPORTED);
2228 if (pending_eir_or_class(hdev)) {
2229 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2234 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2235 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2236 MGMT_STATUS_INVALID_PARAMS);
2240 hdev->major_class = cp->major;
2241 hdev->minor_class = cp->minor;
2243 if (!hdev_is_powered(hdev)) {
2244 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2245 hdev->dev_class, 3);
2249 hci_req_init(&req, hdev);
2251 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2252 hci_dev_unlock(hdev);
2253 cancel_delayed_work_sync(&hdev->service_cache);
2255 __hci_req_update_eir(&req);
2258 __hci_req_update_class(&req);
2260 err = hci_req_run(&req, set_class_complete);
2262 if (err != -ENODATA)
2265 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2266 hdev->dev_class, 3);
2270 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2279 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_LINK_KEYS: validates key_count against the
 * wire length and each entry's address type / key type, replaces the
 * stored BR/EDR link keys wholesale, updates HCI_KEEP_DEBUG_KEYS, and
 * deliberately skips debug-combination keys so a fresh pairing is
 * required to use them.
 * NOTE(review): intermediate lines are elided in this excerpt.
 */
2283 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2286 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound chosen so that the total request length fits in u16. */
2287 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2288 sizeof(struct mgmt_link_key_info));
2289 u16 key_count, expected_len;
2293 BT_DBG("request for %s", hdev->name);
2295 if (!lmp_bredr_capable(hdev))
2296 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2297 MGMT_STATUS_NOT_SUPPORTED);
2299 key_count = __le16_to_cpu(cp->key_count);
2300 if (key_count > max_key_count) {
2301 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2303 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2304 MGMT_STATUS_INVALID_PARAMS);
2307 expected_len = struct_size(cp, keys, key_count);
2308 if (expected_len != len) {
2309 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2311 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2312 MGMT_STATUS_INVALID_PARAMS);
2315 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2317 MGMT_STATUS_INVALID_PARAMS);
2319 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate every entry before touching stored state. */
2322 for (i = 0; i < key_count; i++) {
2323 struct mgmt_link_key_info *key = &cp->keys[i];
2325 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2326 return mgmt_cmd_status(sk, hdev->id,
2327 MGMT_OP_LOAD_LINK_KEYS,
2328 MGMT_STATUS_INVALID_PARAMS);
2333 hci_link_keys_clear(hdev);
2336 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2338 changed = hci_dev_test_and_clear_flag(hdev,
2339 HCI_KEEP_DEBUG_KEYS);
2342 new_settings(hdev, NULL);
/* Second pass: store the keys. */
2344 for (i = 0; i < key_count; i++) {
2345 struct mgmt_link_key_info *key = &cp->keys[i];
2347 /* Always ignore debug keys and require a new pairing if
2348 * the user wants to use them.
2350 if (key->type == HCI_LK_DEBUG_COMBINATION)
2353 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2354 key->type, key->pin_len, NULL);
2357 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2359 hci_dev_unlock(hdev);
/* Emit a Device Unpaired event for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk.
 */
2364 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2365 u8 addr_type, struct sock *skip_sk)
2367 struct mgmt_ev_device_unpaired ev;
2369 bacpy(&ev.addr.bdaddr, bdaddr);
2370 ev.addr.type = addr_type;
2372 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for MGMT_OP_UNPAIR_DEVICE.
 *
 * Removes the pairing material for the given address — BR/EDR link key,
 * or SMP LTK/IRK for LE (also cancelling any in-progress SMP pairing
 * and disabling auto-connect parameters) — and, when requested and a
 * connection exists, terminates the link via a pending command that
 * completes with addr_cmd_complete().
 * NOTE(review): intermediate lines (locking, gotos, conn = NULL
 * assignments) are elided in this excerpt — confirm in the full file.
 */
2376 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2379 struct mgmt_cp_unpair_device *cp = data;
2380 struct mgmt_rp_unpair_device rp;
2381 struct hci_conn_params *params;
2382 struct mgmt_pending_cmd *cmd;
2383 struct hci_conn *conn;
2387 memset(&rp, 0, sizeof(rp));
2388 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2389 rp.addr.type = cp->addr.type;
2391 if (!bdaddr_type_is_valid(cp->addr.type))
2392 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2393 MGMT_STATUS_INVALID_PARAMS,
2396 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2397 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2398 MGMT_STATUS_INVALID_PARAMS,
2403 if (!hdev_is_powered(hdev)) {
2404 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2405 MGMT_STATUS_NOT_POWERED, &rp,
2410 if (cp->addr.type == BDADDR_BREDR) {
2411 /* If disconnection is requested, then look up the
2412 * connection. If the remote device is connected, it
2413 * will be later used to terminate the link.
2415 * Setting it to NULL explicitly will cause no
2416 * termination of the link.
2419 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2424 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2426 err = mgmt_cmd_complete(sk, hdev->id,
2427 MGMT_OP_UNPAIR_DEVICE,
2428 MGMT_STATUS_NOT_PAIRED, &rp,
2436 /* LE address type */
2437 addr_type = le_addr_type(cp->addr.type);
2439 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2440 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2442 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2443 MGMT_STATUS_NOT_PAIRED, &rp,
2448 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2450 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2455 /* Defer clearing up the connection parameters until closing to
2456 * give a chance of keeping them if a repairing happens.
2458 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2460 /* Disable auto-connection parameters if present */
2461 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2463 if (params->explicit_connect)
2464 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2466 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2469 /* If disconnection is not requested, then clear the connection
2470 * variable so that the link is not terminated.
2472 if (!cp->disconnect)
2476 /* If the connection variable is set, then termination of the
2477 * link is requested.
2480 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2482 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2486 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2493 cmd->cmd_complete = addr_cmd_complete;
2495 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2497 mgmt_pending_remove(cmd);
2500 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_DISCONNECT: look up the ACL or LE connection for
 * the given address, reject when not connected / not powered / already
 * pending, then issue hci_disconnect() with a pending command that
 * echoes the request back on completion.
 * NOTE(review): intermediate lines are elided in this excerpt.
 */
2504 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2507 struct mgmt_cp_disconnect *cp = data;
2508 struct mgmt_rp_disconnect rp;
2509 struct mgmt_pending_cmd *cmd;
2510 struct hci_conn *conn;
2515 memset(&rp, 0, sizeof(rp));
2516 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2517 rp.addr.type = cp->addr.type;
2519 if (!bdaddr_type_is_valid(cp->addr.type))
2520 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2521 MGMT_STATUS_INVALID_PARAMS,
2526 if (!test_bit(HCI_UP, &hdev->flags)) {
2527 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2528 MGMT_STATUS_NOT_POWERED, &rp,
2533 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2534 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2535 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2539 if (cp->addr.type == BDADDR_BREDR)
2540 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2543 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2544 le_addr_type(cp->addr.type));
2546 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2547 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2548 MGMT_STATUS_NOT_CONNECTED, &rp,
2553 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2559 cmd->cmd_complete = generic_cmd_complete;
2561 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2563 mgmt_pending_remove(cmd);
2566 hci_dev_unlock(hdev);
/* Translate an HCI link type + address type pair into the mgmt
 * BDADDR_* address type used on the wire.
 * NOTE(review): the LE_LINK case label is elided in this excerpt.
 */
2570 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2572 switch (link_type) {
2574 switch (addr_type) {
2575 case ADDR_LE_DEV_PUBLIC:
2576 return BDADDR_LE_PUBLIC;
2579 /* Fallback to LE Random address type */
2580 return BDADDR_LE_RANDOM;
2584 /* Fallback to BR/EDR type */
2585 return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS: count the mgmt-visible
 * connections, allocate a response sized for them, then fill it —
 * skipping SCO/eSCO links — and reply with the possibly smaller
 * actual count. NOTE(review): intermediate lines (i reset, continue,
 * kfree) are elided in this excerpt.
 */
2589 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2592 struct mgmt_rp_get_connections *rp;
2601 if (!hdev_is_powered(hdev)) {
2602 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2603 MGMT_STATUS_NOT_POWERED);
2608 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2609 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2613 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2620 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2621 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2623 bacpy(&rp->addr[i].bdaddr, &c->dst);
2624 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
/* SCO/eSCO links are not reported over mgmt. */
2625 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2630 rp->conn_count = cpu_to_le16(i);
2632 /* Recalculate length in case of filtered SCO connections, etc */
2633 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2634 struct_size(rp, addr, i));
2639 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Neg Reply command and send the matching
 * HCI negative reply; the pending entry is dropped if sending fails.
 */
2643 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2644 struct mgmt_cp_pin_code_neg_reply *cp)
2646 struct mgmt_pending_cmd *cmd;
2649 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2654 cmd->cmd_complete = addr_cmd_complete;
2656 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2657 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2659 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY: requires an existing ACL
 * connection; when the link demands high security, a PIN shorter than
 * 16 bytes is converted into an automatic negative reply. Otherwise
 * forwards the PIN via HCI_OP_PIN_CODE_REPLY with a pending command.
 * NOTE(review): intermediate lines are elided in this excerpt.
 */
2664 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2667 struct hci_conn *conn;
2668 struct mgmt_cp_pin_code_reply *cp = data;
2669 struct hci_cp_pin_code_reply reply;
2670 struct mgmt_pending_cmd *cmd;
2677 if (!hdev_is_powered(hdev)) {
2678 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2679 MGMT_STATUS_NOT_POWERED);
2683 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2685 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2686 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN. */
2690 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2691 struct mgmt_cp_pin_code_neg_reply ncp;
2693 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2695 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2697 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2699 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2700 MGMT_STATUS_INVALID_PARAMS);
2705 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2711 cmd->cmd_complete = addr_cmd_complete;
2713 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2714 reply.pin_len = cp->pin_len;
2715 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2717 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2719 mgmt_pending_remove(cmd);
2722 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY: validate the value against
 * the SMP maximum and store it on the device; completes immediately.
 */
2726 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2729 struct mgmt_cp_set_io_capability *cp = data;
2733 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2734 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2735 MGMT_STATUS_INVALID_PARAMS);
2739 hdev->io_capability = cp->io_capability;
2741 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2742 hdev->io_capability);
2744 hci_dev_unlock(hdev);
2746 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending PAIR_DEVICE command whose user_data is @conn, or
 * NULL if none. NOTE(review): the return statements are elided here.
 */
2750 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2752 struct hci_dev *hdev = conn->hdev;
2753 struct mgmt_pending_cmd *cmd;
2755 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2756 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2759 if (cmd->user_data != conn)
/* cmd_complete handler for Pair Device: reply with the peer address,
 * detach all pairing callbacks from the connection, drop the
 * connection reference, and keep the stored connection parameters
 * now that pairing succeeded or finished.
 */
2768 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2770 struct mgmt_rp_pair_device rp;
2771 struct hci_conn *conn = cmd->user_data;
2774 bacpy(&rp.addr.bdaddr, &conn->dst);
2775 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2777 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2778 status, &rp, sizeof(rp));
2780 /* So we don't get further callbacks for this connection */
2781 conn->connect_cfm_cb = NULL;
2782 conn->security_cfm_cb = NULL;
2783 conn->disconn_cfm_cb = NULL;
2785 hci_conn_drop(conn);
2787 /* The device is paired so there is no need to remove
2788 * its connection parameters anymore.
2790 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* SMP completion hook: complete any pending pairing command for this
 * connection with SUCCESS or FAILED depending on @complete.
 */
2797 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2799 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2800 struct mgmt_pending_cmd *cmd;
2802 cmd = find_pairing(conn);
2804 cmd->cmd_complete(cmd, status);
2805 mgmt_pending_remove(cmd);
/* BR/EDR connect/security/disconnect callback while pairing is pending:
 * translate the HCI status and complete the pending PAIR_DEVICE command.
 */
2809 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2811 struct mgmt_pending_cmd *cmd;
2813 BT_DBG("status %u", status);
2815 cmd = find_pairing(conn);
2817 BT_DBG("Unable to find a pending command");
2821 cmd->cmd_complete(cmd, mgmt_status(status));
2822 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb. The early-return condition that
 * distinguishes it from the BR/EDR version (upstream: ignore status==0,
 * since a successful LE connection alone doesn't mean pairing finished)
 * sits on lines elided from this listing — TODO(review): confirm.
 */
2825 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2827 struct mgmt_pending_cmd *cmd;
2829 BT_DBG("status %u", status);
2834 cmd = find_pairing(conn);
2836 BT_DBG("Unable to find a pending command");
2840 cmd->cmd_complete(cmd, mgmt_status(status));
2841 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate the request, create (or reuse)
 * a connection to the peer, register pairing callbacks and queue a
 * pending command that pairing_complete() resolves later.
 * Error paths reply with MGMT_STATUS_* via mgmt_cmd_complete so the
 * caller always gets the address echoed back.
 */
2844 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2847 struct mgmt_cp_pair_device *cp = data;
2848 struct mgmt_rp_pair_device rp;
2849 struct mgmt_pending_cmd *cmd;
2850 u8 sec_level, auth_type;
2851 struct hci_conn *conn;
/* Reply always carries the requested address back to userspace. */
2856 memset(&rp, 0, sizeof(rp));
2857 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2858 rp.addr.type = cp->addr.type;
2860 if (!bdaddr_type_is_valid(cp->addr.type))
2861 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2862 MGMT_STATUS_INVALID_PARAMS,
2865 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2866 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2867 MGMT_STATUS_INVALID_PARAMS,
2872 if (!hdev_is_powered(hdev)) {
2873 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2874 MGMT_STATUS_NOT_POWERED, &rp,
2879 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2880 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2881 MGMT_STATUS_ALREADY_PAIRED, &rp,
2886 sec_level = BT_SECURITY_MEDIUM;
2887 auth_type = HCI_AT_DEDICATED_BONDING;
/* BR/EDR: direct ACL connect. LE: pre-seed conn params, then connect
 * via the scan-based helper.
 */
2889 if (cp->addr.type == BDADDR_BREDR) {
2890 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2893 u8 addr_type = le_addr_type(cp->addr.type);
2894 struct hci_conn_params *p;
2896 /* When pairing a new device, it is expected to remember
2897 * this device for future connections. Adding the connection
2898 * parameter information ahead of time allows tracking
2899 * of the slave preferred values and will speed up any
2900 * further connection establishment.
2902 * If connection parameters already exist, then they
2903 * will be kept and this function does nothing.
2905 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2907 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2908 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2910 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2911 addr_type, sec_level,
2912 HCI_LE_CONN_TIMEOUT);
/* Map connect errors (IS_ERR(conn) branch; the check itself is on an
 * elided line) to MGMT statuses.
 */
2918 if (PTR_ERR(conn) == -EBUSY)
2919 status = MGMT_STATUS_BUSY;
2920 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2921 status = MGMT_STATUS_NOT_SUPPORTED;
2922 else if (PTR_ERR(conn) == -ECONNREFUSED)
2923 status = MGMT_STATUS_REJECTED;
2925 status = MGMT_STATUS_CONNECT_FAILED;
2927 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2928 status, &rp, sizeof(rp));
/* A connection already mid-pairing (callbacks set) means busy. */
2932 if (conn->connect_cfm_cb) {
2933 hci_conn_drop(conn);
2934 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2935 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2939 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2942 hci_conn_drop(conn);
2946 cmd->cmd_complete = pairing_complete;
2948 /* For LE, just connecting isn't a proof that the pairing finished */
2949 if (cp->addr.type == BDADDR_BREDR) {
2950 conn->connect_cfm_cb = pairing_complete_cb;
2951 conn->security_cfm_cb = pairing_complete_cb;
2952 conn->disconn_cfm_cb = pairing_complete_cb;
2954 conn->connect_cfm_cb = le_pairing_complete_cb;
2955 conn->security_cfm_cb = le_pairing_complete_cb;
2956 conn->disconn_cfm_cb = le_pairing_complete_cb;
2959 conn->io_capability = cp->io_cap;
/* Hold a reference for the pending command; released in
 * pairing_complete() via hci_conn_drop().
 */
2960 cmd->user_data = hci_conn_get(conn);
2962 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2963 hci_conn_security(conn, sec_level, auth_type, true)) {
2964 cmd->cmd_complete(cmd, 0);
2965 mgmt_pending_remove(cmd);
2971 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: look up the pending PAIR_DEVICE
 * command, verify the address matches the in-flight pairing, and cancel
 * it with MGMT_STATUS_CANCELLED.
 */
2975 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2978 struct mgmt_addr_info *addr = data;
2979 struct mgmt_pending_cmd *cmd;
2980 struct hci_conn *conn;
2987 if (!hdev_is_powered(hdev)) {
2988 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2989 MGMT_STATUS_NOT_POWERED);
/* No pairing pending -> nothing to cancel (NULL check elided here). */
2993 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2995 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2996 MGMT_STATUS_INVALID_PARAMS);
3000 conn = cmd->user_data;
/* Only the peer currently being paired may be cancelled. */
3002 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3003 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3004 MGMT_STATUS_INVALID_PARAMS);
3008 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3009 mgmt_pending_remove(cmd);
3011 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3012 addr, sizeof(*addr));
3014 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN, confirm, passkey,
 * and their negative variants). LE responses are routed through SMP;
 * BR/EDR responses are sent as the given HCI command, with a pending
 * mgmt command tracking completion.
 */
3018 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3019 struct mgmt_addr_info *addr, u16 mgmt_op,
3020 u16 hci_op, __le32 passkey)
3022 struct mgmt_pending_cmd *cmd;
3023 struct hci_conn *conn;
3028 if (!hdev_is_powered(hdev)) {
3029 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3030 MGMT_STATUS_NOT_POWERED, addr,
3035 if (addr->type == BDADDR_BREDR)
3036 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3038 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3039 le_addr_type(addr->type));
3042 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3043 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE: hand the reply to SMP and answer userspace immediately. */
3048 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3049 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3051 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3052 MGMT_STATUS_SUCCESS, addr,
3055 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3056 MGMT_STATUS_FAILED, addr,
3062 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3068 cmd->cmd_complete = addr_cmd_complete;
3070 /* Continue with pairing via HCI */
/* Passkey reply carries an extra 4-byte payload; all other ops send
 * just the bdaddr.
 */
3071 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3072 struct hci_cp_user_passkey_reply cp;
3074 bacpy(&cp.bdaddr, &addr->bdaddr);
3075 cp.passkey = passkey;
3076 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3078 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3082 mgmt_pending_remove(cmd);
3085 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper over user_pairing_resp(). */
3089 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3090 void *data, u16 len)
3092 struct mgmt_cp_pin_code_neg_reply *cp = data;
3096 return user_pairing_resp(sk, hdev, &cp->addr,
3097 MGMT_OP_PIN_CODE_NEG_REPLY,
3098 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: validates the fixed payload size, then
 * delegates to user_pairing_resp().
 */
3101 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3104 struct mgmt_cp_user_confirm_reply *cp = data;
3108 if (len != sizeof(*cp))
3109 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3110 MGMT_STATUS_INVALID_PARAMS);
3112 return user_pairing_resp(sk, hdev, &cp->addr,
3113 MGMT_OP_USER_CONFIRM_REPLY,
3114 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper over user_pairing_resp(). */
3117 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3118 void *data, u16 len)
3120 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3124 return user_pairing_resp(sk, hdev, &cp->addr,
3125 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3126 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: forwards the user-entered passkey via
 * user_pairing_resp().
 */
3129 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3132 struct mgmt_cp_user_passkey_reply *cp = data;
3136 return user_pairing_resp(sk, hdev, &cp->addr,
3137 MGMT_OP_USER_PASSKEY_REPLY,
3138 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper over user_pairing_resp(). */
3141 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3142 void *data, u16 len)
3144 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3148 return user_pairing_resp(sk, hdev, &cp->addr,
3149 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3150 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* If the current advertising instance uses any of the given flags
 * (e.g. MGMT_ADV_FLAG_LOCAL_NAME), cancel its timeout and reschedule the
 * next instance so stale advertised data is refreshed.
 */
3153 static void adv_expire(struct hci_dev *hdev, u32 flags)
3155 struct adv_info *adv_instance;
3156 struct hci_request req;
3159 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3163 /* stop if current instance doesn't need to be changed */
3164 if (!(adv_instance->flags & flags))
3167 cancel_adv_timeout(hdev);
3169 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3173 hci_req_init(&req, hdev);
3174 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3179 hci_req_run(&req, NULL);
/* HCI completion callback for SET_LOCAL_NAME: reply to the pending mgmt
 * command with status (or the updated name on success) and, if
 * advertising is active, expire instances that embed the local name.
 */
3182 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3184 struct mgmt_cp_set_local_name *cp;
3185 struct mgmt_pending_cmd *cmd;
3187 BT_DBG("status 0x%02x", status);
3191 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3198 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3199 mgmt_status(status));
3201 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3204 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3205 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3208 mgmt_pending_remove(cmd);
3211 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the short and full device name.
 * Unchanged names and powered-off controllers are answered immediately;
 * otherwise an HCI request updates name/EIR (BR/EDR) and scan response
 * data (LE), completed by set_name_complete().
 */
3214 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3217 struct mgmt_cp_set_local_name *cp = data;
3218 struct mgmt_pending_cmd *cmd;
3219 struct hci_request req;
3226 /* If the old values are the same as the new ones just return a
3227 * direct command complete event.
3229 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3230 !memcmp(hdev->short_name, cp->short_name,
3231 sizeof(hdev->short_name))) {
3232 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3237 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: store the name and notify listeners; no HCI traffic. */
3239 if (!hdev_is_powered(hdev)) {
3240 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3242 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3247 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3248 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3249 ext_info_changed(hdev, sk);
3254 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3260 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3262 hci_req_init(&req, hdev);
3264 if (lmp_bredr_capable(hdev)) {
3265 __hci_req_update_name(&req);
3266 __hci_req_update_eir(&req);
3269 /* The name is stored in the scan response data and so
3270 * no need to update the advertising data here.
3272 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3273 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3275 err = hci_req_run(&req, set_name_complete);
3277 mgmt_pending_remove(cmd);
3280 hci_dev_unlock(hdev);
/* MGMT_OP_SET_APPEARANCE handler (LE only): store the new appearance
 * value and expire advertising instances that embed it.
 * NOTE(review): the local variable is misspelled "apperance" — renaming
 * would be a code change, flagged here for a future cleanup.
 */
3284 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3287 struct mgmt_cp_set_appearance *cp = data;
3293 if (!lmp_le_capable(hdev))
3294 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3295 MGMT_STATUS_NOT_SUPPORTED);
3297 apperance = le16_to_cpu(cp->appearance);
3301 if (hdev->appearance != apperance) {
3302 hdev->appearance = apperance;
3304 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3305 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3307 ext_info_changed(hdev, sk);
3310 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3313 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION: report supported, selected and
 * configurable PHYs. (The struct tag "confguration" matches the
 * mgmt.h declaration, typo and all.)
 */
3318 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3319 void *data, u16 len)
3321 struct mgmt_rp_get_phy_confguration rp;
3323 BT_DBG("sock %p %s", sk, hdev->name);
3327 memset(&rp, 0, sizeof(rp));
3329 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3330 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3331 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3333 hci_dev_unlock(hdev);
3335 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast MGMT_EV_PHY_CONFIGURATION_CHANGED with the currently
 * selected PHYs to all mgmt sockets except "skip".
 */
3339 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3341 struct mgmt_ev_phy_configuration_changed ev;
3343 memset(&ev, 0, sizeof(ev));
3345 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3347 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* HCI completion for LE Set Default PHY issued by set_phy_configuration:
 * reply to the pending command and, on success, broadcast the changed
 * configuration to other mgmt listeners.
 */
3351 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3352 u16 opcode, struct sk_buff *skb)
3354 struct mgmt_pending_cmd *cmd;
3356 BT_DBG("status 0x%02x", status);
3360 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3365 mgmt_cmd_status(cmd->sk, hdev->id,
3366 MGMT_OP_SET_PHY_CONFIGURATION,
3367 mgmt_status(status));
3369 mgmt_cmd_complete(cmd->sk, hdev->id,
3370 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3373 mgmt_phy_configuration_changed(hdev, cmd->sk);
3376 mgmt_pending_remove(cmd);
3379 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PHY_CONFIGURATION handler.
 * Validates the selected PHY mask against supported/configurable PHYs,
 * maps the BR/EDR PHY bits onto the ACL packet-type mask (hdev->pkt_type),
 * and — if the LE portion changed — issues HCI LE Set Default PHY,
 * completed by set_default_phy_complete(). BR/EDR-only changes are
 * answered immediately.
 */
3382 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3383 void *data, u16 len)
3385 struct mgmt_cp_set_phy_confguration *cp = data;
3386 struct hci_cp_le_set_default_phy cp_phy;
3387 struct mgmt_pending_cmd *cmd;
3388 struct hci_request req;
3389 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3390 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3391 bool changed = false;
3394 BT_DBG("sock %p %s", sk, hdev->name);
3396 configurable_phys = get_configurable_phys(hdev);
3397 supported_phys = get_supported_phys(hdev);
3398 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject selections outside the supported set. */
3400 if (selected_phys & ~supported_phys)
3401 return mgmt_cmd_status(sk, hdev->id,
3402 MGMT_OP_SET_PHY_CONFIGURATION,
3403 MGMT_STATUS_INVALID_PARAMS);
3405 unconfigure_phys = supported_phys & ~configurable_phys;
/* Mandatory (non-configurable) PHYs must all remain selected. */
3407 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3408 return mgmt_cmd_status(sk, hdev->id,
3409 MGMT_OP_SET_PHY_CONFIGURATION,
3410 MGMT_STATUS_INVALID_PARAMS);
3412 if (selected_phys == get_selected_phys(hdev))
3413 return mgmt_cmd_complete(sk, hdev->id,
3414 MGMT_OP_SET_PHY_CONFIGURATION,
3419 if (!hdev_is_powered(hdev)) {
3420 err = mgmt_cmd_status(sk, hdev->id,
3421 MGMT_OP_SET_PHY_CONFIGURATION,
3422 MGMT_STATUS_REJECTED);
3426 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3427 err = mgmt_cmd_status(sk, hdev->id,
3428 MGMT_OP_SET_PHY_CONFIGURATION,
/* BR/EDR: translate slot/modulation selections into packet types.
 * Note the EDR bits are inverted (HCI_2DHx/3DHx set = packet DISABLED).
 */
3433 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3434 pkt_type |= (HCI_DH3 | HCI_DM3);
3436 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3438 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3439 pkt_type |= (HCI_DH5 | HCI_DM5);
3441 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3443 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3444 pkt_type &= ~HCI_2DH1;
3446 pkt_type |= HCI_2DH1;
3448 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3449 pkt_type &= ~HCI_2DH3;
3451 pkt_type |= HCI_2DH3;
3453 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3454 pkt_type &= ~HCI_2DH5;
3456 pkt_type |= HCI_2DH5;
3458 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3459 pkt_type &= ~HCI_3DH1;
3461 pkt_type |= HCI_3DH1;
3463 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3464 pkt_type &= ~HCI_3DH3;
3466 pkt_type |= HCI_3DH3;
3468 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3469 pkt_type &= ~HCI_3DH5;
3471 pkt_type |= HCI_3DH5;
3473 if (pkt_type != hdev->pkt_type) {
3474 hdev->pkt_type = pkt_type;
/* LE portion unchanged: finish without touching the controller. */
3478 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3479 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3481 mgmt_phy_configuration_changed(hdev, sk);
3483 err = mgmt_cmd_complete(sk, hdev->id,
3484 MGMT_OP_SET_PHY_CONFIGURATION,
3490 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3497 hci_req_init(&req, hdev);
3499 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bit 0/1 = "no TX/RX preference" per the HCI command spec. */
3501 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3502 cp_phy.all_phys |= 0x01;
3504 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3505 cp_phy.all_phys |= 0x02;
3507 if (selected_phys & MGMT_PHY_LE_1M_TX)
3508 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3510 if (selected_phys & MGMT_PHY_LE_2M_TX)
3511 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3513 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3514 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3516 if (selected_phys & MGMT_PHY_LE_1M_RX)
3517 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3519 if (selected_phys & MGMT_PHY_LE_2M_RX)
3520 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3522 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3523 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3525 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3527 err = hci_req_run_skb(&req, set_default_phy_complete);
3529 mgmt_pending_remove(cmd);
3532 hci_dev_unlock(hdev);
/* HCI completion for Read Local OOB (Extended) Data: convert the
 * controller reply into a mgmt reply. For the non-extended command only
 * the P-192 hash/rand are present, so the reply is shrunk to exclude
 * the P-256 fields. Replies with MGMT_STATUS_FAILED on short skbs.
 */
3537 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3538 u16 opcode, struct sk_buff *skb)
3540 struct mgmt_rp_read_local_oob_data mgmt_rp;
3541 size_t rp_size = sizeof(mgmt_rp);
3542 struct mgmt_pending_cmd *cmd;
3544 BT_DBG("%s status %u", hdev->name, status);
3546 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3550 if (status || !skb) {
3551 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3552 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3556 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
3558 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3559 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a truncated controller response. */
3561 if (skb->len < sizeof(*rp)) {
3562 mgmt_cmd_status(cmd->sk, hdev->id,
3563 MGMT_OP_READ_LOCAL_OOB_DATA,
3564 MGMT_STATUS_FAILED);
3568 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3569 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy reply: trim the (zeroed) P-256 fields off the mgmt reply. */
3571 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
3573 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3575 if (skb->len < sizeof(*rp)) {
3576 mgmt_cmd_status(cmd->sk, hdev->id,
3577 MGMT_OP_READ_LOCAL_OOB_DATA,
3578 MGMT_STATUS_FAILED);
3582 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3583 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3585 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3586 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3589 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3590 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3593 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queue the extended HCI command
 * when Secure Connections is enabled on BR/EDR, otherwise the legacy
 * one; result handled in read_local_oob_data_complete().
 */
3596 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3597 void *data, u16 data_len)
3599 struct mgmt_pending_cmd *cmd;
3600 struct hci_request req;
3603 BT_DBG("%s", hdev->name);
3607 if (!hdev_is_powered(hdev)) {
3608 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3609 MGMT_STATUS_NOT_POWERED);
3613 if (!lmp_ssp_capable(hdev)) {
3614 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3615 MGMT_STATUS_NOT_SUPPORTED);
/* Only one read may be in flight at a time. */
3619 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3620 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3625 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3631 hci_req_init(&req, hdev);
3633 if (bredr_sc_enabled(hdev))
3634 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3636 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3638 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3640 mgmt_pending_remove(cmd);
3643 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler. Accepts two payload sizes:
 * the legacy P-192-only form (BR/EDR only) and the extended form with
 * both P-192 and P-256 values. All-zero hash/rand pairs disable OOB
 * for that curve rather than storing zero keys.
 */
3647 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3648 void *data, u16 len)
3650 struct mgmt_addr_info *addr = data;
3653 BT_DBG("%s ", hdev->name);
3655 if (!bdaddr_type_is_valid(addr->type))
3656 return mgmt_cmd_complete(sk, hdev->id,
3657 MGMT_OP_ADD_REMOTE_OOB_DATA,
3658 MGMT_STATUS_INVALID_PARAMS,
3659 addr, sizeof(*addr));
/* Legacy payload: P-192 only, BR/EDR addresses only. */
3663 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3664 struct mgmt_cp_add_remote_oob_data *cp = data;
3667 if (cp->addr.type != BDADDR_BREDR) {
3668 err = mgmt_cmd_complete(sk, hdev->id,
3669 MGMT_OP_ADD_REMOTE_OOB_DATA,
3670 MGMT_STATUS_INVALID_PARAMS,
3671 &cp->addr, sizeof(cp->addr));
3675 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3676 cp->addr.type, cp->hash,
3677 cp->rand, NULL, NULL);
3679 status = MGMT_STATUS_FAILED;
3681 status = MGMT_STATUS_SUCCESS;
3683 err = mgmt_cmd_complete(sk, hdev->id,
3684 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3685 &cp->addr, sizeof(cp->addr));
3686 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3687 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3688 u8 *rand192, *hash192, *rand256, *hash256;
3691 if (bdaddr_type_is_le(cp->addr.type)) {
3692 /* Enforce zero-valued 192-bit parameters as
3693 * long as legacy SMP OOB isn't implemented.
3695 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3696 memcmp(cp->hash192, ZERO_KEY, 16)) {
3697 err = mgmt_cmd_complete(sk, hdev->id,
3698 MGMT_OP_ADD_REMOTE_OOB_DATA,
3699 MGMT_STATUS_INVALID_PARAMS,
3700 addr, sizeof(*addr));
3707 /* In case one of the P-192 values is set to zero,
3708 * then just disable OOB data for P-192.
3710 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3711 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3715 rand192 = cp->rand192;
3716 hash192 = cp->hash192;
3720 /* In case one of the P-256 values is set to zero, then just
3721 * disable OOB data for P-256.
3723 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3724 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3728 rand256 = cp->rand256;
3729 hash256 = cp->hash256;
3732 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3733 cp->addr.type, hash192, rand192,
3736 status = MGMT_STATUS_FAILED;
3738 status = MGMT_STATUS_SUCCESS;
3740 err = mgmt_cmd_complete(sk, hdev->id,
3741 MGMT_OP_ADD_REMOTE_OOB_DATA,
3742 status, &cp->addr, sizeof(cp->addr));
/* Any other payload size is malformed. */
3744 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
3746 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3747 MGMT_STATUS_INVALID_PARAMS);
3751 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: remove stored OOB data for one
 * BR/EDR address, or clear all entries when BDADDR_ANY is given.
 */
3755 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3756 void *data, u16 len)
3758 struct mgmt_cp_remove_remote_oob_data *cp = data;
3762 BT_DBG("%s", hdev->name);
3764 if (cp->addr.type != BDADDR_BREDR)
3765 return mgmt_cmd_complete(sk, hdev->id,
3766 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3767 MGMT_STATUS_INVALID_PARAMS,
3768 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard clearing the whole OOB list. */
3772 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3773 hci_remote_oob_data_clear(hdev);
3774 status = MGMT_STATUS_SUCCESS;
3778 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3780 status = MGMT_STATUS_INVALID_PARAMS;
3782 status = MGMT_STATUS_SUCCESS;
3785 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3786 status, &cp->addr, sizeof(cp->addr));
3788 hci_dev_unlock(hdev);
/* Discovery-start completion: find whichever start-discovery variant is
 * pending (regular, service, or limited) and complete it with the
 * translated HCI status.
 */
3792 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3794 struct mgmt_pending_cmd *cmd;
3796 BT_DBG("status %d", status);
3800 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3802 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3805 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3808 cmd->cmd_complete(cmd, mgmt_status(status));
3809 mgmt_pending_remove(cmd);
3812 hci_dev_unlock(hdev);
/* Check whether the requested discovery type is supported by this
 * controller; on failure *mgmt_status is set to the status to report.
 * INTERLEAVED requires both LE and (via fall-through) BR/EDR support.
 */
3815 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3816 uint8_t *mgmt_status)
3819 case DISCOV_TYPE_LE:
3820 *mgmt_status = mgmt_le_support(hdev);
3824 case DISCOV_TYPE_INTERLEAVED:
3825 *mgmt_status = mgmt_le_support(hdev);
3828 /* Intentional fall-through */
3829 case DISCOV_TYPE_BREDR:
3830 *mgmt_status = mgmt_bredr_support(hdev);
3835 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for START_DISCOVERY and START_LIMITED_DISCOVERY:
 * validate power/state/type, reset the discovery filter, record the
 * discovery parameters and kick the discov_update work item.
 */
3842 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
3843 u16 op, void *data, u16 len)
3845 struct mgmt_cp_start_discovery *cp = data;
3846 struct mgmt_pending_cmd *cmd;
3850 BT_DBG("%s", hdev->name);
3854 if (!hdev_is_powered(hdev)) {
3855 err = mgmt_cmd_complete(sk, hdev->id, op,
3856 MGMT_STATUS_NOT_POWERED,
3857 &cp->type, sizeof(cp->type));
/* Busy while another discovery or periodic inquiry is running. */
3861 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3862 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3863 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
3864 &cp->type, sizeof(cp->type));
3868 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3869 err = mgmt_cmd_complete(sk, hdev->id, op, status,
3870 &cp->type, sizeof(cp->type));
3874 /* Clear the discovery filter first to free any previously
3875 * allocated memory for the UUID list.
3877 hci_discovery_filter_clear(hdev);
3879 hdev->discovery.type = cp->type;
3880 hdev->discovery.report_invalid_rssi = false;
3881 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
3882 hdev->discovery.limited = true;
3884 hdev->discovery.limited = false;
3886 cmd = mgmt_pending_add(sk, op, hdev, data, len);
3892 cmd->cmd_complete = generic_cmd_complete;
/* Actual scanning is started asynchronously by the discov_update work. */
3894 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3895 queue_work(hdev->req_workqueue, &hdev->discov_update);
3899 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY: wrapper over start_discovery_internal(). */
3903 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3904 void *data, u16 len)
3906 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY: wrapper over start_discovery_internal(). */
3910 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
3911 void *data, u16 len)
3913 return start_discovery_internal(sk, hdev,
3914 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete hook for START_SERVICE_DISCOVERY pending commands. */
3918 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
3921 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * an RSSI threshold and a variable-length UUID filter list appended to
 * the command. Validates uuid_count against both an overflow bound and
 * the actual payload length before copying the UUIDs.
 */
3925 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3926 void *data, u16 len)
3928 struct mgmt_cp_start_service_discovery *cp = data;
3929 struct mgmt_pending_cmd *cmd;
/* Upper bound keeps sizeof(*cp) + uuid_count * 16 from overflowing u16. */
3930 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3931 u16 uuid_count, expected_len;
3935 BT_DBG("%s", hdev->name);
3939 if (!hdev_is_powered(hdev)) {
3940 err = mgmt_cmd_complete(sk, hdev->id,
3941 MGMT_OP_START_SERVICE_DISCOVERY,
3942 MGMT_STATUS_NOT_POWERED,
3943 &cp->type, sizeof(cp->type));
3947 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3948 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3949 err = mgmt_cmd_complete(sk, hdev->id,
3950 MGMT_OP_START_SERVICE_DISCOVERY,
3951 MGMT_STATUS_BUSY, &cp->type,
3956 uuid_count = __le16_to_cpu(cp->uuid_count);
3957 if (uuid_count > max_uuid_count) {
3958 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
3960 err = mgmt_cmd_complete(sk, hdev->id,
3961 MGMT_OP_START_SERVICE_DISCOVERY,
3962 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The declared UUID count must match the payload actually received. */
3967 expected_len = sizeof(*cp) + uuid_count * 16;
3968 if (expected_len != len) {
3969 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
3971 err = mgmt_cmd_complete(sk, hdev->id,
3972 MGMT_OP_START_SERVICE_DISCOVERY,
3973 MGMT_STATUS_INVALID_PARAMS, &cp->type,
3978 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3979 err = mgmt_cmd_complete(sk, hdev->id,
3980 MGMT_OP_START_SERVICE_DISCOVERY,
3981 status, &cp->type, sizeof(cp->type));
3985 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
3992 cmd->cmd_complete = service_discovery_cmd_complete;
3994 /* Clear the discovery filter first to free any previously
3995 * allocated memory for the UUID list.
3997 hci_discovery_filter_clear(hdev);
3999 hdev->discovery.result_filtering = true;
4000 hdev->discovery.type = cp->type;
4001 hdev->discovery.rssi = cp->rssi;
4002 hdev->discovery.uuid_count = uuid_count;
4004 if (uuid_count > 0) {
/* Freed later by hci_discovery_filter_clear(). */
4005 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4007 if (!hdev->discovery.uuids) {
4008 err = mgmt_cmd_complete(sk, hdev->id,
4009 MGMT_OP_START_SERVICE_DISCOVERY,
4011 &cp->type, sizeof(cp->type));
4012 mgmt_pending_remove(cmd);
4017 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4018 queue_work(hdev->req_workqueue, &hdev->discov_update);
4022 hci_dev_unlock(hdev);
/* Discovery-stop completion: resolve the pending STOP_DISCOVERY command
 * with the translated HCI status.
 */
4026 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4028 struct mgmt_pending_cmd *cmd;
4030 BT_DBG("status %d", status);
4034 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4036 cmd->cmd_complete(cmd, mgmt_status(status));
4037 mgmt_pending_remove(cmd);
4040 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: reject if no discovery is active or
 * the type doesn't match the running one, then move to STOPPING and let
 * the discov_update work do the actual teardown.
 */
4043 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4046 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4047 struct mgmt_pending_cmd *cmd;
4050 BT_DBG("%s", hdev->name);
4054 if (!hci_discovery_active(hdev)) {
4055 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4056 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4057 sizeof(mgmt_cp->type));
4061 if (hdev->discovery.type != mgmt_cp->type) {
4062 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4063 MGMT_STATUS_INVALID_PARAMS,
4064 &mgmt_cp->type, sizeof(mgmt_cp->type));
4068 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4074 cmd->cmd_complete = generic_cmd_complete;
4076 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4077 queue_work(hdev->req_workqueue, &hdev->discov_update);
4081 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, mark an inquiry cache
 * entry's name as known (no resolution needed) or needed (schedule name
 * resolution via hci_inquiry_cache_update_resolve).
 */
4085 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4088 struct mgmt_cp_confirm_name *cp = data;
4089 struct inquiry_entry *e;
4092 BT_DBG("%s", hdev->name);
4096 if (!hci_discovery_active(hdev)) {
4097 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4098 MGMT_STATUS_FAILED, &cp->addr,
4103 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4105 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4106 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4111 if (cp->name_known) {
4112 e->name_state = NAME_KNOWN;
4115 e->name_state = NAME_NEEDED;
4116 hci_inquiry_cache_update_resolve(hdev, e);
4119 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4120 &cp->addr, sizeof(cp->addr));
4123 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to hdev->blacklist and
 * broadcast MGMT_EV_DEVICE_BLOCKED on success.
 */
4127 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4130 struct mgmt_cp_block_device *cp = data;
4134 BT_DBG("%s", hdev->name);
4136 if (!bdaddr_type_is_valid(cp->addr.type))
4137 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4138 MGMT_STATUS_INVALID_PARAMS,
4139 &cp->addr, sizeof(cp->addr));
4143 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4146 status = MGMT_STATUS_FAILED;
4150 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4152 status = MGMT_STATUS_SUCCESS;
4155 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4156 &cp->addr, sizeof(cp->addr));
4158 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device; removal of a
 * non-listed address reports MGMT_STATUS_INVALID_PARAMS.
 */
4163 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4166 struct mgmt_cp_unblock_device *cp = data;
4170 BT_DBG("%s", hdev->name);
4172 if (!bdaddr_type_is_valid(cp->addr.type))
4173 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4174 MGMT_STATUS_INVALID_PARAMS,
4175 &cp->addr, sizeof(cp->addr));
4179 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4182 status = MGMT_STATUS_INVALID_PARAMS;
4186 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4188 status = MGMT_STATUS_SUCCESS;
4191 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4192 &cp->addr, sizeof(cp->addr));
4194 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source must
 * be 0x0000-0x0002 per the DI profile) and refresh the EIR data so the
 * new ID is advertised.
 */
4199 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4202 struct mgmt_cp_set_device_id *cp = data;
4203 struct hci_request req;
4207 BT_DBG("%s", hdev->name);
4209 source = __le16_to_cpu(cp->source);
4211 if (source > 0x0002)
4212 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4213 MGMT_STATUS_INVALID_PARAMS);
4217 hdev->devid_source = source;
4218 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4219 hdev->devid_product = __le16_to_cpu(cp->product);
4220 hdev->devid_version = __le16_to_cpu(cp->version);
4222 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4225 hci_req_init(&req, hdev);
4226 __hci_req_update_eir(&req);
4227 hci_req_run(&req, NULL);
4229 hci_dev_unlock(hdev);
/* No-op completion callback for re-enabling instance advertising; only
 * logs the status.
 */
4234 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4237 BT_DBG("status %d", status);
/* HCI completion for SET_ADVERTISING: answer all pending commands, sync
 * the HCI_ADVERTISING flag with the controller state, emit new settings,
 * and — when Set Advertising was just turned off but advertising
 * instances exist — reschedule instance-based advertising.
 */
4240 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4243 struct cmd_lookup match = { NULL, hdev };
4244 struct hci_request req;
4246 struct adv_info *adv_instance;
4252 u8 mgmt_err = mgmt_status(status);
4254 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4255 cmd_status_rsp, &mgmt_err);
4259 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4260 hci_dev_set_flag(hdev, HCI_ADVERTISING);
4262 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4264 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4267 new_settings(hdev, match.sk);
4272 /* If "Set Advertising" was just disabled and instance advertising was
4273 * set up earlier, then re-enable multi-instance advertising.
4275 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4276 list_empty(&hdev->adv_instances))
4279 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one. */
4281 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
4282 struct adv_info, list);
4286 instance = adv_instance->instance;
4289 hci_req_init(&req, hdev);
4291 err = __hci_req_schedule_adv_instance(&req, instance, true);
4294 err = hci_req_run(&req, enable_advertising_instance);
4297 bt_dev_err(hdev, "failed to re-configure advertising");
4300 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING: enables (0x01), enables as
 * connectable (0x02) or disables (0x00) LE advertising.  Either updates
 * flags directly (when no HCI traffic is required) or queues an HCI
 * request completed by set_advertising_complete().
 * NOTE(review): line-sampled view; some branch bodies/labels are absent.
 */
4303 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4306 struct mgmt_mode *cp = data;
4307 struct mgmt_pending_cmd *cmd;
4308 struct hci_request req;
4312 BT_DBG("request for %s", hdev->name);
/* Reject if LE is not supported/enabled on this controller. */
4314 status = mgmt_le_support(hdev);
4316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
/* Only 0x00 (off), 0x01 (on), 0x02 (on + connectable) are valid. */
4319 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4320 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4321 MGMT_STATUS_INVALID_PARAMS);
4327 /* The following conditions are ones which mean that we should
4328 * not do any HCI communication but directly send a mgmt
4329 * response to user space (after toggling the flag if
/* Flag-only path: powered off, no effective change, LE link active,
 * or an active LE scan is running.
 */
4332 if (!hdev_is_powered(hdev) ||
4333 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4334 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4335 hci_conn_num(hdev, LE_LINK) > 0 ||
4336 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4337 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4341 hdev->cur_adv_instance = 0x00;
4342 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4343 if (cp->val == 0x02)
4344 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4346 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4348 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4349 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4352 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4357 err = new_settings(hdev, sk);
/* Serialize against other in-flight advertising/LE toggles. */
4362 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4363 pending_find(MGMT_OP_SET_LE, hdev)) {
4364 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4369 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4375 hci_req_init(&req, hdev);
4377 if (cp->val == 0x02)
4378 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4380 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4382 cancel_adv_timeout(hdev);
4385 /* Switch to instance "0" for the Set Advertising setting.
4386 * We cannot use update_[adv|scan_rsp]_data() here as the
4387 * HCI_ADVERTISING flag is not yet set.
4389 hdev->cur_adv_instance = 0x00;
/* Prefer the extended advertising commands when the controller
 * supports them; otherwise use the legacy set-adv-data path.
 */
4391 if (ext_adv_capable(hdev)) {
4392 __hci_req_start_ext_adv(&req, 0x00);
4394 __hci_req_update_adv_data(&req, 0x00);
4395 __hci_req_update_scan_rsp_data(&req, 0x00);
4396 __hci_req_enable_advertising(&req);
4399 __hci_req_disable_advertising(&req);
4402 err = hci_req_run(&req, set_advertising_complete);
4404 mgmt_pending_remove(cmd);
4407 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: validates and stores the LE
 * static random address.  Only allowed while the controller is powered
 * off.
 */
4411 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4412 void *data, u16 len)
4414 struct mgmt_cp_set_static_address *cp = data;
4417 BT_DBG("%s", hdev->name);
4419 if (!lmp_le_capable(hdev))
4420 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4421 MGMT_STATUS_NOT_SUPPORTED);
/* Address may only be changed when powered down. */
4423 if (hdev_is_powered(hdev))
4424 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4425 MGMT_STATUS_REJECTED);
/* BDADDR_ANY clears the static address; otherwise validate it. */
4427 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4428 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4429 return mgmt_cmd_status(sk, hdev->id,
4430 MGMT_OP_SET_STATIC_ADDRESS,
4431 MGMT_STATUS_INVALID_PARAMS);
4433 /* Two most significant bits shall be set */
4434 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4435 return mgmt_cmd_status(sk, hdev->id,
4436 MGMT_OP_SET_STATIC_ADDRESS,
4437 MGMT_STATUS_INVALID_PARAMS);
4442 bacpy(&hdev->static_addr, &cp->bdaddr);
4444 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4448 err = new_settings(hdev, sk);
4451 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: validates and stores LE scan
 * interval/window and restarts background scanning so the new values
 * take effect immediately.
 */
4455 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4456 void *data, u16 len)
4458 struct mgmt_cp_set_scan_params *cp = data;
4459 __u16 interval, window;
4462 BT_DBG("%s", hdev->name);
4464 if (!lmp_le_capable(hdev))
4465 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4466 MGMT_STATUS_NOT_SUPPORTED);
/* Interval and window must each lie in 0x0004-0x4000 (HCI range),
 * and the window cannot exceed the interval.
 */
4468 interval = __le16_to_cpu(cp->interval);
4470 if (interval < 0x0004 || interval > 0x4000)
4471 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4472 MGMT_STATUS_INVALID_PARAMS);
4474 window = __le16_to_cpu(cp->window);
4476 if (window < 0x0004 || window > 0x4000)
4477 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4478 MGMT_STATUS_INVALID_PARAMS);
4480 if (window > interval)
4481 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4482 MGMT_STATUS_INVALID_PARAMS);
4486 hdev->le_scan_interval = interval;
4487 hdev->le_scan_window = window;
4489 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4492 /* If background scan is running, restart it so new parameters are
/* Only when passive background scan is active and no discovery is
 * in progress (discovery manages its own scan parameters).
 */
4495 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4496 hdev->discovery.state == DISCOVERY_STOPPED) {
4497 struct hci_request req;
4499 hci_req_init(&req, hdev);
4501 hci_req_add_le_scan_disable(&req);
4502 hci_req_add_le_passive_scan(&req);
4504 hci_req_run(&req, NULL);
4507 hci_dev_unlock(hdev);
/* Completion handler for the Set Fast Connectable HCI request: syncs
 * HCI_FAST_CONNECTABLE with the requested value and answers the pending
 * mgmt command.
 */
4512 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4515 struct mgmt_pending_cmd *cmd;
4517 BT_DBG("status 0x%02x", status);
4521 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
/* On HCI failure report the translated error to the requester. */
4526 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4527 mgmt_status(status));
4529 struct mgmt_mode *cp = cmd->param;
4532 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4534 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4536 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4537 new_settings(hdev, cmd->sk);
4540 mgmt_pending_remove(cmd);
4543 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: toggles the fast-connectable
 * page scan parameters.  Requires BR/EDR and a controller of at least
 * Bluetooth 1.2.
 */
4546 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4547 void *data, u16 len)
4549 struct mgmt_mode *cp = data;
4550 struct mgmt_pending_cmd *cmd;
4551 struct hci_request req;
4554 BT_DBG("%s", hdev->name);
4556 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4557 hdev->hci_ver < BLUETOOTH_VER_1_2)
4558 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4559 MGMT_STATUS_NOT_SUPPORTED);
4561 if (cp->val != 0x00 && cp->val != 0x01)
4562 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4563 MGMT_STATUS_INVALID_PARAMS);
/* Only one request may be in flight at a time. */
4567 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4568 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the flag already matches the requested value. */
4573 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4574 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
/* Powered off: flip the flag only; HCI is programmed at power-up. */
4579 if (!hdev_is_powered(hdev)) {
4580 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4581 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4583 new_settings(hdev, sk);
4587 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4594 hci_req_init(&req, hdev);
4596 __hci_req_write_fast_connectable(&req, cp->val);
4598 err = hci_req_run(&req, fast_connectable_complete);
4600 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4601 MGMT_STATUS_FAILED);
4602 mgmt_pending_remove(cmd);
4606 hci_dev_unlock(hdev);
/* Completion handler for the Set BR/EDR HCI request: on failure restores
 * the HCI_BREDR_ENABLED flag that was optimistically set, otherwise
 * confirms the new settings to the requester.
 */
4611 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4613 struct mgmt_pending_cmd *cmd;
4615 BT_DBG("status 0x%02x", status);
4619 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4624 u8 mgmt_err = mgmt_status(status);
4626 /* We need to restore the flag if related HCI commands
/* Roll back the flag flipped ahead of the request (see set_bredr). */
4629 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4631 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4633 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4634 new_settings(hdev, cmd->sk);
4637 mgmt_pending_remove(cmd);
4640 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BREDR: enables or disables BR/EDR on a
 * dual-mode controller.  LE must stay enabled; several configurations
 * (powered-on disable, static-address/SC re-enable) are rejected.
 * NOTE(review): line-sampled view; some branch bodies are not visible.
 */
4643 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4645 struct mgmt_mode *cp = data;
4646 struct mgmt_pending_cmd *cmd;
4647 struct hci_request req;
4650 BT_DBG("request for %s", hdev->name);
4652 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4653 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4654 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR can only be toggled while LE remains enabled. */
4656 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4657 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4658 MGMT_STATUS_REJECTED);
4660 if (cp->val != 0x00 && cp->val != 0x01)
4661 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4662 MGMT_STATUS_INVALID_PARAMS);
4666 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4667 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: clear all BR/EDR-dependent flags when disabling. */
4671 if (!hdev_is_powered(hdev)) {
4673 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4674 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4675 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4676 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4677 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4680 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4682 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4686 err = new_settings(hdev, sk);
4690 /* Reject disabling when powered on */
4692 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4693 MGMT_STATUS_REJECTED);
4696 /* When configuring a dual-mode controller to operate
4697 * with LE only and using a static address, then switching
4698 * BR/EDR back on is not allowed.
4700 * Dual-mode controllers shall operate with the public
4701 * address as its identity address for BR/EDR and LE. So
4702 * reject the attempt to create an invalid configuration.
4704 * The same restrictions applies when secure connections
4705 * has been enabled. For BR/EDR this is a controller feature
4706 * while for LE it is a host stack feature. This means that
4707 * switching BR/EDR back on when secure connections has been
4708 * enabled is not a supported transaction.
4710 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4711 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4712 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4713 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4714 MGMT_STATUS_REJECTED);
4719 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
4720 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4725 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4731 /* We need to flip the bit already here so that
4732 * hci_req_update_adv_data generates the correct flags.
4734 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4736 hci_req_init(&req, hdev);
4738 __hci_req_write_fast_connectable(&req, false);
4739 __hci_req_update_scan(&req);
4741 /* Since only the advertising data flags will change, there
4742 * is no need to update the scan response data.
4744 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
4746 err = hci_req_run(&req, set_bredr_complete);
4748 mgmt_pending_remove(cmd);
4751 hci_dev_unlock(hdev);
/* Completion handler for the Write Secure Connections Support request:
 * updates HCI_SC_ENABLED / HCI_SC_ONLY according to the requested mode
 * (0x00 off, 0x01 SC enabled, 0x02 SC-only) and answers the pending
 * mgmt command.
 */
4755 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4757 struct mgmt_pending_cmd *cmd;
4758 struct mgmt_mode *cp;
4760 BT_DBG("%s status %u", hdev->name, status);
4764 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4769 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4770 mgmt_status(status));
/* Mode 0x00: secure connections fully off. */
4778 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4779 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Mode 0x01: SC enabled but not SC-only. */
4782 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4783 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Mode 0x02: SC-only mode. */
4786 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4787 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4791 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4792 new_settings(hdev, cmd->sk);
4795 mgmt_pending_remove(cmd);
4797 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SECURE_CONN: configures Secure Connections
 * (0x00 off, 0x01 on, 0x02 SC-only).  Uses the flag-only path when no
 * HCI write is needed, otherwise issues Write SC Support.
 */
4800 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4801 void *data, u16 len)
4803 struct mgmt_mode *cp = data;
4804 struct mgmt_pending_cmd *cmd;
4805 struct hci_request req;
4809 BT_DBG("request for %s", hdev->name);
/* Requires either controller SC support or LE enabled (LE SC is a
 * host feature).
 */
4811 if (!lmp_sc_capable(hdev) &&
4812 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4813 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4814 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR SC requires SSP to be enabled first. */
4816 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4817 lmp_sc_capable(hdev) &&
4818 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4819 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4820 MGMT_STATUS_REJECTED);
4822 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4823 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4824 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: powered off, no controller SC, or BR/EDR off. */
4828 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4829 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4833 changed = !hci_dev_test_and_set_flag(hdev,
4835 if (cp->val == 0x02)
4836 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4838 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4840 changed = hci_dev_test_and_clear_flag(hdev,
4842 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4845 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4850 err = new_settings(hdev, sk);
4855 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state: just echo the settings. */
4863 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4864 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4865 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4869 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4875 hci_req_init(&req, hdev);
4876 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4877 err = hci_req_run(&req, sc_enable_complete);
4879 mgmt_pending_remove(cmd);
4884 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS: 0x00 discard debug keys, 0x01 keep
 * them, 0x02 keep and also use SSP debug mode.  Pushes the SSP debug
 * mode to the controller when it changes while powered and SSP is on.
 */
4888 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4889 void *data, u16 len)
4891 struct mgmt_mode *cp = data;
4892 bool changed, use_changed;
4895 BT_DBG("request for %s", hdev->name);
4897 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4898 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4899 MGMT_STATUS_INVALID_PARAMS);
/* Track whether the keep-debug-keys policy actually changed. */
4904 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4906 changed = hci_dev_test_and_clear_flag(hdev,
4907 HCI_KEEP_DEBUG_KEYS);
/* 0x02 additionally turns on use of debug keys for pairing. */
4909 if (cp->val == 0x02)
4910 use_changed = !hci_dev_test_and_set_flag(hdev,
4911 HCI_USE_DEBUG_KEYS);
4913 use_changed = hci_dev_test_and_clear_flag(hdev,
4914 HCI_USE_DEBUG_KEYS);
/* Only touch the controller when powered, SSP is on and the
 * use-debug-keys state actually changed.
 */
4916 if (hdev_is_powered(hdev) && use_changed &&
4917 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4918 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4919 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4920 sizeof(mode), &mode);
4923 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4928 err = new_settings(hdev, sk);
4931 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY: enables (0x01), enables limited
 * (0x02) or disables (0x00) LE privacy, storing/clearing the local IRK.
 * Only allowed while powered off.
 */
4935 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4938 struct mgmt_cp_set_privacy *cp = cp_data;
4942 BT_DBG("request for %s", hdev->name);
4944 if (!lmp_le_capable(hdev))
4945 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4946 MGMT_STATUS_NOT_SUPPORTED);
4948 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
4949 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4950 MGMT_STATUS_INVALID_PARAMS);
/* Privacy can only be reconfigured while powered down. */
4952 if (hdev_is_powered(hdev))
4953 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4954 MGMT_STATUS_REJECTED);
4958 /* If user space supports this command it is also expected to
4959 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4961 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: store IRK, force RPA regeneration, and mark any
 * advertising instances' RPAs as expired.
 */
4964 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
4965 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4966 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4967 hci_adv_instances_set_rpa_expired(hdev, true);
4968 if (cp->privacy == 0x02)
4969 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
4971 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: wipe the IRK and clear all privacy state. */
4973 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
4974 memset(hdev->irk, 0, sizeof(hdev->irk));
4975 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
4976 hci_adv_instances_set_rpa_expired(hdev, false);
4977 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4980 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4985 err = new_settings(hdev, sk);
4988 hci_dev_unlock(hdev);
/* Validates an IRK entry's address type: public LE addresses are always
 * acceptable; static random addresses must have the two most significant
 * bits set (per the Core spec address format).
 */
4992 static bool irk_is_valid(struct mgmt_irk_info *irk)
4994 switch (irk->addr.type) {
4995 case BDADDR_LE_PUBLIC:
4998 case BDADDR_LE_RANDOM:
4999 /* Two most significant bits shall be set */
5000 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS: validates the variable-length IRK list
 * from user space, replaces the kernel's stored IRKs with it, and marks
 * RPA resolving as handled by user space.
 */
5008 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5011 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound on irk_count so expected_len cannot overflow u16. */
5012 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5013 sizeof(struct mgmt_irk_info));
5014 u16 irk_count, expected_len;
5017 BT_DBG("request for %s", hdev->name);
5019 if (!lmp_le_capable(hdev))
5020 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5021 MGMT_STATUS_NOT_SUPPORTED);
5023 irk_count = __le16_to_cpu(cp->irk_count);
5024 if (irk_count > max_irk_count) {
5025 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5027 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5028 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must exactly match the declared count. */
5031 expected_len = struct_size(cp, irks, irk_count);
5032 if (expected_len != len) {
5033 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5035 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5036 MGMT_STATUS_INVALID_PARAMS);
5039 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry before touching existing state. */
5041 for (i = 0; i < irk_count; i++) {
5042 struct mgmt_irk_info *key = &cp->irks[i];
5044 if (!irk_is_valid(key))
5045 return mgmt_cmd_status(sk, hdev->id,
5047 MGMT_STATUS_INVALID_PARAMS);
/* Replace the whole IRK store atomically under the dev lock. */
5052 hci_smp_irks_clear(hdev);
5054 for (i = 0; i < irk_count; i++) {
5055 struct mgmt_irk_info *irk = &cp->irks[i];
5057 hci_add_irk(hdev, &irk->addr.bdaddr,
5058 le_addr_type(irk->addr.type), irk->val,
5062 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5064 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5066 hci_dev_unlock(hdev);
/* Tizen vendor handler for MGMT_OP_SET_ADVERTISING_PARAMS: stores LE
 * advertising interval range, filter policy and type.  Rejected while
 * advertising is active.
 */
5072 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
5073 void *data, u16 len)
5075 struct mgmt_cp_set_advertising_params *cp = data;
5080 BT_DBG("%s", hdev->name);
5082 if (!lmp_le_capable(hdev))
5083 return mgmt_cmd_status(sk, hdev->id,
5084 MGMT_OP_SET_ADVERTISING_PARAMS,
5085 MGMT_STATUS_NOT_SUPPORTED);
/* Parameters cannot be changed while advertising is on. */
5087 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
5088 return mgmt_cmd_status(sk, hdev->id,
5089 MGMT_OP_SET_ADVERTISING_PARAMS,
5092 min_interval = __le16_to_cpu(cp->interval_min);
5093 max_interval = __le16_to_cpu(cp->interval_max);
/* min <= max, min >= 0x0020, max <= 0x4000 (HCI adv interval range). */
5095 if (min_interval > max_interval ||
5096 min_interval < 0x0020 || max_interval > 0x4000)
5097 return mgmt_cmd_status(sk, hdev->id,
5098 MGMT_OP_SET_ADVERTISING_PARAMS,
5099 MGMT_STATUS_INVALID_PARAMS);
5103 hdev->le_adv_min_interval = min_interval;
5104 hdev->le_adv_max_interval = max_interval;
5105 hdev->adv_filter_policy = cp->filter_policy;
5106 hdev->adv_type = cp->type;
5108 err = mgmt_cmd_complete(sk, hdev->id,
5109 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
5111 hci_dev_unlock(hdev);
/* Completion handler for the Tizen Set Advertising Data request: relays
 * the HCI result to the pending mgmt command and removes it.
 */
5116 static void set_advertising_data_complete(struct hci_dev *hdev,
5117 u8 status, u16 opcode)
5119 struct mgmt_cp_set_advertising_data *cp;
5120 struct mgmt_pending_cmd *cmd;
5122 BT_DBG("status 0x%02x", status);
5126 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
5133 mgmt_cmd_status(cmd->sk, hdev->id,
5134 MGMT_OP_SET_ADVERTISING_DATA,
5135 mgmt_status(status));
5137 mgmt_cmd_complete(cmd->sk, hdev->id,
5138 MGMT_OP_SET_ADVERTISING_DATA, 0,
5141 mgmt_pending_remove(cmd);
5144 hci_dev_unlock(hdev);
/* Tizen vendor handler for MGMT_OP_SET_ADVERTISING_DATA: sends raw
 * advertising data straight to the controller via
 * HCI_OP_LE_SET_ADV_DATA, bypassing the instance-based adv machinery.
 */
5147 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
5148 void *data, u16 len)
5150 struct mgmt_pending_cmd *cmd;
5151 struct hci_request req;
5152 struct mgmt_cp_set_advertising_data *cp = data;
5153 struct hci_cp_le_set_adv_data adv;
5156 BT_DBG("%s", hdev->name);
5158 if (!lmp_le_capable(hdev)) {
5159 return mgmt_cmd_status(sk, hdev->id,
5160 MGMT_OP_SET_ADVERTISING_DATA,
5161 MGMT_STATUS_NOT_SUPPORTED);
5166 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
5167 err = mgmt_cmd_status(sk, hdev->id,
5168 MGMT_OP_SET_ADVERTISING_DATA,
/* Payload cannot exceed the 31-byte legacy advertising limit. */
5173 if (len > HCI_MAX_AD_LENGTH) {
5174 err = mgmt_cmd_status(sk, hdev->id,
5175 MGMT_OP_SET_ADVERTISING_DATA,
5176 MGMT_STATUS_INVALID_PARAMS);
5180 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
5187 hci_req_init(&req, hdev);
/* Zero-fill then copy so unused trailing bytes are cleared. */
5189 memset(&adv, 0, sizeof(adv));
5190 memcpy(adv.data, cp->data, len);
5193 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
5195 err = hci_req_run(&req, set_advertising_data_complete);
5197 mgmt_pending_remove(cmd);
5200 hci_dev_unlock(hdev);
/* Completion handler for the Tizen Set Scan Response Data request:
 * relays the HCI result to the pending mgmt command and removes it.
 */
5205 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
5208 struct mgmt_cp_set_scan_rsp_data *cp;
5209 struct mgmt_pending_cmd *cmd;
5211 BT_DBG("status 0x%02x", status);
5215 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
5222 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
5223 mgmt_status(status));
5225 mgmt_cmd_complete(cmd->sk, hdev->id,
5226 MGMT_OP_SET_SCAN_RSP_DATA, 0,
5229 mgmt_pending_remove(cmd);
5232 hci_dev_unlock(hdev);
/* Tizen vendor handler for MGMT_OP_SET_SCAN_RSP_DATA: sends raw scan
 * response data straight to the controller via
 * HCI_OP_LE_SET_SCAN_RSP_DATA.
 */
5235 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
5238 struct mgmt_pending_cmd *cmd;
5239 struct hci_request req;
5240 struct mgmt_cp_set_scan_rsp_data *cp = data;
5241 struct hci_cp_le_set_scan_rsp_data rsp;
5244 BT_DBG("%s", hdev->name);
5246 if (!lmp_le_capable(hdev))
5247 return mgmt_cmd_status(sk, hdev->id,
5248 MGMT_OP_SET_SCAN_RSP_DATA,
5249 MGMT_STATUS_NOT_SUPPORTED);
5253 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
5254 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
/* Payload cannot exceed the 31-byte legacy advertising limit. */
5259 if (len > HCI_MAX_AD_LENGTH) {
5260 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
5261 MGMT_STATUS_INVALID_PARAMS);
5265 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
5271 hci_req_init(&req, hdev);
/* Zero-fill then copy so unused trailing bytes are cleared. */
5273 memset(&rsp, 0, sizeof(rsp));
5274 memcpy(rsp.data, cp->data, len);
5277 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
5279 err = hci_req_run(&req, set_scan_rsp_data_complete);
5281 mgmt_pending_remove(cmd);
5284 hci_dev_unlock(hdev);
5289 /* Adv White List feature */
/* Completion handler for the Tizen Add Device To White List request:
 * reports the HCI result (echoing the request parameters on success)
 * and removes the pending command.
 */
5290 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5292 struct mgmt_cp_add_dev_white_list *cp;
5293 struct mgmt_pending_cmd *cmd;
5295 BT_DBG("status 0x%02x", status);
5299 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
5306 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5307 mgmt_status(status));
5309 mgmt_cmd_complete(cmd->sk, hdev->id,
5310 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
5312 mgmt_pending_remove(cmd);
5315 hci_dev_unlock(hdev);
/* Tizen vendor handler for MGMT_OP_ADD_DEV_WHITE_LIST: forwards the
 * request parameters verbatim to HCI_OP_LE_ADD_TO_WHITE_LIST.
 * Requires LE support and a powered controller.
 */
5318 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
5319 void *data, u16 len)
5321 struct mgmt_pending_cmd *cmd;
5322 struct mgmt_cp_add_dev_white_list *cp = data;
5323 struct hci_request req;
5326 BT_DBG("%s", hdev->name);
5328 if (!lmp_le_capable(hdev))
5329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5330 MGMT_STATUS_NOT_SUPPORTED);
5332 if (!hdev_is_powered(hdev))
5333 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5334 MGMT_STATUS_REJECTED);
/* Only one request may be in flight at a time. */
5338 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
5339 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5344 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
5350 hci_req_init(&req, hdev);
5352 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
5354 err = hci_req_run(&req, add_white_list_complete);
5356 mgmt_pending_remove(cmd);
5361 hci_dev_unlock(hdev);
/* Completion handler for the Tizen Remove Device From White List
 * request: reports the HCI result and removes the pending command.
 */
5366 static void remove_from_white_list_complete(struct hci_dev *hdev,
5367 u8 status, u16 opcode)
5369 struct mgmt_cp_remove_dev_from_white_list *cp;
5370 struct mgmt_pending_cmd *cmd;
5372 BT_DBG("status 0x%02x", status);
5376 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
5383 mgmt_cmd_status(cmd->sk, hdev->id,
5384 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5385 mgmt_status(status));
5387 mgmt_cmd_complete(cmd->sk, hdev->id,
5388 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
5391 mgmt_pending_remove(cmd);
5394 hci_dev_unlock(hdev);
/* Tizen vendor handler for MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST: forwards
 * the request parameters verbatim to HCI_OP_LE_DEL_FROM_WHITE_LIST.
 * Requires LE support and a powered controller.
 */
5397 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
5398 void *data, u16 len)
5400 struct mgmt_pending_cmd *cmd;
5401 struct mgmt_cp_remove_dev_from_white_list *cp = data;
5402 struct hci_request req;
5405 BT_DBG("%s", hdev->name);
5407 if (!lmp_le_capable(hdev))
5408 return mgmt_cmd_status(sk, hdev->id,
5409 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5410 MGMT_STATUS_NOT_SUPPORTED);
5412 if (!hdev_is_powered(hdev))
5413 return mgmt_cmd_status(sk, hdev->id,
5414 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5415 MGMT_STATUS_REJECTED);
/* Only one request may be in flight at a time. */
5419 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
5420 err = mgmt_cmd_status(sk, hdev->id,
5421 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5426 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5433 hci_req_init(&req, hdev);
5435 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
5437 err = hci_req_run(&req, remove_from_white_list_complete);
5439 mgmt_pending_remove(cmd);
5444 hci_dev_unlock(hdev);
/* Completion handler for the Tizen Clear White List request: reports
 * the HCI result and removes the pending command.
 */
5449 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
5452 struct mgmt_pending_cmd *cmd;
5454 BT_DBG("status 0x%02x", status);
5458 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
5463 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
5464 mgmt_status(status));
5466 mgmt_cmd_complete(cmd->sk, hdev->id,
5467 MGMT_OP_CLEAR_DEV_WHITE_LIST,
5470 mgmt_pending_remove(cmd);
5473 hci_dev_unlock(hdev);
/* Tizen vendor handler for MGMT_OP_CLEAR_DEV_WHITE_LIST: issues
 * HCI_OP_LE_CLEAR_WHITE_LIST (no parameters).  Requires LE support and
 * a powered controller.
 */
5476 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
5477 void *data, u16 len)
5479 struct mgmt_pending_cmd *cmd;
5480 struct hci_request req;
5483 BT_DBG("%s", hdev->name);
5485 if (!lmp_le_capable(hdev))
5486 return mgmt_cmd_status(sk, hdev->id,
5487 MGMT_OP_CLEAR_DEV_WHITE_LIST,
5488 MGMT_STATUS_NOT_SUPPORTED);
5490 if (!hdev_is_powered(hdev))
5491 return mgmt_cmd_status(sk, hdev->id,
5492 MGMT_OP_CLEAR_DEV_WHITE_LIST,
5493 MGMT_STATUS_REJECTED);
/* Only one request may be in flight at a time. */
5497 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
5498 err = mgmt_cmd_status(sk, hdev->id,
5499 MGMT_OP_CLEAR_DEV_WHITE_LIST,
5504 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
5511 hci_req_init(&req, hdev);
5513 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
5515 err = hci_req_run(&req, clear_white_list_complete);
5517 mgmt_pending_remove(cmd);
5522 hci_dev_unlock(hdev);
/* Completion handler for the Tizen RSSI-threshold vendor HCI request:
 * reports the result on the pending SET_RSSI_ENABLE command.
 */
5527 static void set_rssi_threshold_complete(struct hci_dev *hdev,
5528 u8 status, u16 opcode)
5530 struct mgmt_pending_cmd *cmd;
5532 BT_DBG("status 0x%02x", status);
5536 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5541 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5542 mgmt_status(status));
5544 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
5547 mgmt_pending_remove(cmd);
5550 hci_dev_unlock(hdev);
/* Completion handler for the Tizen RSSI-disable vendor HCI request:
 * reports the result on the pending SET_RSSI_DISABLE command.
 */
5553 static void set_rssi_disable_complete(struct hci_dev *hdev,
5554 u8 status, u16 opcode)
5556 struct mgmt_pending_cmd *cmd;
5558 BT_DBG("status 0x%02x", status);
5562 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5567 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5568 mgmt_status(status));
5570 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5573 mgmt_pending_remove(cmd);
5576 hci_dev_unlock(hdev);
/* Tizen vendor helper: programs per-connection RSSI alert thresholds
 * (low / in-range / high) via the HCI_OP_ENABLE_RSSI vendor command.
 * Expects a pending SET_RSSI_ENABLE command to already exist; the
 * connection handle is looked up from the bdaddr/link-type pair.
 * NOTE(review): line-sampled view; goto labels/braces are not visible.
 */
5579 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
5580 void *data, u16 len)
5583 struct hci_cp_set_rssi_threshold th = { 0, };
5584 struct mgmt_cp_set_enable_rssi *cp = data;
5585 struct hci_conn *conn;
5586 struct mgmt_pending_cmd *cmd;
5587 struct hci_request req;
/* A pending SET_RSSI_ENABLE must already exist (created earlier in
 * the enable flow); fail otherwise.
 */
5592 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5594 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5595 MGMT_STATUS_FAILED);
5599 if (!lmp_le_capable(hdev)) {
5600 mgmt_pending_remove(cmd);
5601 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5602 MGMT_STATUS_NOT_SUPPORTED);
5606 if (!hdev_is_powered(hdev)) {
5607 BT_DBG("%s", hdev->name);
5608 mgmt_pending_remove(cmd);
5609 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5610 MGMT_STATUS_NOT_POWERED);
/* link_type 0x01 selects LE, everything else BR/EDR (ACL). */
5614 if (cp->link_type == 0x01)
5615 dest_type = LE_LINK;
5617 dest_type = ACL_LINK;
5619 /* Get LE/ACL link handle info */
5620 conn = hci_conn_hash_lookup_ba(hdev,
5621 dest_type, &cp->bdaddr);
/* No such connection: complete with status 1 and drop the cmd. */
5624 err = mgmt_cmd_complete(sk, hdev->id,
5625 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
5626 mgmt_pending_remove(cmd);
5630 hci_req_init(&req, hdev);
/* Vendor sub-opcode 0x0B: set RSSI threshold.  alert_mask 0x07
 * enables all three (low/in-range/high) alerts.
 */
5632 th.hci_le_ext_opcode = 0x0B;
5634 th.conn_handle = conn->handle;
5635 th.alert_mask = 0x07;
5636 th.low_th = cp->low_th;
5637 th.in_range_th = cp->in_range_th;
5638 th.high_th = cp->high_th;
5640 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
5641 err = hci_req_run(&req, set_rssi_threshold_complete);
5644 mgmt_pending_remove(cmd);
5645 BT_ERR("Error in requesting hci_req_run");
5650 hci_dev_unlock(hdev);
/* Tizen vendor helper: on successful RSSI enable, sends the mgmt
 * command completion plus an MGMT_EV_RSSI_ENABLED event, and marks the
 * connection's RSSI-monitoring state.
 */
5654 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
5655 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
5657 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
5658 struct mgmt_cp_set_enable_rssi *cp = data;
5659 struct mgmt_pending_cmd *cmd;
/* Build the mgmt response from the HCI reply + original request. */
5664 mgmt_rp.status = rp->status;
5665 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
5666 mgmt_rp.bt_address = cp->bdaddr;
5667 mgmt_rp.link_type = cp->link_type;
5669 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5670 MGMT_STATUS_SUCCESS, &mgmt_rp,
5671 sizeof(struct mgmt_cc_rsp_enable_rssi));
5673 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
5674 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
/* Only one link per link-type is monitored: unset all, set this one. */
5676 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
5677 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
5678 &mgmt_rp.bt_address, true);
5682 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5684 mgmt_pending_remove(cmd);
5686 hci_dev_unlock(hdev);
/* Tizen vendor helper: on successful RSSI disable, sends the mgmt
 * command completion plus an MGMT_EV_RSSI_DISABLED event, and clears
 * the connection's RSSI-monitoring state.
 */
5689 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
5690 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
5692 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
5693 struct mgmt_cp_disable_rssi *cp = data;
5694 struct mgmt_pending_cmd *cmd;
/* Build the mgmt response from the HCI reply + original request. */
5699 mgmt_rp.status = rp->status;
5700 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
5701 mgmt_rp.bt_address = cp->bdaddr;
5702 mgmt_rp.link_type = cp->link_type;
5704 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5705 MGMT_STATUS_SUCCESS, &mgmt_rp,
5706 sizeof(struct mgmt_cc_rsp_enable_rssi))
5708 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
5709 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
5711 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
5712 &mgmt_rp.bt_address, false);
5716 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5718 mgmt_pending_remove(cmd);
5720 hci_dev_unlock(hdev);
/* Tizen vendor helper: sends the HCI_OP_ENABLE_RSSI vendor command with
 * the CS-features byte cleared (0x00) to switch RSSI monitoring off.
 * Expects a pending SET_RSSI_DISABLE command to already exist.
 * NOTE(review): line-sampled view; goto labels/braces are not visible.
 */
5723 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
5724 void *data, u16 len)
5726 struct mgmt_pending_cmd *cmd;
5727 struct hci_request req;
5728 struct hci_cp_set_enable_rssi cp_en = { 0, };
5731 BT_DBG("Set Disable RSSI.");
/* Vendor sub-opcode 0x01 with le_enable_cs_Features = 0x00 disables
 * the RSSI feature (compare set_enable_rssi, which uses 0x04).
 */
5733 cp_en.hci_le_ext_opcode = 0x01;
5734 cp_en.le_enable_cs_Features = 0x00;
5735 cp_en.data[0] = 0x00;
5736 cp_en.data[1] = 0x00;
5737 cp_en.data[2] = 0x00;
5741 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5743 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5744 MGMT_STATUS_FAILED);
5748 if (!lmp_le_capable(hdev)) {
5749 mgmt_pending_remove(cmd);
5750 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5751 MGMT_STATUS_NOT_SUPPORTED);
5755 if (!hdev_is_powered(hdev)) {
5756 BT_DBG("%s", hdev->name);
5757 mgmt_pending_remove(cmd);
5758 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5759 MGMT_STATUS_NOT_POWERED);
5763 hci_req_init(&req, hdev);
5765 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
5766 sizeof(struct hci_cp_set_enable_rssi),
5767 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
5768 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
5770 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
5771 err = hci_req_run(&req, set_rssi_disable_complete);
5774 mgmt_pending_remove(cmd);
5775 BT_ERR("Error in requesting hci_req_run");
5780 hci_dev_unlock(hdev);
/* Tizen vendor command-complete dispatcher for HCI_OP_ENABLE_RSSI:
 * routes the vendor reply to the matching pending mgmt command
 * (SET_RSSI_ENABLE or SET_RSSI_DISABLE) and drives the two-step enable
 * flow (enable feature -> set threshold) and the conditional disable.
 * NOTE(review): line-sampled view; case labels/braces are not visible.
 */
5784 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
5786 struct hci_cc_rsp_enable_rssi *rp = response;
5787 struct mgmt_pending_cmd *cmd_enable = NULL;
5788 struct mgmt_pending_cmd *cmd_disable = NULL;
5789 struct mgmt_cp_set_enable_rssi *cp_en;
5790 struct mgmt_cp_disable_rssi *cp_dis;
/* Look up both possible pending commands under the dev lock. */
5793 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5794 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5795 hci_dev_unlock(hdev);
5798 BT_DBG("Enable Request");
5801 BT_DBG("Disable Request");
5804 cp_en = cmd_enable->param;
/* Enable flow: dispatch on which vendor sub-opcode completed. */
5809 switch (rp->le_ext_opcode) {
5811 BT_DBG("RSSI enabled.. Setting Threshold...");
5812 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
5813 cp_en, sizeof(*cp_en));
5817 BT_DBG("Sending RSSI enable success");
5818 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
5819 cp_en, rp, rp->status);
5823 } else if (cmd_disable) {
5824 cp_dis = cmd_disable->param;
5829 switch (rp->le_ext_opcode) {
5831 BT_DBG("Sending RSSI disable success");
5832 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
5833 cp_dis, rp, rp->status);
5838 * Only unset RSSI Threshold values for the Link if
5839 * RSSI is monitored for other BREDR or LE Links
5841 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
5842 BT_DBG("Unset Threshold. Other links being monitored");
5843 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
5844 cp_dis, rp, rp->status);
5846 BT_DBG("Unset Threshold. Disabling...");
5847 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
5848 cp_dis, sizeof(*cp_dis));
/* hci_request completion callback for set_enable_rssi(): reports the
 * HCI status back to the mgmt socket that issued SET_RSSI_ENABLE and
 * clears the pending command.
 * NOTE(review): extraction gaps in this view — code left byte-identical.
 */
5855 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
5858 struct mgmt_pending_cmd *cmd;
5860 BT_DBG("status 0x%02x", status);
5864 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
/* Non-zero HCI status -> error path; zero -> success complete */
5869 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5870 mgmt_status(status));
5872 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
5875 mgmt_pending_remove(cmd);
5878 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_RSSI_ENABLE handler: enables vendor RSSI monitoring
 * (feature mask 0x04). If another link is already being monitored, skips
 * the HCI command and sets the threshold directly.
 * NOTE(review): extraction gaps (missing braces/returns between numbered
 * lines) — code left byte-identical.
 */
5881 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
5882 void *data, u16 len)
5884 struct mgmt_pending_cmd *cmd;
5885 struct hci_request req;
5886 struct mgmt_cp_set_enable_rssi *cp = data;
5887 struct hci_cp_set_enable_rssi cp_en = { 0, };
5890 BT_DBG("Set Enable RSSI.");
/* Sub-opcode 0x01 = enable/disable RSSI; 0x04 enables the feature */
5892 cp_en.hci_le_ext_opcode = 0x01;
5893 cp_en.le_enable_cs_Features = 0x04;
5894 cp_en.data[0] = 0x00;
5895 cp_en.data[1] = 0x00;
5896 cp_en.data[2] = 0x00;
5900 if (!lmp_le_capable(hdev)) {
5901 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5902 MGMT_STATUS_NOT_SUPPORTED);
5906 if (!hdev_is_powered(hdev)) {
5907 BT_DBG("%s", hdev->name);
5908 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5909 MGMT_STATUS_NOT_POWERED);
/* Reject if an enable is already in flight */
5913 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
5914 BT_DBG("%s", hdev->name);
5915 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5920 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
5923 BT_DBG("%s", hdev->name);
5928 /* If RSSI is already enabled directly set Threshold values */
5929 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
5930 hci_dev_unlock(hdev);
5931 BT_DBG("RSSI Enabled. Directly set Threshold");
5932 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
5936 hci_req_init(&req, hdev);
5938 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
5939 sizeof(struct hci_cp_set_enable_rssi),
5940 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
5941 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
5943 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
5944 err = hci_req_run(&req, set_rssi_enable_complete);
5947 mgmt_pending_remove(cmd);
5948 BT_ERR("Error in requesting hci_req_run");
5953 hci_dev_unlock(hdev);
/* hci_request completion callback for get_raw_rssi(): completes the
 * pending MGMT_OP_GET_RAW_RSSI with the one-byte HCI status and removes
 * the pending entry. The actual RSSI value is delivered separately via
 * mgmt_raw_rssi_response().
 */
5958 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5960 struct mgmt_pending_cmd *cmd;
5962 BT_DBG("status 0x%02x", status);
5966 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
5970 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
5971 MGMT_STATUS_SUCCESS, &status, 1);
5973 mgmt_pending_remove(cmd);
5976 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_GET_RAW_RSSI handler: resolves the LE/BR-EDR connection
 * handle for the requested address and queues the vendor "get raw RSSI"
 * HCI command for it.
 * NOTE(review): extraction gaps in this view — code left byte-identical.
 */
5979 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
5982 struct mgmt_pending_cmd *cmd;
5983 struct hci_request req;
5984 struct mgmt_cp_get_raw_rssi *cp = data;
5985 struct hci_cp_get_raw_rssi hci_cp;
5987 struct hci_conn *conn;
5991 BT_DBG("Get Raw RSSI.");
5995 if (!lmp_le_capable(hdev)) {
5996 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
5997 MGMT_STATUS_NOT_SUPPORTED);
/* link_type 0x01 = LE, anything else treated as BR/EDR (ACL) */
6001 if (cp->link_type == 0x01)
6002 dest_type = LE_LINK;
6004 dest_type = ACL_LINK;
6006 /* Get LE/BREDR link handle info */
6007 conn = hci_conn_hash_lookup_ba(hdev,
6008 dest_type, &cp->bt_address);
6010 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6011 MGMT_STATUS_NOT_CONNECTED);
6014 hci_cp.conn_handle = conn->handle;
6016 if (!hdev_is_powered(hdev)) {
6017 BT_DBG("%s", hdev->name);
6018 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6019 MGMT_STATUS_NOT_POWERED);
/* Only one raw-RSSI read in flight at a time */
6023 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
6024 BT_DBG("%s", hdev->name);
6025 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6030 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
6032 BT_DBG("%s", hdev->name);
6037 hci_req_init(&req, hdev);
6039 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
6040 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
6041 err = hci_req_run(&req, get_raw_rssi_complete);
6044 mgmt_pending_remove(cmd);
6045 BT_ERR("Error in requesting hci_req_run");
6049 hci_dev_unlock(hdev);
/* Builds and broadcasts the MGMT_EV_RAW_RSSI event from the vendor
 * command-complete payload (@rp): maps the connection handle back to a
 * peer address and link type before emitting the event.
 */
6054 void mgmt_raw_rssi_response(struct hci_dev *hdev,
6055 struct hci_cc_rp_get_raw_rssi *rp, int success)
6057 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
6058 struct hci_conn *conn;
6060 mgmt_rp.status = rp->status;
6061 mgmt_rp.rssi_dbm = rp->rssi_dbm;
6063 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
6067 bacpy(&mgmt_rp.bt_address, &conn->dst);
/* 0x01 = LE link, 0x00 = BR/EDR, matching the request's encoding */
6068 if (conn->type == LE_LINK)
6069 mgmt_rp.link_type = 0x01;
6071 mgmt_rp.link_type = 0x00;
6073 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
6074 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* hci_request completion callback for set_disable_threshold(): completes
 * the pending MGMT_OP_SET_RSSI_DISABLE with the one-byte HCI status and
 * removes the pending entry.
 */
6077 static void set_disable_threshold_complete(struct hci_dev *hdev,
6078 u8 status, u16 opcode)
6080 struct mgmt_pending_cmd *cmd;
6082 BT_DBG("status 0x%02x", status);
6086 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6090 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6091 MGMT_STATUS_SUCCESS, &status, 1);
6093 mgmt_pending_remove(cmd);
6096 hci_dev_unlock(hdev);
6099 /** Removes monitoring for a link*/
/* Clears the RSSI alert threshold for a single connection by sending the
 * vendor "set RSSI threshold" sub-command (0x0B) with a zeroed alert mask.
 * If the peer is not connected the operation completes successfully with
 * nothing to do.
 * NOTE(review): extraction gaps in this view — code left byte-identical.
 */
6100 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
6101 void *data, u16 len)
6104 struct hci_cp_set_rssi_threshold th = { 0, };
6105 struct mgmt_cp_disable_rssi *cp = data;
6106 struct hci_conn *conn;
6107 struct mgmt_pending_cmd *cmd;
6108 struct hci_request req;
6111 BT_DBG("Set Disable RSSI.");
6115 if (!lmp_le_capable(hdev)) {
6116 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6117 MGMT_STATUS_NOT_SUPPORTED);
6121 /* Get LE/ACL link handle info*/
6122 if (cp->link_type == 0x01)
6123 dest_type = LE_LINK;
6125 dest_type = ACL_LINK;
6127 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
/* No connection: nothing to unset, report success immediately */
6129 err = mgmt_cmd_complete(sk, hdev->id,
6130 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
/* Sub-opcode 0x0B = set RSSI threshold; zero mask disables alerts */
6134 th.hci_le_ext_opcode = 0x0B;
6136 th.conn_handle = conn->handle;
6137 th.alert_mask = 0x00;
6139 th.in_range_th = 0x00;
6142 if (!hdev_is_powered(hdev)) {
6143 BT_DBG("%s", hdev->name);
6144 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6149 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
6150 BT_DBG("%s", hdev->name);
6151 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6156 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
6159 BT_DBG("%s", hdev->name);
6164 hci_req_init(&req, hdev);
6166 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6167 err = hci_req_run(&req, set_disable_threshold_complete);
6169 mgmt_pending_remove(cmd);
6170 BT_ERR("Error in requesting hci_req_run");
6175 hci_dev_unlock(hdev);
/* Translates a vendor-specific RSSI alert HCI event into the
 * MGMT_EV_RSSI_ALERT mgmt event: resolves the peer address from the
 * connection handle and forwards alert type and RSSI value.
 */
6180 void mgmt_rssi_alert_evt(struct hci_dev *hdev, struct sk_buff *skb)
6182 struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
6183 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
6184 struct hci_conn *conn;
6186 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
6187 ev->conn_handle, ev->alert_type, ev->rssi_dbm);
6189 conn = hci_conn_hash_lookup_handle(hdev, ev->conn_handle);
6192 BT_ERR("RSSI alert Error: Device not found for handle");
6195 bacpy(&mgmt_ev.bdaddr, &conn->dst);
/* 0x01 = LE link, 0x00 = BR/EDR */
6197 if (conn->type == LE_LINK)
6198 mgmt_ev.link_type = 0x01;
6200 mgmt_ev.link_type = 0x00;
6202 mgmt_ev.alert_type = ev->alert_type;
6203 mgmt_ev.rssi_dbm = ev->rssi_dbm;
6205 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
6206 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
/* Failure path for a start-LE-discovery request: resets the separate LE
 * discovery state machine to STOPPED and completes the pending
 * MGMT_OP_START_LE_DISCOVERY with the mapped HCI status and the
 * discovery type that was requested.
 */
6210 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
6212 struct mgmt_pending_cmd *cmd;
6216 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
6218 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
6222 type = hdev->le_discovery.type;
6224 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
6225 mgmt_status(status), &type, sizeof(type));
6226 mgmt_pending_remove(cmd);
/* hci_request completion for start_le_discovery(): on failure, unwinds
 * via mgmt_start_le_discovery_failed(); on success, moves the LE
 * discovery state to FINDING and schedules le_scan_disable to stop the
 * scan after @timeout.
 * NOTE(review): timeout is initialized to 0 here; the lines computing a
 * non-zero value are not visible in this view — confirm against full file.
 */
6231 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
6234 unsigned long timeout = 0;
6236 BT_DBG("status %d", status);
6240 mgmt_start_le_discovery_failed(hdev, status);
6241 hci_dev_unlock(hdev);
6246 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
6247 hci_dev_unlock(hdev);
6249 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
6250 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
6255 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* Tizen MGMT_OP_START_LE_DISCOVERY handler: validates state and type,
 * temporarily stops background scanning if active, programs scan
 * parameters (interval/window/own-address type) and enables LE scanning
 * without duplicate filtering.
 * NOTE(review): extraction gaps in this view; also "¶m_cp" below is
 * mojibake for "&param_cp" (HTML-entity corruption) — needs re-encoding,
 * not a logic change. Code left byte-identical.
 */
6258 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
6259 void *data, u16 len)
6261 struct mgmt_cp_start_le_discovery *cp = data;
6262 struct mgmt_pending_cmd *cmd;
6263 struct hci_cp_le_set_scan_param param_cp;
6264 struct hci_cp_le_set_scan_enable enable_cp;
6265 struct hci_request req;
6266 u8 status, own_addr_type;
6269 BT_DBG("%s", hdev->name);
6273 if (!hdev_is_powered(hdev)) {
6274 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6275 MGMT_STATUS_NOT_POWERED);
6279 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
6280 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
/* Only the LE-only discovery type is accepted by this entry point */
6285 if (cp->type != DISCOV_TYPE_LE) {
6286 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6287 MGMT_STATUS_INVALID_PARAMS);
6291 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
6297 hdev->le_discovery.type = cp->type;
6299 hci_req_init(&req, hdev);
6301 status = mgmt_le_support(hdev);
6303 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6305 mgmt_pending_remove(cmd);
6309 /* If controller is scanning, it means the background scanning
6310 * is running. Thus, we should temporarily stop it in order to
6311 * set the discovery scanning parameters.
6313 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
6314 hci_req_add_le_scan_disable(&req);
6316 memset(¶m_cp, 0, sizeof(param_cp));
6318 /* All active scans will be done with either a resolvable
6319 * private address (when privacy feature has been enabled)
6320 * or unresolvable private address.
6322 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
6324 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6325 MGMT_STATUS_FAILED);
6326 mgmt_pending_remove(cmd);
/* Scan parameters come from the per-device defaults */
6330 param_cp.type = hdev->le_scan_type;
6331 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
6332 param_cp.window = cpu_to_le16(hdev->le_scan_window);
6333 param_cp.own_address_type = own_addr_type;
6334 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
6337 memset(&enable_cp, 0, sizeof(enable_cp));
6338 enable_cp.enable = LE_SCAN_ENABLE;
/* Duplicate filtering off so every advertisement is reported */
6339 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
6341 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
6344 err = hci_req_run(&req, start_le_discovery_complete);
6346 mgmt_pending_remove(cmd);
6348 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
6351 hci_dev_unlock(hdev);
/* Failure path for a stop-LE-discovery request: completes the pending
 * MGMT_OP_STOP_LE_DISCOVERY with the mapped HCI status and the active
 * discovery type, then removes the pending command.
 */
6355 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
6357 struct mgmt_pending_cmd *cmd;
6360 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
6364 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
6365 mgmt_status(status), &hdev->le_discovery.type,
6366 sizeof(hdev->le_discovery.type));
6367 mgmt_pending_remove(cmd);
/* hci_request completion for stop_le_discovery(): on failure reports via
 * mgmt_stop_le_discovery_failed(); on success transitions the LE
 * discovery state machine to STOPPED.
 */
6372 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
6375 BT_DBG("status %d", status);
6380 mgmt_stop_le_discovery_failed(hdev, status);
6384 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
6387 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_STOP_LE_DISCOVERY handler: validates that LE discovery
 * is active and of the requested type, cancels the timed scan-disable
 * work and queues an LE scan disable, then marks the state STOPPING.
 * NOTE(review): extraction gaps in this view — code left byte-identical.
 */
6390 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
6391 void *data, u16 len)
6393 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
6394 struct mgmt_pending_cmd *cmd;
6395 struct hci_request req;
6398 BT_DBG("%s", hdev->name);
6402 if (!hci_le_discovery_active(hdev)) {
6403 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
6404 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6405 sizeof(mgmt_cp->type));
/* Requested type must match the discovery currently running */
6409 if (hdev->le_discovery.type != mgmt_cp->type) {
6410 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
6411 MGMT_STATUS_INVALID_PARAMS,
6412 &mgmt_cp->type, sizeof(mgmt_cp->type));
6416 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
6422 hci_req_init(&req, hdev);
6424 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
6425 BT_DBG("unknown le discovery state %u",
6426 hdev->le_discovery.state);
6428 mgmt_pending_remove(cmd);
6429 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
6430 MGMT_STATUS_FAILED, &mgmt_cp->type,
6431 sizeof(mgmt_cp->type));
/* Stop the auto-disable timer before disabling the scan ourselves */
6435 cancel_delayed_work(&hdev->le_scan_disable);
6436 hci_req_add_le_scan_disable(&req);
6438 err = hci_req_run(&req, stop_le_discovery_complete);
6440 mgmt_pending_remove(cmd);
6442 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
6445 hci_dev_unlock(hdev);
6449 /* Separate LE discovery */
/* Notifies userspace of LE discovery state changes: completes any
 * pending start/stop command with the current discovery type, then
 * broadcasts MGMT_EV_DISCOVERING with the new @discovering flag.
 */
6450 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
6452 struct mgmt_ev_discovering ev;
6453 struct mgmt_pending_cmd *cmd;
6455 BT_DBG("%s le discovering %u", hdev->name, discovering);
/* A start or stop command may be awaiting this transition */
6458 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
6460 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
6463 u8 type = hdev->le_discovery.type;
6465 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6467 mgmt_pending_remove(cmd);
6470 memset(&ev, 0, sizeof(ev));
6471 ev.type = hdev->le_discovery.type;
6472 ev.discovering = discovering;
6474 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Tizen handler that aborts an in-progress LE auto-connection by sending
 * HCI LE Create Connection Cancel directly (no pending-command tracking;
 * failure is only logged).
 */
6477 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
6478 void *data, u16 len)
6482 BT_DBG("%s", hdev->name);
6486 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
6488 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
6490 hci_dev_unlock(hdev);
6494 #endif /* TIZEN_BT */
/* Validates a single LTK entry from Load Long Term Keys: the master flag
 * must be a strict boolean, and random addresses must be static (two most
 * significant bits set per the Core spec).
 */
6496 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6498 if (key->master != 0x00 && key->master != 0x01)
6501 switch (key->addr.type) {
6502 case BDADDR_LE_PUBLIC:
6505 case BDADDR_LE_RANDOM:
6506 /* Two most significant bits shall be set */
6507 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: validates the key count against
 * the maximum that fits in a u16 payload and against the actual message
 * length, rejects any malformed key, then atomically replaces the SMP
 * LTK store with the supplied list.
 * NOTE(review): extraction gaps in this view (missing braces/breaks) —
 * code left byte-identical.
 */
6515 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6516 void *cp_data, u16 len)
6518 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound on keys that can physically fit in a u16-sized payload */
6519 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6520 sizeof(struct mgmt_ltk_info));
6521 u16 key_count, expected_len;
6524 BT_DBG("request for %s", hdev->name);
6526 if (!lmp_le_capable(hdev))
6527 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6528 MGMT_STATUS_NOT_SUPPORTED);
6530 key_count = __le16_to_cpu(cp->key_count);
6531 if (key_count > max_key_count) {
6532 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6534 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6535 MGMT_STATUS_INVALID_PARAMS);
/* The declared key count must exactly match the received length */
6538 expected_len = struct_size(cp, keys, key_count);
6539 if (expected_len != len) {
6540 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6542 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6543 MGMT_STATUS_INVALID_PARAMS);
6546 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate everything before mutating state: all-or-nothing load */
6548 for (i = 0; i < key_count; i++) {
6549 struct mgmt_ltk_info *key = &cp->keys[i];
6551 if (!ltk_is_valid(key))
6552 return mgmt_cmd_status(sk, hdev->id,
6553 MGMT_OP_LOAD_LONG_TERM_KEYS,
6554 MGMT_STATUS_INVALID_PARAMS);
6559 hci_smp_ltks_clear(hdev);
6561 for (i = 0; i < key_count; i++) {
6562 struct mgmt_ltk_info *key = &cp->keys[i];
6563 u8 type, authenticated;
/* Map mgmt key type to SMP key type + authentication level */
6565 switch (key->type) {
6566 case MGMT_LTK_UNAUTHENTICATED:
6567 authenticated = 0x00;
6568 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6570 case MGMT_LTK_AUTHENTICATED:
6571 authenticated = 0x01;
6572 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6574 case MGMT_LTK_P256_UNAUTH:
6575 authenticated = 0x00;
6576 type = SMP_LTK_P256;
6578 case MGMT_LTK_P256_AUTH:
6579 authenticated = 0x01;
6580 type = SMP_LTK_P256;
6582 case MGMT_LTK_P256_DEBUG:
6583 authenticated = 0x00;
6584 type = SMP_LTK_P256_DEBUG;
6590 hci_add_ltk(hdev, &key->addr.bdaddr,
6591 le_addr_type(key->addr.type), type, authenticated,
6592 key->val, key->enc_size, key->ediv, key->rand);
6595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6598 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Connection Information: on success,
 * replies with the cached RSSI / TX power / max TX power from the
 * hci_conn; on failure fills the reply with the INVALID sentinels.
 * Drops the connection reference taken when the command was queued.
 */
6603 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6605 struct hci_conn *conn = cmd->user_data;
6606 struct mgmt_rp_get_conn_info rp;
/* Echo back the address the caller asked about */
6609 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6611 if (status == MGMT_STATUS_SUCCESS) {
6612 rp.rssi = conn->rssi;
6613 rp.tx_power = conn->tx_power;
6614 rp.max_tx_power = conn->max_tx_power;
6616 rp.rssi = HCI_RSSI_INVALID;
6617 rp.tx_power = HCI_TX_POWER_INVALID;
6618 rp.max_tx_power = HCI_TX_POWER_INVALID;
6621 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6622 status, &rp, sizeof(rp));
6624 hci_conn_drop(conn);
/* hci_request completion for the conn-info refresh (Read RSSI + Read TX
 * Power): recovers the connection handle from whichever command was sent
 * last, looks up the pending GET_CONN_INFO for that connection, and
 * finishes it via its cmd_complete callback.
 */
6630 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
6633 struct hci_cp_read_rssi *cp;
6634 struct mgmt_pending_cmd *cmd;
6635 struct hci_conn *conn;
6639 BT_DBG("status 0x%02x", hci_status);
6643 /* Commands sent in request are either Read RSSI or Read Transmit Power
6644 * Level so we check which one was last sent to retrieve connection
6645 * handle. Both commands have handle as first parameter so it's safe to
6646 * cast data on the same command struct.
6648 * First command sent is always Read RSSI and we fail only if it fails.
6649 * In other case we simply override error to indicate success as we
6650 * already remembered if TX power value is actually valid.
6652 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
6654 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
6655 status = MGMT_STATUS_SUCCESS;
6657 status = mgmt_status(hci_status);
6661 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
6665 handle = __le16_to_cpu(cp->handle);
6666 conn = hci_conn_hash_lookup_handle(hdev, handle);
6668 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
/* Match the pending command by the conn pointer stored as user_data */
6673 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
6677 cmd->cmd_complete(cmd, status);
6678 mgmt_pending_remove(cmd);
6681 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: returns cached RSSI/TX-power for a
 * connected peer, refreshing the cache from the controller first when
 * the values are older than a randomized min/max age window.
 * NOTE(review): extraction gaps in this view — code left byte-identical.
 */
6684 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6687 struct mgmt_cp_get_conn_info *cp = data;
6688 struct mgmt_rp_get_conn_info rp;
6689 struct hci_conn *conn;
6690 unsigned long conn_info_age;
6693 BT_DBG("%s", hdev->name);
/* Reply always echoes the queried address/type */
6695 memset(&rp, 0, sizeof(rp));
6696 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6697 rp.addr.type = cp->addr.type;
6699 if (!bdaddr_type_is_valid(cp->addr.type))
6700 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6701 MGMT_STATUS_INVALID_PARAMS,
6706 if (!hdev_is_powered(hdev)) {
6707 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6708 MGMT_STATUS_NOT_POWERED, &rp,
6713 if (cp->addr.type == BDADDR_BREDR)
6714 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6717 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6719 if (!conn || conn->state != BT_CONNECTED) {
6720 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6721 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one outstanding query per connection */
6726 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
6727 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6728 MGMT_STATUS_BUSY, &rp, sizeof(rp));
6732 /* To avoid client trying to guess when to poll again for information we
6733 * calculate conn info age as random value between min/max set in hdev.
6735 conn_info_age = hdev->conn_info_min_age +
6736 prandom_u32_max(hdev->conn_info_max_age -
6737 hdev->conn_info_min_age);
6739 /* Query controller to refresh cached values if they are too old or were
6742 if (time_after(jiffies, conn->conn_info_timestamp +
6743 msecs_to_jiffies(conn_info_age)) ||
6744 !conn->conn_info_timestamp) {
6745 struct hci_request req;
6746 struct hci_cp_read_tx_power req_txp_cp;
6747 struct hci_cp_read_rssi req_rssi_cp;
6748 struct mgmt_pending_cmd *cmd;
6750 hci_req_init(&req, hdev);
6751 req_rssi_cp.handle = cpu_to_le16(conn->handle);
6752 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
6755 /* For LE links TX power does not change thus we don't need to
6756 * query for it once value is known.
6758 if (!bdaddr_type_is_le(cp->addr.type) ||
6759 conn->tx_power == HCI_TX_POWER_INVALID) {
6760 req_txp_cp.handle = cpu_to_le16(conn->handle);
6761 req_txp_cp.type = 0x00;
6762 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6763 sizeof(req_txp_cp), &req_txp_cp);
6766 /* Max TX power needs to be read only once per connection */
6767 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
6768 req_txp_cp.handle = cpu_to_le16(conn->handle);
6769 req_txp_cp.type = 0x01;
6770 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6771 sizeof(req_txp_cp), &req_txp_cp);
6774 err = hci_req_run(&req, conn_info_refresh_complete);
6778 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Keep the conn alive until the refresh completes */
6785 hci_conn_hold(conn);
6786 cmd->user_data = hci_conn_get(conn);
6787 cmd->cmd_complete = conn_info_cmd_complete;
6789 conn->conn_info_timestamp = jiffies;
6791 /* Cache is valid, just reply with values cached in hci_conn */
6792 rp.rssi = conn->rssi;
6793 rp.tx_power = conn->tx_power;
6794 rp.max_tx_power = conn->max_tx_power;
6796 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6797 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6801 hci_dev_unlock(hdev);
/* cmd_complete callback for Get Clock Information: fills the reply with
 * the local clock and, when a connection is attached, the piconet clock
 * and accuracy; drops the conn reference afterwards.
 */
6805 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6807 struct hci_conn *conn = cmd->user_data;
6808 struct mgmt_rp_get_clock_info rp;
6809 struct hci_dev *hdev;
6812 memset(&rp, 0, sizeof(rp));
6813 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6818 hdev = hci_dev_get(cmd->index);
6820 rp.local_clock = cpu_to_le32(hdev->clock);
6825 rp.piconet_clock = cpu_to_le32(conn->clock);
6826 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6830 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6834 hci_conn_drop(conn);
/* hci_request completion for Read Clock: when the piconet clock was read
 * (which != 0), resolves the connection from the handle so the right
 * pending GET_CLOCK_INFO command can be matched and completed.
 */
6841 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6843 struct hci_cp_read_clock *hci_cp;
6844 struct mgmt_pending_cmd *cmd;
6845 struct hci_conn *conn;
6847 BT_DBG("%s status %u", hdev->name, status);
6851 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which == 0x01 means the piconet clock of a specific connection */
6855 if (hci_cp->which) {
6856 u16 handle = __le16_to_cpu(hci_cp->handle);
6857 conn = hci_conn_hash_lookup_handle(hdev, handle);
6862 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
6866 cmd->cmd_complete(cmd, mgmt_status(status));
6867 mgmt_pending_remove(cmd);
6870 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only): queues a Read Clock for
 * the local clock and, when a specific connected peer address is given,
 * a second Read Clock for that connection's piconet clock.
 * NOTE(review): extraction gaps in this view — code left byte-identical.
 */
6873 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6876 struct mgmt_cp_get_clock_info *cp = data;
6877 struct mgmt_rp_get_clock_info rp;
6878 struct hci_cp_read_clock hci_cp;
6879 struct mgmt_pending_cmd *cmd;
6880 struct hci_request req;
6881 struct hci_conn *conn;
6884 BT_DBG("%s", hdev->name);
6886 memset(&rp, 0, sizeof(rp));
6887 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6888 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept */
6890 if (cp->addr.type != BDADDR_BREDR)
6891 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6892 MGMT_STATUS_INVALID_PARAMS,
6897 if (!hdev_is_powered(hdev)) {
6898 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6899 MGMT_STATUS_NOT_POWERED, &rp,
/* BDADDR_ANY means "local clock only"; otherwise peer must be connected */
6904 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6905 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6907 if (!conn || conn->state != BT_CONNECTED) {
6908 err = mgmt_cmd_complete(sk, hdev->id,
6909 MGMT_OP_GET_CLOCK_INFO,
6910 MGMT_STATUS_NOT_CONNECTED,
6918 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6924 cmd->cmd_complete = clock_info_cmd_complete;
6926 hci_req_init(&req, hdev);
/* First read: local clock (handle 0 / which 0) */
6928 memset(&hci_cp, 0, sizeof(hci_cp));
6929 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
6932 hci_conn_hold(conn);
6933 cmd->user_data = hci_conn_get(conn);
/* Second read: the peer's piconet clock */
6935 hci_cp.handle = cpu_to_le16(conn->handle);
6936 hci_cp.which = 0x01; /* Piconet clock */
6937 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
6940 err = hci_req_run(&req, get_clock_info_complete);
6942 mgmt_pending_remove(cmd);
6945 hci_dev_unlock(hdev);
/* Returns true when an LE connection to @addr with matching destination
 * address @type exists and is in BT_CONNECTED state.
 */
6949 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6951 struct hci_conn *conn;
6953 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6957 if (conn->dst_type != type)
6960 if (conn->state != BT_CONNECTED)
6966 /* This function requires the caller holds hdev->lock */
/* Creates (or reuses) connection parameters for @addr/@addr_type and
 * moves the entry onto the action list that matches @auto_connect
 * (pend_le_conns for connect policies, pend_le_reports for REPORT).
 * Returns 0 on success (inferred from control flow; error paths are in
 * lines not visible here).
 * NOTE(review): "¶ms" below is mojibake for "&params" — needs
 * re-encoding, not a logic change. Code left byte-identical.
 */
6967 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6968 u8 addr_type, u8 auto_connect)
6970 struct hci_conn_params *params;
6972 params = hci_conn_params_add(hdev, addr, addr_type);
/* No change requested: nothing to do */
6976 if (params->auto_connect == auto_connect)
6979 list_del_init(¶ms->action);
6981 switch (auto_connect) {
6982 case HCI_AUTO_CONN_DISABLED:
6983 case HCI_AUTO_CONN_LINK_LOSS:
6984 /* If auto connect is being disabled when we're trying to
6985 * connect to device, keep connecting.
6987 if (params->explicit_connect)
6988 list_add(¶ms->action, &hdev->pend_le_conns)
6990 case HCI_AUTO_CONN_REPORT:
6991 if (params->explicit_connect)
6992 list_add(¶ms->action, &hdev->pend_le_conns);
6994 list_add(¶ms->action, &hdev->pend_le_reports);
6996 case HCI_AUTO_CONN_DIRECT:
6997 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a connect attempt if not already connected */
6998 if (!is_connected(hdev, addr, addr_type))
6999 list_add(¶ms->action, &hdev->pend_le_conns)
7003 params->auto_connect = auto_connect;
7005 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emits MGMT_EV_DEVICE_ADDED to all mgmt sockets except @sk (the
 * originator), carrying the added address, type, and action.
 * NOTE(review): the line assigning ev.action is not visible in this
 * view (extraction gap) — code left byte-identical.
 */
7011 static void device_added(struct sock *sk, struct hci_dev *hdev,
7012 bdaddr_t *bdaddr, u8 type, u8 action)
7014 struct mgmt_ev_device_added ev;
7016 bacpy(&ev.addr.bdaddr, bdaddr);
7017 ev.addr.type = type;
7020 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler: for BR/EDR adds the peer to the whitelist
 * (incoming-connection action only); for LE maps the action to an
 * auto-connect policy and stores connection parameters, then refreshes
 * background scanning and emits Device Added.
 * Actions: 0x00 = background scan (REPORT), 0x01 = allow incoming /
 * direct connect, 0x02 = auto-connect always.
 * NOTE(review): extraction gaps in this view — code left byte-identical.
 */
7023 static int add_device(struct sock *sk, struct hci_dev *hdev,
7024 void *data, u16 len)
7026 struct mgmt_cp_add_device *cp = data;
7027 u8 auto_conn, addr_type;
7030 BT_DBG("%s", hdev->name);
7032 if (!bdaddr_type_is_valid(cp->addr.type) ||
7033 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7034 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7035 MGMT_STATUS_INVALID_PARAMS,
7036 &cp->addr, sizeof(cp->addr));
7038 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7039 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7040 MGMT_STATUS_INVALID_PARAMS,
7041 &cp->addr, sizeof(cp->addr));
7045 if (cp->addr.type == BDADDR_BREDR) {
7046 /* Only incoming connections action is supported for now */
7047 if (cp->action != 0x01) {
7048 err = mgmt_cmd_complete(sk, hdev->id,
7050 MGMT_STATUS_INVALID_PARAMS,
7051 &cp->addr, sizeof(cp->addr));
7055 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
7060 hci_req_update_scan(hdev);
7065 addr_type = le_addr_type(cp->addr.type);
7067 if (cp->action == 0x02)
7068 auto_conn = HCI_AUTO_CONN_ALWAYS;
7069 else if (cp->action == 0x01)
7070 auto_conn = HCI_AUTO_CONN_DIRECT;
7072 auto_conn = HCI_AUTO_CONN_REPORT;
7074 /* Kernel internally uses conn_params with resolvable private
7075 * address, but Add Device allows only identity addresses.
7076 * Make sure it is enforced before calling
7077 * hci_conn_params_lookup.
7079 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7080 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7081 MGMT_STATUS_INVALID_PARAMS,
7082 &cp->addr, sizeof(cp->addr));
7086 /* If the connection parameters don't exist for this device,
7087 * they will be created and configured with defaults.
7089 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7091 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7092 MGMT_STATUS_FAILED, &cp->addr,
7097 hci_update_background_scan(hdev);
7100 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7102 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7103 MGMT_STATUS_SUCCESS, &cp->addr,
7107 hci_dev_unlock(hdev);
/* Emits MGMT_EV_DEVICE_REMOVED to all mgmt sockets except @sk (the
 * originator), carrying the removed address and type.
 */
7111 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7112 bdaddr_t *bdaddr, u8 type)
7114 struct mgmt_ev_device_removed ev;
7116 bacpy(&ev.addr.bdaddr, bdaddr);
7117 ev.addr.type = type;
7119 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_REMOVE_DEVICE handler: with a specific address, removes the
 * whitelist entry (BR/EDR) or the LE connection parameters; with
 * BDADDR_ANY (and type 0), clears the whole whitelist and all removable
 * LE connection parameters. Emits Device Removed per entry and refreshes
 * scanning afterwards.
 * NOTE(review): extraction gaps in this view; "¶ms" is mojibake for
 * "&params" — needs re-encoding, not a logic change.
 */
7122 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7123 void *data, u16 len)
7125 struct mgmt_cp_remove_device *cp = data;
7128 BT_DBG("%s", hdev->name);
/* Non-ANY address: remove one specific device */
7132 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7133 struct hci_conn_params *params;
7136 if (!bdaddr_type_is_valid(cp->addr.type)) {
7137 err = mgmt_cmd_complete(sk, hdev->id,
7138 MGMT_OP_REMOVE_DEVICE,
7139 MGMT_STATUS_INVALID_PARAMS,
7140 &cp->addr, sizeof(cp->addr));
7144 if (cp->addr.type == BDADDR_BREDR) {
7145 err = hci_bdaddr_list_del(&hdev->whitelist,
7149 err = mgmt_cmd_complete(sk, hdev->id,
7150 MGMT_OP_REMOVE_DEVICE,
7151 MGMT_STATUS_INVALID_PARAMS,
7157 hci_req_update_scan(hdev);
7159 device_removed(sk, hdev, &cp->addr.bdaddr,
7164 addr_type = le_addr_type(cp->addr.type);
7166 /* Kernel internally uses conn_params with resolvable private
7167 * address, but Remove Device allows only identity addresses.
7168 * Make sure it is enforced before calling
7169 * hci_conn_params_lookup.
7171 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7172 err = mgmt_cmd_complete(sk, hdev->id,
7173 MGMT_OP_REMOVE_DEVICE,
7174 MGMT_STATUS_INVALID_PARAMS,
7175 &cp->addr, sizeof(cp->addr));
7179 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7182 err = mgmt_cmd_complete(sk, hdev->id,
7183 MGMT_OP_REMOVE_DEVICE,
7184 MGMT_STATUS_INVALID_PARAMS,
7185 &cp->addr, sizeof(cp->addr));
/* DISABLED/EXPLICIT entries are internal state, not user-removable */
7189 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7190 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7191 err = mgmt_cmd_complete(sk, hdev->id,
7192 MGMT_OP_REMOVE_DEVICE,
7193 MGMT_STATUS_INVALID_PARAMS,
7194 &cp->addr, sizeof(cp->addr));
7198 list_del(¶ms->action);
7199 list_del(¶ms->list);
7201 hci_update_background_scan(hdev);
7203 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY branch: remove everything (type must be 0) */
7205 struct hci_conn_params *p, *tmp;
7206 struct bdaddr_list *b, *btmp;
7208 if (cp->addr.type) {
7209 err = mgmt_cmd_complete(sk, hdev->id,
7210 MGMT_OP_REMOVE_DEVICE,
7211 MGMT_STATUS_INVALID_PARAMS,
7212 &cp->addr, sizeof(cp->addr));
7216 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
7217 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7222 hci_req_update_scan(hdev);
7224 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7225 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7227 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep explicitly-connecting entries, just downgrade them */
7228 if (p->explicit_connect) {
7229 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7232 list_del(&p->action);
7237 BT_DBG("All LE connection parameters were removed");
7239 hci_update_background_scan(hdev);
7243 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7244 MGMT_STATUS_SUCCESS, &cp->addr,
7247 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: validates the parameter count against
 * the payload size, clears previously-disabled entries, then loads each
 * valid LE connection parameter set (min/max interval, latency,
 * supervision timeout), skipping invalid entries with an error log.
 * NOTE(review): "¶m" below is mojibake for "&param" — needs
 * re-encoding, not a logic change. Code left byte-identical.
 */
7251 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7254 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on entries that can fit in a u16-sized payload */
7255 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7256 sizeof(struct mgmt_conn_param));
7257 u16 param_count, expected_len;
7260 if (!lmp_le_capable(hdev))
7261 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7262 MGMT_STATUS_NOT_SUPPORTED);
7264 param_count = __le16_to_cpu(cp->param_count);
7265 if (param_count > max_param_count) {
7266 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7268 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7269 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must exactly match received message length */
7272 expected_len = struct_size(cp, params, param_count);
7273 if (expected_len != len) {
7274 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7276 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7277 MGMT_STATUS_INVALID_PARAMS);
7280 BT_DBG("%s param_count %u", hdev->name, param_count);
7284 hci_conn_params_clear_disabled(hdev);
7286 for (i = 0; i < param_count; i++) {
7287 struct mgmt_conn_param *param = &cp->params[i];
7288 struct hci_conn_params *hci_param;
7289 u16 min, max, latency, timeout;
7292 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
7295 if (param->addr.type == BDADDR_LE_PUBLIC) {
7296 addr_type = ADDR_LE_DEV_PUBLIC;
7297 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7298 addr_type = ADDR_LE_DEV_RANDOM;
7300 bt_dev_err(hdev, "ignoring invalid connection parameters");
7304 min = le16_to_cpu(param->min_interval);
7305 max = le16_to_cpu(param->max_interval);
7306 latency = le16_to_cpu(param->latency);
7307 timeout = le16_to_cpu(param->timeout);
7309 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7310 min, max, latency, timeout);
/* Bad ranges are skipped, not fatal for the whole load */
7312 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7313 bt_dev_err(hdev, "ignoring invalid connection parameters");
7317 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
7320 bt_dev_err(hdev, "failed to add connection parameters");
7324 hci_param->conn_min_interval = min;
7325 hci_param->conn_max_interval = max;
7326 hci_param->conn_latency = latency;
7327 hci_param->supervision_timeout = timeout;
7330 hci_dev_unlock(hdev);
7332 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: only valid while powered off, with a
 * 0x00/0x01 config value, on controllers with HCI_QUIRK_EXTERNAL_CONFIG.
 * Toggles HCI_EXT_CONFIGURED and, when the configured state changes, moves
 * the index between the configured and unconfigured lists.
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
7336 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7337 void *data, u16 len)
7339 struct mgmt_cp_set_external_config *cp = data;
7343 BT_DBG("%s", hdev->name);
7345 if (hdev_is_powered(hdev))
7346 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7347 MGMT_STATUS_REJECTED);
7349 if (cp->config != 0x00 && cp->config != 0x01)
7350 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7351 MGMT_STATUS_INVALID_PARAMS);
7353 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7355 MGMT_STATUS_NOT_SUPPORTED);
/* test-and-set/clear so "changed" reflects an actual state transition. */
7360 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7362 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7364 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7371 err = new_options(hdev, sk);
7373 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7374 mgmt_index_removed(hdev);
7376 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7377 hci_dev_set_flag(hdev, HCI_CONFIG);
7378 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7380 queue_work(hdev->req_workqueue, &hdev->power_on);
7382 set_bit(HCI_RAW, &hdev->flags);
7383 mgmt_index_added(hdev);
7388 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: rejected while powered, requires a
 * non-ANY address and a driver-provided set_bdaddr callback. Stores the
 * address and, if the controller becomes fully configured, re-registers the
 * index as configured and schedules power-on.
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
7392 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7393 void *data, u16 len)
7395 struct mgmt_cp_set_public_address *cp = data;
7399 BT_DBG("%s", hdev->name);
7401 if (hdev_is_powered(hdev))
7402 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7403 MGMT_STATUS_REJECTED);
7405 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7406 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7407 MGMT_STATUS_INVALID_PARAMS);
7409 if (!hdev->set_bdaddr)
7410 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7411 MGMT_STATUS_NOT_SUPPORTED);
7415 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7416 bacpy(&hdev->public_addr, &cp->bdaddr);
7418 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7425 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7426 err = new_options(hdev, sk);
7428 if (is_configured(hdev)) {
7429 mgmt_index_removed(hdev);
7431 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7433 hci_dev_set_flag(hdev, HCI_CONFIG);
7434 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7436 queue_work(hdev->req_workqueue, &hdev->power_on);
7440 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_NAME_UPDATE for a BR/EDR remote device: the name is
 * packed as an EIR_NAME_COMPLETE field after the address header.
 * NOTE(review): appears to be a Tizen-specific event (mgmt_tizen.h);
 * fragmentary listing — buffer declaration lines are missing.
 */
7445 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
7449 struct mgmt_ev_device_name_update *ev = (void *)buf;
7455 bacpy(&ev->addr.bdaddr, bdaddr);
7456 ev->addr.type = BDADDR_BREDR;
7458 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7461 ev->eir_len = cpu_to_le16(eir_len);
7463 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
7464 sizeof(*ev) + eir_len, NULL);
/* HCI completion callback for READ_LOCAL_OOB_EXT_DATA: validates the HCI
 * reply size, picks 192-bit and/or 256-bit hash+randomizer depending on
 * which HCI command ran (and HCI_SC_ONLY), packs them as EIR fields into
 * the mgmt response, and on success broadcasts LOCAL_OOB_DATA_UPDATED to
 * other sockets that opted in.
 * NOTE(review): fragmentary listing — h192/r192/h256/r256 assignment lines
 * and some branches are missing from this extraction.
 */
7468 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
7469 u16 opcode, struct sk_buff *skb)
7471 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7472 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7473 u8 *h192, *r192, *h256, *r256;
7474 struct mgmt_pending_cmd *cmd;
7478 BT_DBG("%s status %u", hdev->name, status);
7480 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
7484 mgmt_cp = cmd->param;
7487 status = mgmt_status(status);
7494 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
7495 struct hci_rp_read_local_oob_data *rp;
7497 if (skb->len != sizeof(*rp)) {
7498 status = MGMT_STATUS_FAILED;
7501 status = MGMT_STATUS_SUCCESS;
7502 rp = (void *)skb->data;
/* 5 bytes class-of-device EIR + 18 bytes each for hash and randomizer. */
7504 eir_len = 5 + 18 + 18;
7511 struct hci_rp_read_local_oob_ext_data *rp;
7513 if (skb->len != sizeof(*rp)) {
7514 status = MGMT_STATUS_FAILED;
7517 status = MGMT_STATUS_SUCCESS;
7518 rp = (void *)skb->data;
7520 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7521 eir_len = 5 + 18 + 18;
7525 eir_len = 5 + 18 + 18 + 18 + 18;
7535 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7542 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7543 hdev->dev_class, 3);
7546 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7547 EIR_SSP_HASH_C192, h192, 16);
7548 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7549 EIR_SSP_RAND_R192, r192, 16);
7553 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7554 EIR_SSP_HASH_C256, h256, 16);
7555 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7556 EIR_SSP_RAND_R256, r256, 16);
7560 mgmt_rp->type = mgmt_cp->type;
7561 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7563 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7564 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7565 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7566 if (err < 0 || status)
/* Requester now receives future OOB-data-updated events too. */
7569 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7571 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7572 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7573 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7576 mgmt_pending_remove(cmd);
/* Queue the HCI request that fetches local SSP OOB data: the extended
 * (P-256) variant when BR/EDR secure connections are enabled, otherwise the
 * legacy command. Completion is handled by read_local_oob_ext_data_complete.
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
7579 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7580 struct mgmt_cp_read_local_oob_ext_data *cp)
7582 struct mgmt_pending_cmd *cmd;
7583 struct hci_request req;
7586 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7591 hci_req_init(&req, hdev);
7593 if (bredr_sc_enabled(hdev))
7594 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7596 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7598 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7600 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler. For BR/EDR it defers to the HCI
 * round-trip (read_local_ssp_oob_req) when SSP is enabled; for LE it builds
 * the OOB blob synchronously: address + role + optional SC hash/randomizer
 * + flags. Privacy mode is rejected because the active RPA cannot be
 * reported (see inline comment). On success the reply is also broadcast as
 * LOCAL_OOB_DATA_UPDATED.
 * NOTE(review): fragmentary listing — several branch/cleanup lines missing.
 */
7607 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7608 void *data, u16 data_len)
7610 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7611 struct mgmt_rp_read_local_oob_ext_data *rp;
7614 u8 status, flags, role, addr[7], hash[16], rand[16];
7617 BT_DBG("%s", hdev->name);
7619 if (hdev_is_powered(hdev)) {
7621 case BIT(BDADDR_BREDR):
7622 status = mgmt_bredr_support(hdev);
7628 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7629 status = mgmt_le_support(hdev);
/* LE case: addr(9) + role(3) + hash(18) + rand(18) + flags(3). */
7633 eir_len = 9 + 3 + 18 + 18 + 3;
7636 status = MGMT_STATUS_INVALID_PARAMS;
7641 status = MGMT_STATUS_NOT_POWERED;
7645 rp_len = sizeof(*rp) + eir_len;
7646 rp = kmalloc(rp_len, GFP_ATOMIC);
7657 case BIT(BDADDR_BREDR):
7658 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7659 err = read_local_ssp_oob_req(hdev, sk, cp);
7660 hci_dev_unlock(hdev);
7664 status = MGMT_STATUS_FAILED;
7667 eir_len = eir_append_data(rp->eir, eir_len,
7669 hdev->dev_class, 3);
7672 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7673 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7674 smp_generate_oob(hdev, hash, rand) < 0) {
7675 hci_dev_unlock(hdev);
7676 status = MGMT_STATUS_FAILED;
7680 /* This should return the active RPA, but since the RPA
7681 * is only programmed on demand, it is really hard to fill
7682 * this in at the moment. For now disallow retrieving
7683 * local out-of-band data when privacy is in use.
7685 * Returning the identity address will not help here since
7686 * pairing happens before the identity resolving key is
7687 * known and thus the connection establishment happens
7688 * based on the RPA and not the identity address.
7690 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7691 hci_dev_unlock(hdev);
7692 status = MGMT_STATUS_REJECTED;
7696 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7697 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7698 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7699 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7700 memcpy(addr, &hdev->static_addr, 6);
7703 memcpy(addr, &hdev->bdaddr, 6);
7707 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7708 addr, sizeof(addr));
7710 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7715 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7716 &role, sizeof(role));
7718 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7719 eir_len = eir_append_data(rp->eir, eir_len,
7721 hash, sizeof(hash));
7723 eir_len = eir_append_data(rp->eir, eir_len,
7725 rand, sizeof(rand));
7728 flags = mgmt_get_adv_discov_flags(hdev);
7730 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7731 flags |= LE_AD_NO_BREDR;
7733 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7734 &flags, sizeof(flags));
7738 hci_dev_unlock(hdev);
7740 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7742 status = MGMT_STATUS_SUCCESS;
7745 rp->type = cp->type;
7746 rp->eir_len = cpu_to_le16(eir_len);
7748 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7749 status, rp, sizeof(*rp) + eir_len);
7750 if (err < 0 || status)
7753 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7754 rp, sizeof(*rp) + eir_len,
7755 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of MGMT_ADV_FLAG_* values this controller supports.
 * TX power is advertised either when the controller reported a valid power
 * level or when extended advertising is available (where it is always
 * valid); secondary-PHY flags depend on LE 2M/Coded feature bits.
 * NOTE(review): fragmentary listing — flags declaration/return missing.
 */
7763 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7767 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7768 flags |= MGMT_ADV_FLAG_DISCOV;
7769 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7770 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7771 flags |= MGMT_ADV_FLAG_APPEARANCE;
7772 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7774 /* In extended adv TX_POWER returned from Set Adv Param
7775 * will be always valid.
7777 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7778 ext_adv_capable(hdev))
7779 flags |= MGMT_ADV_FLAG_TX_POWER;
7781 if (ext_adv_capable(hdev)) {
7782 flags |= MGMT_ADV_FLAG_SEC_1M;
7784 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7785 flags |= MGMT_ADV_FLAG_SEC_2M;
7787 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7788 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler: returns supported flags, data-length
 * limits, max/current instance counts and the list of instance ids (one
 * byte each, hence rp_len = sizeof(*rp) + adv_instance_cnt).
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
7794 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7795 void *data, u16 data_len)
7797 struct mgmt_rp_read_adv_features *rp;
7800 struct adv_info *adv_instance;
7801 u32 supported_flags;
7804 BT_DBG("%s", hdev->name);
7806 if (!lmp_le_capable(hdev))
7807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7808 MGMT_STATUS_REJECTED);
7812 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7813 rp = kmalloc(rp_len, GFP_ATOMIC);
7815 hci_dev_unlock(hdev);
7819 supported_flags = get_supported_adv_flags(hdev);
7821 rp->supported_flags = cpu_to_le32(supported_flags);
7822 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7823 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7824 rp->max_instances = HCI_MAX_ADV_INSTANCES;
7825 rp->num_instances = hdev->adv_instance_cnt;
7827 instance = rp->instance;
7828 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7829 *instance = adv_instance->instance;
7833 hci_dev_unlock(hdev);
7835 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7836 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Length the local name occupies when appended as an AD element
 * (name bytes + 3-byte element overhead, via append_local_name).
 */
7843 static u8 calculate_name_len(struct hci_dev *hdev)
7845 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7847 return append_local_name(hdev, buf, 0);
/* Max TLV payload a caller may supply for adv/scan-rsp data, after
 * subtracting space reserved for kernel-managed elements selected by
 * adv_flags (flags, TX power, local name, appearance).
 * NOTE(review): fragmentary listing — the per-flag subtraction amounts and
 * return line are missing from this extraction.
 */
7850 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7853 u8 max_len = HCI_MAX_AD_LENGTH;
7856 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7857 MGMT_ADV_FLAG_LIMITED_DISCOV |
7858 MGMT_ADV_FLAG_MANAGED_FLAGS))
7861 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7864 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7865 max_len -= calculate_name_len(hdev);
7867 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True when the kernel owns the Flags AD element for this instance. */
7874 static bool flags_managed(u32 adv_flags)
7876 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7877 MGMT_ADV_FLAG_LIMITED_DISCOV |
7878 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True when the kernel owns the TX-power AD element. */
7881 static bool tx_power_managed(u32 adv_flags)
7883 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True when the kernel owns the local-name AD element. */
7886 static bool name_managed(u32 adv_flags)
7888 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True when the kernel owns the appearance AD element. */
7891 static bool appearance_managed(u32 adv_flags)
7893 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied adv/scan-rsp TLV data: rejects payloads longer
 * than the available space, TLV elements the kernel manages itself
 * (flags, TX power, names, appearance, per adv_flags), and any element
 * whose declared length runs past the buffer.
 * NOTE(review): fragmentary listing — length check and return lines are
 * missing from this extraction.
 */
7896 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7897 u8 len, bool is_adv_data)
7902 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7907 /* Make sure that the data is correctly formatted. */
7908 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7911 if (data[i + 1] == EIR_FLAGS &&
7912 (!is_adv_data || flags_managed(adv_flags)))
7915 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7918 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7921 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7924 if (data[i + 1] == EIR_APPEARANCE &&
7925 appearance_managed(adv_flags))
7928 /* If the current field length would exceed the total data
7929 * length, then it's invalid.
7931 if (i + cur_len >= len)
/* HCI completion for ADD_ADVERTISING: on failure, removes every instance
 * still marked pending (cancelling the adv timeout if the current instance
 * is among them) and emits Advertising Removed; then completes or fails
 * the pending mgmt command.
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
7938 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
7941 struct mgmt_pending_cmd *cmd;
7942 struct mgmt_cp_add_advertising *cp;
7943 struct mgmt_rp_add_advertising rp;
7944 struct adv_info *adv_instance, *n;
7947 BT_DBG("status %d", status);
7951 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
7953 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
7954 if (!adv_instance->pending)
7958 adv_instance->pending = false;
7962 instance = adv_instance->instance;
7964 if (hdev->cur_adv_instance == instance)
7965 cancel_adv_timeout(hdev);
7967 hci_remove_adv_instance(hdev, instance);
7968 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
7975 rp.instance = cp->instance;
7978 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7979 mgmt_status(status));
7981 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7982 mgmt_status(status), &rp, sizeof(rp));
7984 mgmt_pending_remove(cmd);
7987 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_ADVERTISING handler: validates instance id, payload length,
 * flags (including that at most one secondary-PHY flag is set) and TLV
 * data; registers/updates the instance; then either completes immediately
 * (powered off, HCI_ADVERTISING set, or nothing to schedule) or queues an
 * HCI request to (re)schedule advertising.
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
7990 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7991 void *data, u16 data_len)
7993 struct mgmt_cp_add_advertising *cp = data;
7994 struct mgmt_rp_add_advertising rp;
7996 u32 supported_flags, phy_flags;
7998 u16 timeout, duration;
7999 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
8000 u8 schedule_instance = 0;
8001 struct adv_info *next_instance;
8003 struct mgmt_pending_cmd *cmd;
8004 struct hci_request req;
8006 BT_DBG("%s", hdev->name);
8008 status = mgmt_le_support(hdev);
8010 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8013 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
8014 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8015 MGMT_STATUS_INVALID_PARAMS);
8017 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8018 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8019 MGMT_STATUS_INVALID_PARAMS);
8021 flags = __le32_to_cpu(cp->flags);
8022 timeout = __le16_to_cpu(cp->timeout);
8023 duration = __le16_to_cpu(cp->duration);
8025 /* The current implementation only supports a subset of the specified
8026 * flags. Also need to check mutual exclusiveness of sec flags.
8028 supported_flags = get_supported_adv_flags(hdev);
8029 phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more than one
 * secondary-PHY bit is set (clears the lowest set bit and tests the rest).
 */
8030 if (flags & ~supported_flags ||
8031 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8032 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8033 MGMT_STATUS_INVALID_PARAMS);
8037 if (timeout && !hdev_is_powered(hdev)) {
8038 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8039 MGMT_STATUS_REJECTED);
8043 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8044 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8045 pending_find(MGMT_OP_SET_LE, hdev)) {
8046 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8051 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8052 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8053 cp->scan_rsp_len, false)) {
8054 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8055 MGMT_STATUS_INVALID_PARAMS);
8059 err = hci_add_adv_instance(hdev, cp->instance, flags,
8060 cp->adv_data_len, cp->data,
8062 cp->data + cp->adv_data_len,
8065 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8066 MGMT_STATUS_FAILED);
8070 /* Only trigger an advertising added event if a new instance was
8073 if (hdev->adv_instance_cnt > prev_instance_cnt)
8074 mgmt_advertising_added(sk, hdev, cp->instance);
8076 if (hdev->cur_adv_instance == cp->instance) {
8077 /* If the currently advertised instance is being changed then
8078 * cancel the current advertising and schedule the next
8079 * instance. If there is only one instance then the overridden
8080 * advertising data will be visible right away.
8082 cancel_adv_timeout(hdev);
8084 next_instance = hci_get_next_instance(hdev, cp->instance);
8086 schedule_instance = next_instance->instance;
8087 } else if (!hdev->adv_instance_timeout) {
8088 /* Immediately advertise the new instance if no other
8089 * instance is currently being advertised.
8091 schedule_instance = cp->instance;
8094 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8095 * there is no instance to be advertised then we have no HCI
8096 * communication to make. Simply return.
8098 if (!hdev_is_powered(hdev) ||
8099 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8100 !schedule_instance) {
8101 rp.instance = cp->instance;
8102 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8103 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8107 /* We're good to go, update advertising data, parameters, and start
8110 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8117 hci_req_init(&req, hdev);
8119 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
8122 err = hci_req_run(&req, add_advertising_complete);
8125 mgmt_pending_remove(cmd);
8128 hci_dev_unlock(hdev);
/* HCI completion for REMOVE_ADVERTISING. Always reports success: a failure
 * here only means advertising could not be disabled, while the instance
 * itself was already removed (see inline comment).
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
8133 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
8136 struct mgmt_pending_cmd *cmd;
8137 struct mgmt_cp_remove_advertising *cp;
8138 struct mgmt_rp_remove_advertising rp;
8140 BT_DBG("status %d", status);
8144 /* A failure status here only means that we failed to disable
8145 * advertising. Otherwise, the advertising instance has been removed,
8146 * so report success.
8148 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
8153 rp.instance = cp->instance;
8155 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
8157 mgmt_pending_remove(cmd);
8160 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler: instance 0 means "all". Clears the
 * instance(s), disables advertising if none remain, and only round-trips
 * to HCI when there are queued commands and the controller is powered
 * without HCI_ADVERTISING set.
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
8163 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8164 void *data, u16 data_len)
8166 struct mgmt_cp_remove_advertising *cp = data;
8167 struct mgmt_rp_remove_advertising rp;
8168 struct mgmt_pending_cmd *cmd;
8169 struct hci_request req;
8172 BT_DBG("%s", hdev->name);
8176 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8177 err = mgmt_cmd_status(sk, hdev->id,
8178 MGMT_OP_REMOVE_ADVERTISING,
8179 MGMT_STATUS_INVALID_PARAMS);
8183 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8184 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8185 pending_find(MGMT_OP_SET_LE, hdev)) {
8186 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8191 if (list_empty(&hdev->adv_instances)) {
8192 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8193 MGMT_STATUS_INVALID_PARAMS);
8197 hci_req_init(&req, hdev);
8199 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8201 if (list_empty(&hdev->adv_instances))
8202 __hci_req_disable_advertising(&req);
8204 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
8205 * flag is set or the device isn't powered then we have no HCI
8206 * communication to make. Simply return.
8208 if (skb_queue_empty(&req.cmd_q) ||
8209 !hdev_is_powered(hdev) ||
8210 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8211 hci_req_purge(&req);
8212 rp.instance = cp->instance;
8213 err = mgmt_cmd_complete(sk, hdev->id,
8214 MGMT_OP_REMOVE_ADVERTISING,
8215 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8219 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8226 err = hci_req_run(&req, remove_advertising_complete);
8228 mgmt_pending_remove(cmd);
8231 hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_SIZE_INFO handler: reports the usable adv-data and
 * scan-rsp-data lengths for the requested flags, without touching state.
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
8236 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8237 void *data, u16 data_len)
8239 struct mgmt_cp_get_adv_size_info *cp = data;
8240 struct mgmt_rp_get_adv_size_info rp;
8241 u32 flags, supported_flags;
8244 BT_DBG("%s", hdev->name);
8246 if (!lmp_le_capable(hdev))
8247 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8248 MGMT_STATUS_REJECTED);
8250 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
8251 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8252 MGMT_STATUS_INVALID_PARAMS);
8254 flags = __le32_to_cpu(cp->flags);
8256 /* The current implementation only supports a subset of the specified
8259 supported_flags = get_supported_adv_flags(hdev);
8260 if (flags & ~supported_flags)
8261 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8262 MGMT_STATUS_INVALID_PARAMS);
8264 rp.instance = cp->instance;
8265 rp.flags = cp->flags;
8266 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8267 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8269 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8270 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for standard mgmt opcodes, indexed by opcode value
 * (entry 0 is the unused opcode 0x0000). Each entry: handler, fixed or
 * minimum parameter size, and optional HCI_MGMT_* capability flags
 * (UNTRUSTED = allowed on untrusted sockets, UNCONFIGURED = allowed on
 * unconfigured controllers, variable-length where flagged).
 * NOTE(review): fragmentary listing — some flag lines are missing.
 */
8275 static const struct hci_mgmt_handler mgmt_handlers[] = {
8276 { NULL }, /* 0x0000 (no command) */
8277 { read_version, MGMT_READ_VERSION_SIZE,
8279 HCI_MGMT_UNTRUSTED },
8280 { read_commands, MGMT_READ_COMMANDS_SIZE,
8282 HCI_MGMT_UNTRUSTED },
8283 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8285 HCI_MGMT_UNTRUSTED },
8286 { read_controller_info, MGMT_READ_INFO_SIZE,
8287 HCI_MGMT_UNTRUSTED },
8288 { set_powered, MGMT_SETTING_SIZE },
8289 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8290 { set_connectable, MGMT_SETTING_SIZE },
8291 { set_fast_connectable, MGMT_SETTING_SIZE },
8292 { set_bondable, MGMT_SETTING_SIZE },
8293 { set_link_security, MGMT_SETTING_SIZE },
8294 { set_ssp, MGMT_SETTING_SIZE },
8295 { set_hs, MGMT_SETTING_SIZE },
8296 { set_le, MGMT_SETTING_SIZE },
8297 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8298 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8299 { add_uuid, MGMT_ADD_UUID_SIZE },
8300 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8301 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8303 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8305 { disconnect, MGMT_DISCONNECT_SIZE },
8306 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8307 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8308 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8309 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8310 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8311 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8312 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8313 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8314 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8315 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8316 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8317 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8318 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8320 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8321 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8322 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8323 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8324 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8325 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8326 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8327 { set_advertising, MGMT_SETTING_SIZE },
8328 { set_bredr, MGMT_SETTING_SIZE },
8329 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8330 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8331 { set_secure_conn, MGMT_SETTING_SIZE },
8332 { set_debug_keys, MGMT_SETTING_SIZE },
8333 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8334 { load_irks, MGMT_LOAD_IRKS_SIZE,
8336 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8337 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8338 { add_device, MGMT_ADD_DEVICE_SIZE },
8339 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8340 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8342 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8344 HCI_MGMT_UNTRUSTED },
8345 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8346 HCI_MGMT_UNCONFIGURED |
8347 HCI_MGMT_UNTRUSTED },
8348 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8349 HCI_MGMT_UNCONFIGURED },
8350 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8351 HCI_MGMT_UNCONFIGURED },
8352 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8354 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8355 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8357 HCI_MGMT_UNTRUSTED },
8358 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8359 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8361 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8362 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8363 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8364 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8365 HCI_MGMT_UNTRUSTED },
8366 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8367 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8368 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
/* Tizen vendor-specific mgmt dispatch table (opcodes from mgmt_tizen.h):
 * advertising parameter/data control, LE white-list management, RSSI
 * monitoring and LE discovery commands.
 * NOTE(review): fragmentary listing — some flag lines are missing.
 */
8372 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
8373 { NULL }, /* 0x0000 (no command) */
8374 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
8375 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
8377 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
8379 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
8380 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
8381 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
8382 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
8383 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
8384 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
8385 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
8386 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
8387 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
/* Announce a newly registered controller index: raw devices are skipped;
 * unconfigured controllers get UNCONF_INDEX_ADDED, configured ones
 * INDEX_ADDED, and an EXT_INDEX_ADDED is always sent to extended-index
 * listeners.
 * NOTE(review): fragmentary listing — the ev field setup and AMP/default
 * branches of the dev_type switch are missing from this extraction.
 */
8391 void mgmt_index_added(struct hci_dev *hdev)
8393 struct mgmt_ev_ext_index ev;
8395 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8398 switch (hdev->dev_type) {
8400 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8401 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8402 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8405 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8406 HCI_MGMT_INDEX_EVENTS);
8419 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8420 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller index removal: fails all pending commands with
 * INVALID_INDEX, then mirrors mgmt_index_added's event selection
 * (UNCONF_INDEX_REMOVED / INDEX_REMOVED plus EXT_INDEX_REMOVED).
 * NOTE(review): fragmentary listing — ev field setup and parts of the
 * dev_type switch are missing from this extraction.
 */
8423 void mgmt_index_removed(struct hci_dev *hdev)
8425 struct mgmt_ev_ext_index ev;
8426 u8 status = MGMT_STATUS_INVALID_INDEX;
8428 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8431 switch (hdev->dev_type) {
8433 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8435 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8436 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8437 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8440 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8441 HCI_MGMT_INDEX_EVENTS);
8454 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8455 HCI_MGMT_EXT_INDEX_EVENTS);
8458 /* This function requires the caller holds hdev->lock */
/* Re-queue stored LE connection params onto the pending-connection or
 * pending-report lists after power-on, according to each entry's
 * auto_connect policy. list_del_init first so entries are never on two
 * action lists at once.
 * NOTE(review): fragmentary listing — break statements and the switch's
 * remaining cases are missing from this extraction.
 */
8459 static void restart_le_actions(struct hci_dev *hdev)
8461 struct hci_conn_params *p;
8463 list_for_each_entry(p, &hdev->le_conn_params, list) {
8464 /* Needed for AUTO_OFF case where might not "really"
8465 * have been powered off.
8467 list_del_init(&p->action);
8469 switch (p->auto_connect) {
8470 case HCI_AUTO_CONN_DIRECT:
8471 case HCI_AUTO_CONN_ALWAYS:
8472 list_add(&p->action, &hdev->pend_le_conns);
8474 case HCI_AUTO_CONN_REPORT:
8475 list_add(&p->action, &hdev->pend_le_reports);
/* Power-on completion: restarts LE auto-connect actions and background
 * scanning, then answers any pending SET_POWERED commands and publishes
 * New Settings.
 * NOTE(review): fragmentary listing — error-path lines are missing.
 */
8483 void mgmt_power_on(struct hci_dev *hdev, int err)
8485 struct cmd_lookup match = { NULL, hdev };
8487 BT_DBG("err %d", err);
8492 restart_le_actions(hdev);
8493 hci_update_background_scan(hdev);
8496 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8498 new_settings(hdev, match.sk);
8503 hci_dev_unlock(hdev);
/* Power-off path: answers pending SET_POWERED commands, fails every other
 * pending command with INVALID_INDEX (unregister) or NOT_POWERED, clears
 * a non-zero class-of-device towards listeners, and publishes New
 * Settings.
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
8506 void __mgmt_power_off(struct hci_dev *hdev)
8508 struct cmd_lookup match = { NULL, hdev };
8509 u8 status, zero_cod[] = { 0, 0, 0 };
8511 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8513 /* If the power off is because of hdev unregistration let
8514 * use the appropriate INVALID_INDEX status. Otherwise use
8515 * NOT_POWERED. We cover both scenarios here since later in
8516 * mgmt_index_removed() any hci_conn callbacks will have already
8517 * been triggered, potentially causing misleading DISCONNECTED
8520 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8521 status = MGMT_STATUS_INVALID_INDEX;
8523 status = MGMT_STATUS_NOT_POWERED;
8525 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8527 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8528 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8529 zero_cod, sizeof(zero_cod),
8530 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8531 ext_info_changed(hdev, NULL);
8534 new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command; -ERFKILL maps to the dedicated
 * RFKILLED status, anything else to FAILED.
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
8540 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8542 struct mgmt_pending_cmd *cmd;
8545 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8549 if (err == -ERFKILL)
8550 status = MGMT_STATUS_RFKILLED;
8552 status = MGMT_STATUS_FAILED;
8554 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8556 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a BR/EDR link key; store_hint tells
 * userspace whether to persist it.
 */
8559 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8562 struct mgmt_ev_new_link_key ev;
8564 memset(&ev, 0, sizeof(ev));
8566 ev.store_hint = persistent;
8567 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8568 ev.key.addr.type = BDADDR_BREDR;
8569 ev.key.type = key->type;
8570 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8571 ev.key.pin_len = key->pin_len;
8573 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK type + authenticated flag to the MGMT_LTK_* wire value;
 * unknown types fall back to UNAUTHENTICATED.
 * NOTE(review): fragmentary listing — the case labels before each
 * authenticated check are missing from this extraction.
 */
8576 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8578 switch (ltk->type) {
8581 if (ltk->authenticated)
8582 return MGMT_LTK_AUTHENTICATED;
8583 return MGMT_LTK_UNAUTHENTICATED;
8585 if (ltk->authenticated)
8586 return MGMT_LTK_P256_AUTH;
8587 return MGMT_LTK_P256_UNAUTH;
8588 case SMP_LTK_P256_DEBUG:
8589 return MGMT_LTK_P256_DEBUG;
8592 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY. store_hint is forced to 0 for
 * non-identity random addresses (RPA/NRPA: top two bits != 0b11), since
 * such addresses change and the key cannot be re-associated. Only the
 * first enc_size bytes of the key value are significant; the rest is
 * zeroed.
 * NOTE(review): fragmentary listing — interleaved lines missing.
 */
8595 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8597 struct mgmt_ev_new_long_term_key ev;
8599 memset(&ev, 0, sizeof(ev));
8601 /* Devices using resolvable or non-resolvable random addresses
8602 * without providing an identity resolving key don't require
8603 * to store long term keys. Their addresses will change the
8606 * Only when a remote device provides an identity address
8607 * make sure the long term key is stored. If the remote
8608 * identity is known, the long term keys are internally
8609 * mapped to the identity address. So allow static random
8610 * and public addresses here.
8612 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8613 (key->bdaddr.b[5] & 0xc0) != 0xc0)
8614 ev.store_hint = 0x00;
8616 ev.store_hint = persistent;
8618 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8619 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8620 ev.key.type = mgmt_ltk_type(key);
8621 ev.key.enc_size = key->enc_size;
8622 ev.key.ediv = key->ediv;
8623 ev.key.rand = key->rand;
8625 if (key->type == SMP_LTK)
8628 /* Make sure we copy only the significant bytes based on the
8629 * encryption key size, and set the rest of the value to zeroes.
8631 memcpy(ev.key.val, key->val, key->enc_size);
8632 memset(ev.key.val + key->enc_size, 0,
8633 sizeof(ev.key.val) - key->enc_size);
8635 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a MGMT_EV_NEW_IRK event for a new identity resolving key,
 * carrying both the resolvable private address (rpa) it was seen under
 * and the identity address/IRK value itself.
 */
8638 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8640 struct mgmt_ev_new_irk ev;
8642 memset(&ev, 0, sizeof(ev));
8644 ev.store_hint = persistent;
8646 bacpy(&ev.rpa, &irk->rpa);
8647 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8648 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8649 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8651 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a MGMT_EV_NEW_CSRK event for a new connection signature
 * resolving key.  As with LTKs, the store hint is forced to 0 for
 * non-identity random addresses.
 * NOTE(review): the signature continues on an unshown line (presumably
 * the "bool persistent" parameter) — confirm against the full source.
 */
8654 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8657 struct mgmt_ev_new_csrk ev;
8659 memset(&ev, 0, sizeof(ev));
8661 /* Devices using resolvable or non-resolvable random addresses
8662 * without providing an identity resolving key don't require
8663 * to store signature resolving keys. Their addresses will change
8664 * the next time around.
8666 * Only when a remote device provides an identity address
8667 * make sure the signature resolving key is stored. So allow
8668 * static random and public addresses here.
/* Top two bits 0b11 mark a static random address (identity-capable). */
8670 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8671 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
8672 ev.store_hint = 0x00;
8674 ev.store_hint = persistent;
8676 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8677 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
8678 ev.key.type = csrk->type;
8679 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8681 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a MGMT_EV_NEW_CONN_PARAM event announcing updated LE connection
 * parameters for a device.  Non-identity addresses are ignored since
 * stored parameters would be useless once the address rotates.
 * All multi-byte fields are converted to little endian for the wire.
 */
8684 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8685 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8686 u16 max_interval, u16 latency, u16 timeout)
8688 struct mgmt_ev_new_conn_param ev;
/* Only identity (public or static random) addresses are worth storing. */
8690 if (!hci_is_identity_address(bdaddr, bdaddr_type))
8693 memset(&ev, 0, sizeof(ev));
8694 bacpy(&ev.addr.bdaddr, bdaddr);
8695 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8696 ev.store_hint = store_hint;
8697 ev.min_interval = cpu_to_le16(min_interval);
8698 ev.max_interval = cpu_to_le16(max_interval);
8699 ev.latency = cpu_to_le16(latency);
8700 ev.timeout = cpu_to_le16(timeout);
8702 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit a MGMT_EV_DEVICE_CONNECTED event.  The variable-length EIR tail
 * is either the LE advertising data captured on the connection, or (for
 * BR/EDR) the remote name plus class of device — never both, to keep
 * the EIR fields unique.
 * NOTE(review): sampled listing — the declarations of "buf"/"eir_len"
 * and several brace/else lines are not shown here.
 */
8705 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
8706 u32 flags, u8 *name, u8 name_len)
8709 struct mgmt_ev_device_connected *ev = (void *) buf;
8712 bacpy(&ev->addr.bdaddr, &conn->dst);
8713 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8715 ev->flags = __cpu_to_le32(flags);
8717 /* We must ensure that the EIR Data fields are ordered and
8718 * unique. Keep it simple for now and avoid the problem by not
8719 * adding any BR/EDR data to the LE adv.
8721 if (conn->le_adv_data_len > 0) {
8722 memcpy(&ev->eir[eir_len],
8723 conn->le_adv_data, conn->le_adv_data_len);
8724 eir_len = conn->le_adv_data_len;
/* BR/EDR path: append the remote name, then the CoD if non-zero. */
8727 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
8730 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
8731 eir_len = eir_append_data(ev->eir, eir_len,
8733 conn->dev_class, 3);
8736 ev->eir_len = cpu_to_le16(eir_len);
8738 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
8739 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command with status 0 and remove it.  @data is a struct sock **
 * used to hand the command's socket back to the caller (the lines
 * assigning *sk are not shown in this sampled listing — confirm).
 */
8742 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8744 struct sock **sk = data;
8746 cmd->cmd_complete(cmd, 0);
8751 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: broadcast the device-unpaired event
 * for a pending UNPAIR_DEVICE command, then complete and remove the
 * command.  @data is the hci_dev the command was issued against.
 */
8754 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8756 struct hci_dev *hdev = data;
8757 struct mgmt_cp_unpair_device *cp = cmd->param;
8759 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8761 cmd->cmd_complete(cmd, 0);
8762 mgmt_pending_remove(cmd);
/* Return whether a power-down is in progress, i.e. whether a pending
 * MGMT_OP_SET_POWERED command with val == 0 exists for @hdev.
 * NOTE(review): sampled listing — the lines inspecting cmd/cp and the
 * return statements (original ~8771-8779) are not shown here.
 */
8765 bool mgmt_powering_down(struct hci_dev *hdev)
8767 struct mgmt_pending_cmd *cmd;
8768 struct mgmt_mode *cp;
8770 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a MGMT_EV_DEVICE_DISCONNECTED event for a terminated connection
 * and resolve any pending DISCONNECT / UNPAIR_DEVICE commands.  When a
 * power-down is in progress and this was the last connection, the
 * deferred power_off work is flushed immediately.
 * NOTE(review): sampled listing — "unlock"/return labels, the line
 * setting ev.reason, and intermediate braces are not shown here.
 */
8781 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
8782 u8 link_type, u8 addr_type, u8 reason,
8783 bool mgmt_connected)
8785 struct mgmt_ev_device_disconnected ev;
8786 struct sock *sk = NULL;
8788 /* The connection is still in hci_conn_hash so test for 1
8789 * instead of 0 to know if this is the last one.
8791 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8792 cancel_delayed_work(&hdev->power_off);
8793 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Only report disconnects of connections userspace knew about. */
8796 if (!mgmt_connected)
8799 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Resolve pending DISCONNECT commands; sk receives the issuing socket
 * so the event below can skip echoing back to it.
 */
8802 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
8804 bacpy(&ev.addr.bdaddr, bdaddr);
8805 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8808 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
8813 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnect failed: resolve pending UNPAIR_DEVICE
 * commands, then locate the pending DISCONNECT command matching this
 * address/type and complete it with the translated HCI status.
 * NOTE(review): sampled listing — the "if (!cmd) return;" guard, the
 * cp = cmd->param assignment, and the early-return lines following the
 * bacmp/type checks are not shown here.
 */
8817 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8818 u8 link_type, u8 addr_type, u8 status)
8820 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8821 struct mgmt_cp_disconnect *cp;
8822 struct mgmt_pending_cmd *cmd;
8824 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8827 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* Make sure the pending command is actually for this peer. */
8833 if (bacmp(bdaddr, &cp->addr.bdaddr))
8836 if (cp->addr.type != bdaddr_type)
8839 cmd->cmd_complete(cmd, mgmt_status(status));
8840 mgmt_pending_remove(cmd);
/* Emit a MGMT_EV_CONNECT_FAILED event with the translated HCI status.
 * Mirrors mgmt_device_disconnected(): if a power-down is pending and
 * this was the last connection, flush the power_off work immediately.
 */
8843 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8844 u8 addr_type, u8 status)
8846 struct mgmt_ev_connect_failed ev;
8848 /* The connection is still in hci_conn_hash so test for 1
8849 * instead of 0 to know if this is the last one.
8851 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8852 cancel_delayed_work(&hdev->power_off);
8853 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8856 bacpy(&ev.addr.bdaddr, bdaddr);
8857 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8858 ev.status = mgmt_status(status);
8860 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a MGMT_EV_PIN_CODE_REQUEST event asking userspace for a PIN.
 * PIN pairing is BR/EDR-only, hence the fixed address type.  The line
 * copying @secure into the event is not shown in this sampled listing.
 */
8863 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8865 struct mgmt_ev_pin_code_request ev;
8867 bacpy(&ev.addr.bdaddr, bdaddr);
8868 ev.addr.type = BDADDR_BREDR;
8871 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending MGMT_OP_PIN_CODE_REPLY command with the translated
 * HCI status.  The "if (!cmd) return;" guard between the lookup and the
 * completion is not shown in this sampled listing.
 */
8874 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8877 struct mgmt_pending_cmd *cmd;
8879 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8883 cmd->cmd_complete(cmd, mgmt_status(status));
8884 mgmt_pending_remove(cmd);
/* Complete a pending MGMT_OP_PIN_CODE_NEG_REPLY command with the
 * translated HCI status (negative-reply counterpart of the function
 * above; the NULL guard after the lookup is likewise not shown).
 */
8887 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8890 struct mgmt_pending_cmd *cmd;
8892 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8896 cmd->cmd_complete(cmd, mgmt_status(status));
8897 mgmt_pending_remove(cmd);
/* Emit a MGMT_EV_USER_CONFIRM_REQUEST event asking userspace to confirm
 * a numeric comparison @value during pairing.  Returns the result of
 * mgmt_event().  The confirm_hint parameter is declared on an unshown
 * continuation line of the signature.
 */
8900 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8901 u8 link_type, u8 addr_type, u32 value,
8904 struct mgmt_ev_user_confirm_request ev;
8906 BT_DBG("%s", hdev->name);
8908 bacpy(&ev.addr.bdaddr, bdaddr);
8909 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8910 ev.confirm_hint = confirm_hint;
8911 ev.value = cpu_to_le32(value);
8913 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a MGMT_EV_USER_PASSKEY_REQUEST event asking userspace to enter
 * a passkey during pairing.  Returns the result of mgmt_event().
 */
8917 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8918 u8 link_type, u8 addr_type)
8920 struct mgmt_ev_user_passkey_request ev;
8922 BT_DBG("%s", hdev->name);
8924 bacpy(&ev.addr.bdaddr, bdaddr);
8925 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8927 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the user confirm/passkey (neg-)reply
 * commands below: find the pending command for @opcode, complete it
 * with the translated HCI status and remove it.  The opcode parameter
 * is declared on an unshown continuation line; the NULL guard and the
 * return statements are likewise not shown in this sampled listing.
 */
8931 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8932 u8 link_type, u8 addr_type, u8 status,
8935 struct mgmt_pending_cmd *cmd;
8937 cmd = pending_find(opcode, hdev);
8941 cmd->cmd_complete(cmd, mgmt_status(status));
8942 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending MGMT_OP_USER_CONFIRM_REPLY. */
8947 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8948 u8 link_type, u8 addr_type, u8 status)
8950 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8951 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_CONFIRM_NEG_REPLY.
 * (The "status," argument line between 8957 and 8959 is not shown.)
 */
8954 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8955 u8 link_type, u8 addr_type, u8 status)
8957 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8959 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_PASSKEY_REPLY. */
8962 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8963 u8 link_type, u8 addr_type, u8 status)
8965 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8966 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending MGMT_OP_USER_PASSKEY_NEG_REPLY.
 * (The "status," argument line between 8972 and 8974 is not shown.)
 */
8969 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8970 u8 link_type, u8 addr_type, u8 status)
8972 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8974 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a MGMT_EV_PASSKEY_NOTIFY event showing userspace the passkey the
 * remote side must enter, plus how many digits were entered so far.
 * The entered parameter is declared on an unshown continuation line.
 * Returns the result of mgmt_event().
 */
8977 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
8978 u8 link_type, u8 addr_type, u32 passkey,
8981 struct mgmt_ev_passkey_notify ev;
8983 BT_DBG("%s", hdev->name);
8985 bacpy(&ev.addr.bdaddr, bdaddr);
8986 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8987 ev.passkey = __cpu_to_le32(passkey);
8988 ev.entered = entered;
8990 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Authentication on @conn failed: emit MGMT_EV_AUTH_FAILED (skipping the
 * socket of any pending pairing command, which instead gets the failure
 * as its command completion).
 * NOTE(review): sampled listing — the line setting ev.status and the
 * "if (cmd)" guard around the completion are not shown here.
 */
8993 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
8995 struct mgmt_ev_auth_failed ev;
8996 struct mgmt_pending_cmd *cmd;
8997 u8 status = mgmt_status(hci_status);
8999 bacpy(&ev.addr.bdaddr, &conn->dst);
9000 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9003 cmd = find_pairing(conn);
9005 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9006 cmd ? cmd->sk : NULL);
9009 cmd->cmd_complete(cmd, status);
9010 mgmt_pending_remove(cmd);
/* HCI write-auth-enable completed: on failure propagate the translated
 * status to pending SET_LINK_SECURITY commands; on success sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH flag, answer
 * pending commands with the new settings, and broadcast new_settings
 * if anything changed.
 * NOTE(review): sampled listing — the "bool changed" declaration, the
 * if (status) / else / if (changed) framing lines and the sock_put()
 * cleanup are not shown here.
 */
9014 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9016 struct cmd_lookup match = { NULL, hdev };
9020 u8 mgmt_err = mgmt_status(status);
9021 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9022 cmd_status_rsp, &mgmt_err);
9026 if (test_bit(HCI_AUTH, &hdev->flags))
9027 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9029 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9031 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9035 new_settings(hdev, match.sk);
/* Queue an HCI Write Extended Inquiry Response command that clears the
 * EIR, and zero the cached copy in hdev->eir.  No-op on controllers
 * without extended inquiry support.
 */
9041 static void clear_eir(struct hci_request *req)
9043 struct hci_dev *hdev = req->hdev;
9044 struct hci_cp_write_eir cp;
9046 if (!lmp_ext_inq_capable(hdev))
9049 memset(hdev->eir, 0, sizeof(hdev->eir));
9051 memset(&cp, 0, sizeof(cp));
9053 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* HCI write-SSP-mode completed.  On failure: roll back HCI_HS_ENABLED
 * if it had just been set, and report the error to pending SET_SSP
 * commands.  On success: sync HCI_SSP_ENABLED (and clear HS when SSP
 * goes off), answer pending commands, broadcast new_settings on change,
 * and queue follow-up HCI commands (SSP debug mode and/or EIR update).
 * NOTE(review): sampled listing — the if/else framing around the
 * enable cases, the "if (changed)" guards, the sock_put() cleanup and
 * the clear_eir() else-branch are not shown here.
 */
9056 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
9058 struct cmd_lookup match = { NULL, hdev };
9059 struct hci_request req;
9060 bool changed = false;
9063 u8 mgmt_err = mgmt_status(status);
/* Enabling SSP failed: undo the speculative HS-enabled flag. */
9065 if (enable && hci_dev_test_and_clear_flag(hdev,
9067 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9068 new_settings(hdev, NULL);
9071 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
9077 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
9079 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
9081 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed cannot stay enabled without SSP. */
9084 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9087 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
9090 new_settings(hdev, match.sk);
9095 hci_req_init(&req, hdev);
9097 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9098 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
9099 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
9100 sizeof(enable), &enable);
9101 __hci_req_update_eir(&req);
9106 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup, taking a reference so it survives until
 * the caller releases it.
 */
9109 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9111 struct cmd_lookup *match = data;
9113 if (match->sk == NULL) {
9114 match->sk = cmd->sk;
9115 sock_hold(match->sk);
/* HCI write-class-of-device completed: find the socket of whichever
 * command triggered it (SET_DEV_CLASS, ADD_UUID or REMOVE_UUID) so the
 * CLASS_OF_DEV_CHANGED event skips it, then broadcast the new class.
 * NOTE(review): sampled listing — the status parameter line, the
 * "if (!status)" guard before the event, and the sock_put() cleanup
 * are not shown here.
 */
9119 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9122 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9124 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9125 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9126 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9129 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9130 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9131 ext_info_changed(hdev, NULL);
/* HCI write-local-name completed: cache the new name in hdev->dev_name
 * when no SET_LOCAL_NAME command is pending (i.e. the change came from
 * elsewhere), and broadcast LOCAL_NAME_CHANGED — unless this write is
 * part of the power-on sequence, in which case no signal is sent.
 * NOTE(review): sampled listing — the "if (status) return;" guard and
 * the "if (!cmd)" framing around the dev_name copy are not shown here.
 */
9138 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9140 struct mgmt_cp_set_local_name ev;
9141 struct mgmt_pending_cmd *cmd;
9146 memset(&ev, 0, sizeof(ev));
9147 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9148 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9150 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9152 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9154 /* If this is a HCI command related to powering on the
9155 * HCI dev don't send any mgmt signals.
9157 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9161 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9162 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9163 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Return whether the 128-bit @uuid appears in the @uuids array of
 * @uuid_count entries (byte-wise comparison).  The true/false return
 * lines are not shown in this sampled listing.
 */
9166 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9170 for (i = 0; i < uuid_count; i++) {
9171 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data in @eir and return whether any UUID it
 * advertises (16-, 32- or 128-bit, complete or incomplete lists) is in
 * the @uuids filter list.  16- and 32-bit UUIDs are expanded to 128-bit
 * form by splicing them into the Bluetooth base UUID (little-endian
 * bytes land at offsets 12..15 of the big-endian base).
 * NOTE(review): sampled listing — loop-variable declarations, the
 * field_len == 0 break, the switch header, the "return true" lines
 * after each has_uuid() hit, and the final "return false" are not
 * shown here.
 */
9178 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9182 while (parsed < eir_len) {
9183 u8 field_len = eir[0];
/* Reject a field that claims to extend past the buffer. */
9190 if (eir_len - parsed < field_len + 1)
9194 case EIR_UUID16_ALL:
9195 case EIR_UUID16_SOME:
9196 for (i = 0; i + 3 <= field_len; i += 2) {
9197 memcpy(uuid, bluetooth_base_uuid, 16);
9198 uuid[13] = eir[i + 3];
9199 uuid[12] = eir[i + 2];
9200 if (has_uuid(uuid, uuid_count, uuids))
9204 case EIR_UUID32_ALL:
9205 case EIR_UUID32_SOME:
9206 for (i = 0; i + 5 <= field_len; i += 4) {
9207 memcpy(uuid, bluetooth_base_uuid, 16);
9208 uuid[15] = eir[i + 5];
9209 uuid[14] = eir[i + 4];
9210 uuid[13] = eir[i + 3];
9211 uuid[12] = eir[i + 2];
9212 if (has_uuid(uuid, uuid_count, uuids))
9216 case EIR_UUID128_ALL:
9217 case EIR_UUID128_SOME:
9218 for (i = 0; i + 17 <= field_len; i += 16) {
9219 memcpy(uuid, eir + i + 2, 16);
9220 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length byte + field_len payload). */
9226 parsed += field_len + 1;
9227 eir += field_len + 1;
/* Schedule a restart of an active LE scan so duplicate filtering gets
 * reset and fresh RSSI values are reported.  Skipped when no scan is
 * running or when the current scan window would end before the restart
 * delay anyway (the early-return lines are not shown in this sampled
 * listing).
 */
9233 static void restart_le_scan(struct hci_dev *hdev)
9235 /* If controller is not scanning we are done. */
9236 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9239 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9240 hdev->discovery.scan_start +
9241 hdev->discovery.scan_duration))
9244 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9245 DISCOV_LE_RESTART_DELAY);
/* Apply the service-discovery result filter: drop results below the
 * configured RSSI threshold (unless the strict-duplicate-filter quirk
 * requires letting them through for a scan restart) and results whose
 * EIR/scan-response data matches none of the filter UUIDs.
 * NOTE(review): sampled listing — the "return false"/"return true"
 * lines after each check are not shown here; returns are inferred from
 * the surrounding comments.
 */
9248 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9249 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9251 /* If a RSSI threshold has been specified, and
9252 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9253 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9254 * is set, let it through for further processing, as we might need to
9257 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9258 * the results are also dropped.
9260 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9261 (rssi == HCI_RSSI_INVALID ||
9262 (rssi < hdev->discovery.rssi &&
9263 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9266 if (hdev->discovery.uuid_count != 0) {
9267 /* If a list of UUIDs is provided in filter, results with no
9268 * matching UUID should be dropped.
9270 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9271 hdev->discovery.uuids) &&
9272 !eir_has_uuids(scan_rsp, scan_rsp_len,
9273 hdev->discovery.uuid_count,
9274 hdev->discovery.uuids))
9278 /* If duplicate filtering does not report RSSI changes, then restart
9279 * scanning to ensure updated result with updated RSSI values.
9281 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9282 restart_le_scan(hdev);
9284 /* Validate RSSI value against the RSSI threshold once more. */
9285 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9286 rssi < hdev->discovery.rssi)
/* Build and emit a MGMT_EV_DEVICE_FOUND event for a discovered device.
 * Results are suppressed for non-kernel-initiated discovery (except LE
 * passive scanning with pending reports), filtered by RSSI/UUID when
 * service discovery is active, and checked against the limited
 * discoverable bit when limited discovery was requested.  The EIR tail
 * carries the advertising/EIR data, an appended CoD field when one is
 * not already present, and any scan response data.
 * NOTE(review): sampled listing — the buf[] declaration, early-return
 * lines after the various drop conditions, the "if (link_type ==
 * ACL_LINK)" framing around the CoD check, the rssi-zeroing statement
 * after line 9348, and the ev->rssi assignment are not shown here.
 */
9293 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9294 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9295 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9298 struct mgmt_ev_device_found *ev = (void *)buf;
9301 /* Don't send events for a non-kernel initiated discovery. With
9302 * LE one exception is if we have pend_le_reports > 0 in which
9303 * case we're doing passive scanning and want these events.
9305 if (!hci_discovery_active(hdev)) {
9306 if (link_type == ACL_LINK)
9308 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
9312 if (hdev->discovery.result_filtering) {
9313 /* We are using service discovery */
9314 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9319 if (hdev->discovery.limited) {
9320 /* Check for limited discoverable bit */
9322 if (!(dev_class[1] & 0x20))
9325 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9326 if (!flags || !(flags[0] & LE_AD_LIMITED))
9331 /* Make sure that the buffer is big enough. The 5 extra bytes
9332 * are for the potential CoD field.
9334 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9337 memset(buf, 0, sizeof(buf));
9339 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9340 * RSSI value was reported as 0 when not available. This behavior
9341 * is kept when using device discovery. This is required for full
9342 * backwards compatibility with the API.
9344 * However when using service discovery, the value 127 will be
9345 * returned when the RSSI is not available.
9347 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9348 link_type == ACL_LINK)
9351 bacpy(&ev->addr.bdaddr, bdaddr);
9352 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9354 ev->flags = cpu_to_le32(flags);
9357 /* Copy EIR or advertising data into event */
9358 memcpy(ev->eir, eir, eir_len);
/* Append the class of device only if the EIR doesn't already carry one. */
9360 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9362 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9365 if (scan_rsp_len > 0)
9366 /* Append scan response data to event */
9367 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9369 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9370 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9372 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a MGMT_EV_DEVICE_FOUND event carrying only the remote device's
 * name (EIR_NAME_COMPLETE) — used when a name resolution completes
 * after discovery.  The buffer reserves HCI_MAX_NAME_LENGTH + 2 bytes
 * for the single EIR field (length + type + name).
 * NOTE(review): the ev->rssi assignment and the eir_len declaration
 * are on lines not shown in this sampled listing.
 */
9375 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9376 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9378 struct mgmt_ev_device_found *ev;
9379 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9382 ev = (struct mgmt_ev_device_found *) buf;
9384 memset(buf, 0, sizeof(buf));
9386 bacpy(&ev->addr.bdaddr, bdaddr);
9387 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9390 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9393 ev->eir_len = cpu_to_le16(eir_len);
9395 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit a MGMT_EV_DISCOVERING event announcing that discovery of the
 * current discovery type started (@discovering != 0) or stopped.
 */
9398 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9400 struct mgmt_ev_discovering ev;
9402 BT_DBG("%s discovering %u", hdev->name, discovering);
9404 memset(&ev, 0, sizeof(ev));
9405 ev.type = hdev->discovery.type;
9406 ev.discovering = discovering;
9408 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Registration descriptor for the HCI control channel: the standard
 * mgmt handler table plus (behind the Tizen ifdef implied by the
 * mgmt_tizen.h include) the Tizen-specific handler table.
 */
9411 static struct hci_mgmt_chan chan = {
9412 .channel = HCI_CHANNEL_CONTROL,
9413 .handler_count = ARRAY_SIZE(mgmt_handlers),
9414 .handlers = mgmt_handlers,
9416 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
9417 .tizen_handlers = tizen_mgmt_handlers,
9419 .hdev_init = mgmt_init_hdev,
/* NOTE(review): the closing "};" and the mgmt_init() function header
 * (original ~lines 9420-9423) are not shown in this sampled listing;
 * the line below is mgmt_init()'s body registering the channel.
 */
9424 return hci_mgmt_chan_register(&chan);
/* Counterpart of mgmt_init(): unregister the management control
 * channel on module teardown.
 */
9427 void mgmt_exit(void)
9429 hci_mgmt_chan_unregister(&chan);