/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 4
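/* Example (illustrative only, not part of this file): userspace talks to this
 * interface through an HCI control-channel socket. A minimal sketch of
 * issuing MGMT_OP_READ_VERSION, assuming the usual values of the constants
 * involved (BTPROTO_HCI = 1, HCI_DEV_NONE = 0xffff, HCI_CHANNEL_CONTROL = 3,
 * MGMT_OP_READ_VERSION = 0x0001, MGMT_INDEX_NONE = 0xffff) and userspace
 * copies of struct sockaddr_hci and struct mgmt_hdr matching the kernel
 * layout:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_CONTROL,
 *	};
 *	struct mgmt_hdr hdr = {
 *		.opcode = htole16(MGMT_OP_READ_VERSION),
 *		.index  = htole16(MGMT_INDEX_NONE),
 *		.len    = htole16(0),
 *	};
 *
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 *	write(fd, &hdr, sizeof(hdr));
 *
 * The reply arrives as an MGMT_EV_CMD_COMPLETE event carrying
 * struct mgmt_rp_read_version, i.e. the version/revision defined above.
 */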
39 static const u16 mgmt_commands[] = {
40 MGMT_OP_READ_INDEX_LIST,
43 MGMT_OP_SET_DISCOVERABLE,
44 MGMT_OP_SET_CONNECTABLE,
45 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_LINK_SECURITY,
51 MGMT_OP_SET_DEV_CLASS,
52 MGMT_OP_SET_LOCAL_NAME,
55 MGMT_OP_LOAD_LINK_KEYS,
56 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_GET_CONNECTIONS,
59 MGMT_OP_PIN_CODE_REPLY,
60 MGMT_OP_PIN_CODE_NEG_REPLY,
61 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_CANCEL_PAIR_DEVICE,
64 MGMT_OP_UNPAIR_DEVICE,
65 MGMT_OP_USER_CONFIRM_REPLY,
66 MGMT_OP_USER_CONFIRM_NEG_REPLY,
67 MGMT_OP_USER_PASSKEY_REPLY,
68 MGMT_OP_USER_PASSKEY_NEG_REPLY,
69 MGMT_OP_READ_LOCAL_OOB_DATA,
70 MGMT_OP_ADD_REMOTE_OOB_DATA,
71 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72 MGMT_OP_START_DISCOVERY,
73 MGMT_OP_STOP_DISCOVERY,
76 MGMT_OP_UNBLOCK_DEVICE,
77 MGMT_OP_SET_DEVICE_ID,
78 MGMT_OP_SET_ADVERTISING,
80 MGMT_OP_SET_STATIC_ADDRESS,
81 MGMT_OP_SET_SCAN_PARAMS,
84 static const u16 mgmt_events[] = {
85 MGMT_EV_CONTROLLER_ERROR,
87 MGMT_EV_INDEX_REMOVED,
89 MGMT_EV_CLASS_OF_DEV_CHANGED,
90 MGMT_EV_LOCAL_NAME_CHANGED,
92 MGMT_EV_NEW_LONG_TERM_KEY,
93 MGMT_EV_DEVICE_CONNECTED,
94 MGMT_EV_DEVICE_DISCONNECTED,
95 MGMT_EV_CONNECT_FAILED,
96 MGMT_EV_PIN_CODE_REQUEST,
97 MGMT_EV_USER_CONFIRM_REQUEST,
98 MGMT_EV_USER_PASSKEY_REQUEST,
100 MGMT_EV_DEVICE_FOUND,
102 MGMT_EV_DEVICE_BLOCKED,
103 MGMT_EV_DEVICE_UNBLOCKED,
104 MGMT_EV_DEVICE_UNPAIRED,
105 MGMT_EV_PASSKEY_NOTIFY,
108 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
110 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
111 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
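/* Note: for mgmt purposes a controller only counts as powered when HCI is
 * actually up and HCI_AUTO_OFF is not set, i.e. a device that was only
 * brought up temporarily for setup is still reported as powered off.
 */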
114 struct list_head list;
122 /* HCI to MGMT error code conversion table */
123 static u8 mgmt_status_table[] = {
125 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
126 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
127 MGMT_STATUS_FAILED, /* Hardware Failure */
128 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
129 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
130 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
131 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
132 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
133 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
134 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
135 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
136 MGMT_STATUS_BUSY, /* Command Disallowed */
137 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
138 MGMT_STATUS_REJECTED, /* Rejected Security */
139 MGMT_STATUS_REJECTED, /* Rejected Personal */
140 MGMT_STATUS_TIMEOUT, /* Host Timeout */
141 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
142 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
143 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
144 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
145 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
146 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
147 MGMT_STATUS_BUSY, /* Repeated Attempts */
148 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
149 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
150 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
151 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
152 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
153 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
154 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
155 MGMT_STATUS_FAILED, /* Unspecified Error */
156 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
157 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
158 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
159 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
160 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
161 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
162 MGMT_STATUS_FAILED, /* Unit Link Key Used */
163 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
164 MGMT_STATUS_TIMEOUT, /* Instant Passed */
165 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
166 MGMT_STATUS_FAILED, /* Transaction Collision */
167 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
168 MGMT_STATUS_REJECTED, /* QoS Rejected */
169 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
170 MGMT_STATUS_REJECTED, /* Insufficient Security */
171 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
172 MGMT_STATUS_BUSY, /* Role Switch Pending */
173 MGMT_STATUS_FAILED, /* Slot Violation */
174 MGMT_STATUS_FAILED, /* Role Switch Failed */
175 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
176 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
177 MGMT_STATUS_BUSY, /* Host Busy Pairing */
178 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
179 MGMT_STATUS_BUSY, /* Controller Busy */
180 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
181 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
182 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
183 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
184 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
187 static u8 mgmt_status(u8 hci_status)
189 if (hci_status < ARRAY_SIZE(mgmt_status_table))
190 return mgmt_status_table[hci_status];
192 return MGMT_STATUS_FAILED;
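/* Example: the HCI "Authentication Failed" status is translated to
 * MGMT_STATUS_AUTH_FAILED via the table above, while any status code
 * beyond the end of the table falls back to MGMT_STATUS_FAILED.
 */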
195 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
198 struct mgmt_hdr *hdr;
199 struct mgmt_ev_cmd_status *ev;
202 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
204 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
208 hdr = (void *) skb_put(skb, sizeof(*hdr));
210 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
211 hdr->index = cpu_to_le16(index);
212 hdr->len = cpu_to_le16(sizeof(*ev));
214 ev = (void *) skb_put(skb, sizeof(*ev));
216 ev->opcode = cpu_to_le16(cmd);
218 err = sock_queue_rcv_skb(sk, skb);
225 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
226 void *rp, size_t rp_len)
229 struct mgmt_hdr *hdr;
230 struct mgmt_ev_cmd_complete *ev;
233 BT_DBG("sock %p", sk);
235 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
239 hdr = (void *) skb_put(skb, sizeof(*hdr));
241 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
242 hdr->index = cpu_to_le16(index);
243 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
245 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
246 ev->opcode = cpu_to_le16(cmd);
250 memcpy(ev->data, rp, rp_len);
252 err = sock_queue_rcv_skb(sk, skb);
259 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
262 struct mgmt_rp_read_version rp;
264 BT_DBG("sock %p", sk);
266 rp.version = MGMT_VERSION;
267 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
269 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
273 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
276 struct mgmt_rp_read_commands *rp;
277 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
278 const u16 num_events = ARRAY_SIZE(mgmt_events);
283 BT_DBG("sock %p", sk);
285 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
287 rp = kmalloc(rp_size, GFP_KERNEL);
291 rp->num_commands = __constant_cpu_to_le16(num_commands);
292 rp->num_events = __constant_cpu_to_le16(num_events);
294 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
295 put_unaligned_le16(mgmt_commands[i], opcode);
297 for (i = 0; i < num_events; i++, opcode++)
298 put_unaligned_le16(mgmt_events[i], opcode);
300 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
307 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
310 struct mgmt_rp_read_index_list *rp;
316 BT_DBG("sock %p", sk);
318 read_lock(&hci_dev_list_lock);
321 list_for_each_entry(d, &hci_dev_list, list) {
322 if (d->dev_type == HCI_BREDR)
326 rp_len = sizeof(*rp) + (2 * count);
327 rp = kmalloc(rp_len, GFP_ATOMIC);
329 read_unlock(&hci_dev_list_lock);
334 list_for_each_entry(d, &hci_dev_list, list) {
335 if (test_bit(HCI_SETUP, &d->dev_flags))
338 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
341 if (d->dev_type == HCI_BREDR) {
342 rp->index[count++] = cpu_to_le16(d->id);
343 BT_DBG("Added hci%u", d->id);
347 rp->num_controllers = cpu_to_le16(count);
348 rp_len = sizeof(*rp) + (2 * count);
350 read_unlock(&hci_dev_list_lock);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
360 static u32 get_supported_settings(struct hci_dev *hdev)
364 settings |= MGMT_SETTING_POWERED;
365 settings |= MGMT_SETTING_PAIRABLE;
367 if (lmp_bredr_capable(hdev)) {
368 settings |= MGMT_SETTING_CONNECTABLE;
369 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
370 settings |= MGMT_SETTING_FAST_CONNECTABLE;
371 settings |= MGMT_SETTING_DISCOVERABLE;
372 settings |= MGMT_SETTING_BREDR;
373 settings |= MGMT_SETTING_LINK_SECURITY;
375 if (lmp_ssp_capable(hdev)) {
376 settings |= MGMT_SETTING_SSP;
377 settings |= MGMT_SETTING_HS;
381 if (lmp_le_capable(hdev)) {
382 settings |= MGMT_SETTING_LE;
383 settings |= MGMT_SETTING_ADVERTISING;
389 static u32 get_current_settings(struct hci_dev *hdev)
393 if (hdev_is_powered(hdev))
394 settings |= MGMT_SETTING_POWERED;
396 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
397 settings |= MGMT_SETTING_CONNECTABLE;
399 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
400 settings |= MGMT_SETTING_FAST_CONNECTABLE;
402 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
403 settings |= MGMT_SETTING_DISCOVERABLE;
405 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
406 settings |= MGMT_SETTING_PAIRABLE;
408 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
409 settings |= MGMT_SETTING_BREDR;
411 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
412 settings |= MGMT_SETTING_LE;
414 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
415 settings |= MGMT_SETTING_LINK_SECURITY;
417 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
418 settings |= MGMT_SETTING_SSP;
420 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
421 settings |= MGMT_SETTING_HS;
423 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
424 settings |= MGMT_SETTING_ADVERTISING;
429 #define PNP_INFO_SVCLASS_ID 0x1200
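/* 0x1200 is the SDP service class of the Device ID (PnP Information)
 * record. It is skipped when building the 16-bit UUID list for EIR,
 * since the device ID information is instead exposed through the
 * dedicated EIR_DEVICE_ID field (see create_eir() below).
 */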
431 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
433 u8 *ptr = data, *uuids_start = NULL;
434 struct bt_uuid *uuid;
439 list_for_each_entry(uuid, &hdev->uuids, list) {
442 if (uuid->size != 16)
445 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
449 if (uuid16 == PNP_INFO_SVCLASS_ID)
455 uuids_start[1] = EIR_UUID16_ALL;
459 /* Stop if not enough space to put next UUID */
460 if ((ptr - data) + sizeof(u16) > len) {
461 uuids_start[1] = EIR_UUID16_SOME;
465 *ptr++ = (uuid16 & 0x00ff);
466 *ptr++ = (uuid16 & 0xff00) >> 8;
467 uuids_start[0] += sizeof(uuid16);
473 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
475 u8 *ptr = data, *uuids_start = NULL;
476 struct bt_uuid *uuid;
481 list_for_each_entry(uuid, &hdev->uuids, list) {
482 if (uuid->size != 32)
488 uuids_start[1] = EIR_UUID32_ALL;
492 /* Stop if not enough space to put next UUID */
493 if ((ptr - data) + sizeof(u32) > len) {
494 uuids_start[1] = EIR_UUID32_SOME;
498 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
500 uuids_start[0] += sizeof(u32);
506 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
508 u8 *ptr = data, *uuids_start = NULL;
509 struct bt_uuid *uuid;
514 list_for_each_entry(uuid, &hdev->uuids, list) {
515 if (uuid->size != 128)
521 uuids_start[1] = EIR_UUID128_ALL;
525 /* Stop if not enough space to put next UUID */
526 if ((ptr - data) + 16 > len) {
527 uuids_start[1] = EIR_UUID128_SOME;
531 memcpy(ptr, uuid->uuid, 16);
533 uuids_start[0] += 16;
539 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
541 struct pending_cmd *cmd;
543 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
544 if (cmd->opcode == opcode)
551 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
556 name_len = strlen(hdev->dev_name);
558 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
560 if (name_len > max_len) {
562 ptr[1] = EIR_NAME_SHORT;
564 ptr[1] = EIR_NAME_COMPLETE;
566 ptr[0] = name_len + 1;
568 memcpy(ptr + 2, hdev->dev_name, name_len);
570 ad_len += (name_len + 2);
571 ptr += (name_len + 2);
577 static void update_scan_rsp_data(struct hci_request *req)
579 struct hci_dev *hdev = req->hdev;
580 struct hci_cp_le_set_scan_rsp_data cp;
583 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
586 memset(&cp, 0, sizeof(cp));
588 len = create_scan_rsp_data(hdev, cp.data);
590 if (hdev->scan_rsp_data_len == len &&
591 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
594 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
595 hdev->scan_rsp_data_len = len;
599 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
602 static u8 get_adv_discov_flags(struct hci_dev *hdev)
604 struct pending_cmd *cmd;
606 /* If there's a pending mgmt command the flags will not yet have
607 * their final values, so check for this first.
609 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
611 struct mgmt_mode *cp = cmd->param;
613 return LE_AD_GENERAL;
614 else if (cp->val == 0x02)
615 return LE_AD_LIMITED;
617 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
618 return LE_AD_LIMITED;
619 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
620 return LE_AD_GENERAL;
626 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
628 u8 ad_len = 0, flags = 0;
630 flags |= get_adv_discov_flags(hdev);
632 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
633 if (lmp_le_br_capable(hdev))
634 flags |= LE_AD_SIM_LE_BREDR_CTRL;
635 if (lmp_host_le_br_capable(hdev))
636 flags |= LE_AD_SIM_LE_BREDR_HOST;
638 flags |= LE_AD_NO_BREDR;
642 BT_DBG("adv flags 0x%02x", flags);
652 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
654 ptr[1] = EIR_TX_POWER;
655 ptr[2] = (u8) hdev->adv_tx_power;
664 static void update_adv_data(struct hci_request *req)
666 struct hci_dev *hdev = req->hdev;
667 struct hci_cp_le_set_adv_data cp;
670 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
673 memset(&cp, 0, sizeof(cp));
675 len = create_adv_data(hdev, cp.data);
677 if (hdev->adv_data_len == len &&
678 memcmp(cp.data, hdev->adv_data, len) == 0)
681 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
682 hdev->adv_data_len = len;
686 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
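/* Both update_adv_data() and update_scan_rsp_data() compare the freshly
 * generated payload against the copy cached in hdev and only queue the
 * corresponding HCI command when something actually changed, so repeated
 * calls with unchanged data do not generate extra LE commands.
 */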
689 static void create_eir(struct hci_dev *hdev, u8 *data)
694 name_len = strlen(hdev->dev_name);
700 ptr[1] = EIR_NAME_SHORT;
702 ptr[1] = EIR_NAME_COMPLETE;
704 /* EIR Data length */
705 ptr[0] = name_len + 1;
707 memcpy(ptr + 2, hdev->dev_name, name_len);
709 ptr += (name_len + 2);
712 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
714 ptr[1] = EIR_TX_POWER;
715 ptr[2] = (u8) hdev->inq_tx_power;
720 if (hdev->devid_source > 0) {
722 ptr[1] = EIR_DEVICE_ID;
724 put_unaligned_le16(hdev->devid_source, ptr + 2);
725 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
726 put_unaligned_le16(hdev->devid_product, ptr + 6);
727 put_unaligned_le16(hdev->devid_version, ptr + 8);
732 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
733 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
734 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
737 static void update_eir(struct hci_request *req)
739 struct hci_dev *hdev = req->hdev;
740 struct hci_cp_write_eir cp;
742 if (!hdev_is_powered(hdev))
745 if (!lmp_ext_inq_capable(hdev))
748 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
751 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
754 memset(&cp, 0, sizeof(cp));
756 create_eir(hdev, cp.data);
758 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
761 memcpy(hdev->eir, cp.data, sizeof(cp.data));
763 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
766 static u8 get_service_classes(struct hci_dev *hdev)
768 struct bt_uuid *uuid;
771 list_for_each_entry(uuid, &hdev->uuids, list)
772 val |= uuid->svc_hint;
777 static void update_class(struct hci_request *req)
779 struct hci_dev *hdev = req->hdev;
782 BT_DBG("%s", hdev->name);
784 if (!hdev_is_powered(hdev))
787 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
790 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
793 cod[0] = hdev->minor_class;
794 cod[1] = hdev->major_class;
795 cod[2] = get_service_classes(hdev);
797 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
800 if (memcmp(cod, hdev->dev_class, 3) == 0)
803 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
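/* The class of device is assembled from the configured minor/major class
 * plus the service-class bits aggregated from the svc_hint of every
 * registered UUID (get_service_classes() above), and is only written to
 * the controller when it differs from the current hdev->dev_class.
 */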
806 static void service_cache_off(struct work_struct *work)
808 struct hci_dev *hdev = container_of(work, struct hci_dev,
810 struct hci_request req;
812 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
815 hci_req_init(&req, hdev);
822 hci_dev_unlock(hdev);
824 hci_req_run(&req, NULL);
827 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
829 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
832 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
/* Non-mgmt controlled devices get this bit set
 * implicitly so that pairing works for them, however
 * for mgmt we require user-space to explicitly enable
 * it.
 */
839 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
842 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
843 void *data, u16 data_len)
845 struct mgmt_rp_read_info rp;
847 BT_DBG("sock %p %s", sk, hdev->name);
851 memset(&rp, 0, sizeof(rp));
853 bacpy(&rp.bdaddr, &hdev->bdaddr);
855 rp.version = hdev->hci_ver;
856 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
858 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
859 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
861 memcpy(rp.dev_class, hdev->dev_class, 3);
863 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
864 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
866 hci_dev_unlock(hdev);
868 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
872 static void mgmt_pending_free(struct pending_cmd *cmd)
879 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
880 struct hci_dev *hdev, void *data,
883 struct pending_cmd *cmd;
885 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
889 cmd->opcode = opcode;
890 cmd->index = hdev->id;
892 cmd->param = kmalloc(len, GFP_KERNEL);
899 memcpy(cmd->param, data, len);
904 list_add(&cmd->list, &hdev->mgmt_pending);
909 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
910 void (*cb)(struct pending_cmd *cmd,
914 struct pending_cmd *cmd, *tmp;
916 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
917 if (opcode > 0 && cmd->opcode != opcode)
924 static void mgmt_pending_remove(struct pending_cmd *cmd)
926 list_del(&cmd->list);
927 mgmt_pending_free(cmd);
930 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
932 __le32 settings = cpu_to_le32(get_current_settings(hdev));
934 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
938 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
941 struct mgmt_mode *cp = data;
942 struct pending_cmd *cmd;
945 BT_DBG("request for %s", hdev->name);
947 if (cp->val != 0x00 && cp->val != 0x01)
948 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
949 MGMT_STATUS_INVALID_PARAMS);
953 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
954 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
959 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
960 cancel_delayed_work(&hdev->power_off);
963 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
965 err = mgmt_powered(hdev, 1);
970 if (!!cp->val == hdev_is_powered(hdev)) {
971 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
975 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
982 queue_work(hdev->req_workqueue, &hdev->power_on);
984 queue_work(hdev->req_workqueue, &hdev->power_off.work);
989 hci_dev_unlock(hdev);
993 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
994 struct sock *skip_sk)
997 struct mgmt_hdr *hdr;
999 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1003 hdr = (void *) skb_put(skb, sizeof(*hdr));
1004 hdr->opcode = cpu_to_le16(event);
1006 hdr->index = cpu_to_le16(hdev->id);
1008 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
1009 hdr->len = cpu_to_le16(data_len);
1012 memcpy(skb_put(skb, data_len), data, data_len);
1015 __net_timestamp(skb);
1017 hci_send_to_control(skb, skip_sk);
1023 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1027 ev = cpu_to_le32(get_current_settings(hdev));
1029 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1034 struct hci_dev *hdev;
1038 static void settings_rsp(struct pending_cmd *cmd, void *data)
1040 struct cmd_lookup *match = data;
1042 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1044 list_del(&cmd->list);
1046 if (match->sk == NULL) {
1047 match->sk = cmd->sk;
1048 sock_hold(match->sk);
1051 mgmt_pending_free(cmd);
1054 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1058 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1059 mgmt_pending_remove(cmd);
1062 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1064 if (!lmp_bredr_capable(hdev))
1065 return MGMT_STATUS_NOT_SUPPORTED;
1066 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1067 return MGMT_STATUS_REJECTED;
1069 return MGMT_STATUS_SUCCESS;
1072 static u8 mgmt_le_support(struct hci_dev *hdev)
1074 if (!lmp_le_capable(hdev))
1075 return MGMT_STATUS_NOT_SUPPORTED;
1076 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1077 return MGMT_STATUS_REJECTED;
1079 return MGMT_STATUS_SUCCESS;
1082 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1084 struct pending_cmd *cmd;
1085 struct mgmt_mode *cp;
1086 struct hci_request req;
1089 BT_DBG("status 0x%02x", status);
1093 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1098 u8 mgmt_err = mgmt_status(status);
1099 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1100 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1106 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1109 if (hdev->discov_timeout > 0) {
1110 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1111 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1115 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1119 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1122 new_settings(hdev, cmd->sk);
1124 /* When the discoverable mode gets changed, make sure
1125 * that class of device has the limited discoverable
1126 * bit correctly set.
1128 hci_req_init(&req, hdev);
1130 hci_req_run(&req, NULL);
1133 mgmt_pending_remove(cmd);
1136 hci_dev_unlock(hdev);
1139 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1142 struct mgmt_cp_set_discoverable *cp = data;
1143 struct pending_cmd *cmd;
1144 struct hci_request req;
1149 BT_DBG("request for %s", hdev->name);
1151 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1152 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1153 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1154 MGMT_STATUS_REJECTED);
1156 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1157 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1158 MGMT_STATUS_INVALID_PARAMS);
1160 timeout = __le16_to_cpu(cp->timeout);
1162 /* Disabling discoverable requires that no timeout is set,
1163 * and enabling limited discoverable requires a timeout.
1165 if ((cp->val == 0x00 && timeout > 0) ||
1166 (cp->val == 0x02 && timeout == 0))
1167 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1168 MGMT_STATUS_INVALID_PARAMS);
1172 if (!hdev_is_powered(hdev) && timeout > 0) {
1173 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1174 MGMT_STATUS_NOT_POWERED);
1178 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1179 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1180 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1185 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1186 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1187 MGMT_STATUS_REJECTED);
1191 if (!hdev_is_powered(hdev)) {
1192 bool changed = false;
1194 /* Setting limited discoverable when powered off is
1195 * not a valid operation since it requires a timeout
1196 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1198 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1199 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1203 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1208 err = new_settings(hdev, sk);
1213 /* If the current mode is the same, then just update the timeout
1214 * value with the new value. And if only the timeout gets updated,
1215 * then no need for any HCI transactions.
1217 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1218 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1219 &hdev->dev_flags)) {
1220 cancel_delayed_work(&hdev->discov_off);
1221 hdev->discov_timeout = timeout;
1223 if (cp->val && hdev->discov_timeout > 0) {
1224 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1225 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1229 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1233 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
/* Cancel any potential discoverable timeout that might still
 * be active and store the new timeout value. The arming of
 * the timeout happens in the complete handler.
 */
1243 cancel_delayed_work(&hdev->discov_off);
1244 hdev->discov_timeout = timeout;
1246 /* Limited discoverable mode */
1247 if (cp->val == 0x02)
1248 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1250 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1252 hci_req_init(&req, hdev);
1254 /* The procedure for LE-only controllers is much simpler - just
1255 * update the advertising data.
1257 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1263 struct hci_cp_write_current_iac_lap hci_cp;
1265 if (cp->val == 0x02) {
1266 /* Limited discoverable mode */
1268 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1269 hci_cp.iac_lap[1] = 0x8b;
1270 hci_cp.iac_lap[2] = 0x9e;
1271 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1272 hci_cp.iac_lap[4] = 0x8b;
1273 hci_cp.iac_lap[5] = 0x9e;
1275 /* General discoverable mode */
1277 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1278 hci_cp.iac_lap[1] = 0x8b;
1279 hci_cp.iac_lap[2] = 0x9e;
1282 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1283 (hci_cp.num_iac * 3) + 1, &hci_cp);
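/* The byte triplets above are the inquiry access code LAPs in
 * little-endian order: 0x9e8b00 is the Limited Inquiry Access Code
 * (LIAC) and 0x9e8b33 the General Inquiry Access Code (GIAC).
 */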
1285 scan |= SCAN_INQUIRY;
1287 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1290 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1293 update_adv_data(&req);
1295 err = hci_req_run(&req, set_discoverable_complete);
1297 mgmt_pending_remove(cmd);
1300 hci_dev_unlock(hdev);
1304 static void write_fast_connectable(struct hci_request *req, bool enable)
1306 struct hci_dev *hdev = req->hdev;
1307 struct hci_cp_write_page_scan_activity acp;
1310 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1313 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1317 type = PAGE_SCAN_TYPE_INTERLACED;
1319 /* 160 msec page scan interval */
1320 acp.interval = __constant_cpu_to_le16(0x0100);
1322 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1324 /* default 1.28 sec page scan */
1325 acp.interval = __constant_cpu_to_le16(0x0800);
1328 acp.window = __constant_cpu_to_le16(0x0012);
1330 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1331 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1332 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1335 if (hdev->page_scan_type != type)
1336 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1339 static u8 get_adv_type(struct hci_dev *hdev)
1341 struct pending_cmd *cmd;
/* If there's a pending mgmt command the flag will not yet have
 * its final value, so check for this first.
 */
1347 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1349 struct mgmt_mode *cp = cmd->param;
1350 connectable = !!cp->val;
1352 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1355 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1358 static void enable_advertising(struct hci_request *req)
1360 struct hci_dev *hdev = req->hdev;
1361 struct hci_cp_le_set_adv_param cp;
1364 memset(&cp, 0, sizeof(cp));
1365 cp.min_interval = __constant_cpu_to_le16(0x0800);
1366 cp.max_interval = __constant_cpu_to_le16(0x0800);
1367 cp.type = get_adv_type(hdev);
1368 cp.own_address_type = hdev->own_addr_type;
1369 cp.channel_map = 0x07;
1371 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1373 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
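/* An advertising interval of 0x0800 corresponds to 2048 * 0.625 ms =
 * 1.28 s; min and max are set to the same value, so the controller
 * advertises at a fixed 1.28 s interval.
 */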
1376 static void disable_advertising(struct hci_request *req)
1380 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1383 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1385 struct pending_cmd *cmd;
1386 struct mgmt_mode *cp;
1389 BT_DBG("status 0x%02x", status);
1393 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1398 u8 mgmt_err = mgmt_status(status);
1399 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1405 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1407 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1409 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1412 new_settings(hdev, cmd->sk);
1415 mgmt_pending_remove(cmd);
1418 hci_dev_unlock(hdev);
1421 static int set_connectable_update_settings(struct hci_dev *hdev,
1422 struct sock *sk, u8 val)
1424 bool changed = false;
1427 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1431 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1433 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1434 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1437 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1442 return new_settings(hdev, sk);
1447 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1450 struct mgmt_mode *cp = data;
1451 struct pending_cmd *cmd;
1452 struct hci_request req;
1456 BT_DBG("request for %s", hdev->name);
1458 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1459 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1460 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1461 MGMT_STATUS_REJECTED);
1463 if (cp->val != 0x00 && cp->val != 0x01)
1464 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1465 MGMT_STATUS_INVALID_PARAMS);
1469 if (!hdev_is_powered(hdev)) {
1470 err = set_connectable_update_settings(hdev, sk, cp->val);
1474 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1475 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1476 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1481 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1487 hci_req_init(&req, hdev);
1489 /* If BR/EDR is not enabled and we disable advertising as a
1490 * by-product of disabling connectable, we need to update the
1491 * advertising flags.
1493 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1495 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1496 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1498 update_adv_data(&req);
1499 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1505 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1506 hdev->discov_timeout > 0)
1507 cancel_delayed_work(&hdev->discov_off);
1510 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* If we're going from non-connectable to connectable or
 * vice-versa when fast connectable is enabled, ensure that
 * fast connectable gets disabled. write_fast_connectable()
 * won't do anything if the page scan parameters are already
 * what they should be.
 */
1519 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1520 write_fast_connectable(&req, false);
1522 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1523 hci_conn_num(hdev, LE_LINK) == 0) {
1524 disable_advertising(&req);
1525 enable_advertising(&req);
1528 err = hci_req_run(&req, set_connectable_complete);
1530 mgmt_pending_remove(cmd);
1531 if (err == -ENODATA)
1532 err = set_connectable_update_settings(hdev, sk,
1538 hci_dev_unlock(hdev);
1542 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1545 struct mgmt_mode *cp = data;
1549 BT_DBG("request for %s", hdev->name);
1551 if (cp->val != 0x00 && cp->val != 0x01)
1552 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1553 MGMT_STATUS_INVALID_PARAMS);
1558 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1560 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1562 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1567 err = new_settings(hdev, sk);
1570 hci_dev_unlock(hdev);
1574 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1577 struct mgmt_mode *cp = data;
1578 struct pending_cmd *cmd;
1582 BT_DBG("request for %s", hdev->name);
1584 status = mgmt_bredr_support(hdev);
1586 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1589 if (cp->val != 0x00 && cp->val != 0x01)
1590 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1591 MGMT_STATUS_INVALID_PARAMS);
1595 if (!hdev_is_powered(hdev)) {
1596 bool changed = false;
1598 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1599 &hdev->dev_flags)) {
1600 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1604 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1609 err = new_settings(hdev, sk);
1614 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1615 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1622 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1623 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1627 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1633 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1635 mgmt_pending_remove(cmd);
1640 hci_dev_unlock(hdev);
1644 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1646 struct mgmt_mode *cp = data;
1647 struct pending_cmd *cmd;
1651 BT_DBG("request for %s", hdev->name);
1653 status = mgmt_bredr_support(hdev);
1655 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1657 if (!lmp_ssp_capable(hdev))
1658 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1659 MGMT_STATUS_NOT_SUPPORTED);
1661 if (cp->val != 0x00 && cp->val != 0x01)
1662 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1663 MGMT_STATUS_INVALID_PARAMS);
1667 if (!hdev_is_powered(hdev)) {
1671 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1674 changed = test_and_clear_bit(HCI_SSP_ENABLED,
1677 changed = test_and_clear_bit(HCI_HS_ENABLED,
1680 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1683 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1688 err = new_settings(hdev, sk);
1693 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1694 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1695 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1700 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1701 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1705 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1711 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1713 mgmt_pending_remove(cmd);
1718 hci_dev_unlock(hdev);
1722 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1724 struct mgmt_mode *cp = data;
1729 BT_DBG("request for %s", hdev->name);
1731 status = mgmt_bredr_support(hdev);
1733 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1735 if (!lmp_ssp_capable(hdev))
1736 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1737 MGMT_STATUS_NOT_SUPPORTED);
1739 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1740 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1741 MGMT_STATUS_REJECTED);
1743 if (cp->val != 0x00 && cp->val != 0x01)
1744 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1745 MGMT_STATUS_INVALID_PARAMS);
1750 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1752 if (hdev_is_powered(hdev)) {
1753 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1754 MGMT_STATUS_REJECTED);
1758 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1761 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1766 err = new_settings(hdev, sk);
1769 hci_dev_unlock(hdev);
1773 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1775 struct cmd_lookup match = { NULL, hdev };
1778 u8 mgmt_err = mgmt_status(status);
1780 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1785 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1787 new_settings(hdev, match.sk);
1792 /* Make sure the controller has a good default for
1793 * advertising data. Restrict the update to when LE
1794 * has actually been enabled. During power on, the
1795 * update in powered_update_hci will take care of it.
1797 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1798 struct hci_request req;
1802 hci_req_init(&req, hdev);
1803 update_adv_data(&req);
1804 update_scan_rsp_data(&req);
1805 hci_req_run(&req, NULL);
1807 hci_dev_unlock(hdev);
1811 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1813 struct mgmt_mode *cp = data;
1814 struct hci_cp_write_le_host_supported hci_cp;
1815 struct pending_cmd *cmd;
1816 struct hci_request req;
1820 BT_DBG("request for %s", hdev->name);
1822 if (!lmp_le_capable(hdev))
1823 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1824 MGMT_STATUS_NOT_SUPPORTED);
1826 if (cp->val != 0x00 && cp->val != 0x01)
1827 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1828 MGMT_STATUS_INVALID_PARAMS);
1830 /* LE-only devices do not allow toggling LE on/off */
1831 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1832 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1833 MGMT_STATUS_REJECTED);
1838 enabled = lmp_host_le_capable(hdev);
1840 if (!hdev_is_powered(hdev) || val == enabled) {
1841 bool changed = false;
1843 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1844 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1848 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1849 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1853 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1858 err = new_settings(hdev, sk);
1863 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1864 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1865 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1870 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1876 hci_req_init(&req, hdev);
1878 memset(&hci_cp, 0, sizeof(hci_cp));
1882 hci_cp.simul = lmp_le_br_capable(hdev);
1884 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1885 disable_advertising(&req);
1888 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1891 err = hci_req_run(&req, le_enable_complete);
1893 mgmt_pending_remove(cmd);
1896 hci_dev_unlock(hdev);
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. Only one such pending mgmt command is
 * allowed at a time, since otherwise we cannot easily track what the
 * current values are and will be, and based on that decide whether a
 * new HCI command needs to be sent and, if so, with what value.
 */
1906 static bool pending_eir_or_class(struct hci_dev *hdev)
1908 struct pending_cmd *cmd;
1910 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1911 switch (cmd->opcode) {
1912 case MGMT_OP_ADD_UUID:
1913 case MGMT_OP_REMOVE_UUID:
1914 case MGMT_OP_SET_DEV_CLASS:
1915 case MGMT_OP_SET_POWERED:
1923 static const u8 bluetooth_base_uuid[] = {
1924 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1925 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
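/* This is the Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB)
 * stored in little-endian byte order. get_uuid_size() below compares the
 * low 96 bits against it: only UUIDs derived from the base UUID can be
 * shortened to their 16-bit or 32-bit form for EIR/advertising data.
 */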
1928 static u8 get_uuid_size(const u8 *uuid)
1932 if (memcmp(uuid, bluetooth_base_uuid, 12))
1935 val = get_unaligned_le32(&uuid[12]);
1942 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1944 struct pending_cmd *cmd;
1948 cmd = mgmt_pending_find(mgmt_op, hdev);
1952 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1953 hdev->dev_class, 3);
1955 mgmt_pending_remove(cmd);
1958 hci_dev_unlock(hdev);
1961 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1963 BT_DBG("status 0x%02x", status);
1965 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1968 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1970 struct mgmt_cp_add_uuid *cp = data;
1971 struct pending_cmd *cmd;
1972 struct hci_request req;
1973 struct bt_uuid *uuid;
1976 BT_DBG("request for %s", hdev->name);
1980 if (pending_eir_or_class(hdev)) {
1981 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1986 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1992 memcpy(uuid->uuid, cp->uuid, 16);
1993 uuid->svc_hint = cp->svc_hint;
1994 uuid->size = get_uuid_size(cp->uuid);
1996 list_add_tail(&uuid->list, &hdev->uuids);
1998 hci_req_init(&req, hdev);
2003 err = hci_req_run(&req, add_uuid_complete);
2005 if (err != -ENODATA)
2008 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2009 hdev->dev_class, 3);
2013 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2022 hci_dev_unlock(hdev);
2026 static bool enable_service_cache(struct hci_dev *hdev)
2028 if (!hdev_is_powered(hdev))
2031 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2032 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2040 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2042 BT_DBG("status 0x%02x", status);
2044 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2047 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2050 struct mgmt_cp_remove_uuid *cp = data;
2051 struct pending_cmd *cmd;
2052 struct bt_uuid *match, *tmp;
2053 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2054 struct hci_request req;
2057 BT_DBG("request for %s", hdev->name);
2061 if (pending_eir_or_class(hdev)) {
2062 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2067 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2068 err = hci_uuids_clear(hdev);
2070 if (enable_service_cache(hdev)) {
2071 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2072 0, hdev->dev_class, 3);
2081 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2082 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2085 list_del(&match->list);
2091 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2092 MGMT_STATUS_INVALID_PARAMS);
2097 hci_req_init(&req, hdev);
2102 err = hci_req_run(&req, remove_uuid_complete);
2104 if (err != -ENODATA)
2107 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2108 hdev->dev_class, 3);
2112 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2121 hci_dev_unlock(hdev);
2125 static void set_class_complete(struct hci_dev *hdev, u8 status)
2127 BT_DBG("status 0x%02x", status);
2129 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2132 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2135 struct mgmt_cp_set_dev_class *cp = data;
2136 struct pending_cmd *cmd;
2137 struct hci_request req;
2140 BT_DBG("request for %s", hdev->name);
2142 if (!lmp_bredr_capable(hdev))
2143 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2144 MGMT_STATUS_NOT_SUPPORTED);
2148 if (pending_eir_or_class(hdev)) {
2149 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2154 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2155 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2156 MGMT_STATUS_INVALID_PARAMS);
2160 hdev->major_class = cp->major;
2161 hdev->minor_class = cp->minor;
2163 if (!hdev_is_powered(hdev)) {
2164 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2165 hdev->dev_class, 3);
2169 hci_req_init(&req, hdev);
2171 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2172 hci_dev_unlock(hdev);
2173 cancel_delayed_work_sync(&hdev->service_cache);
2180 err = hci_req_run(&req, set_class_complete);
2182 if (err != -ENODATA)
2185 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2186 hdev->dev_class, 3);
2190 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2199 hci_dev_unlock(hdev);
2203 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2206 struct mgmt_cp_load_link_keys *cp = data;
2207 u16 key_count, expected_len;
2210 BT_DBG("request for %s", hdev->name);
2212 if (!lmp_bredr_capable(hdev))
2213 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2214 MGMT_STATUS_NOT_SUPPORTED);
2216 key_count = __le16_to_cpu(cp->key_count);
2218 expected_len = sizeof(*cp) + key_count *
2219 sizeof(struct mgmt_link_key_info);
2220 if (expected_len != len) {
2221 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2223 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2224 MGMT_STATUS_INVALID_PARAMS);
2227 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2228 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2229 MGMT_STATUS_INVALID_PARAMS);
2231 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2234 for (i = 0; i < key_count; i++) {
2235 struct mgmt_link_key_info *key = &cp->keys[i];
2237 if (key->addr.type != BDADDR_BREDR)
2238 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2239 MGMT_STATUS_INVALID_PARAMS);
2244 hci_link_keys_clear(hdev);
2247 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2249 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2251 for (i = 0; i < key_count; i++) {
2252 struct mgmt_link_key_info *key = &cp->keys[i];
2254 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2255 key->type, key->pin_len);
2258 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2260 hci_dev_unlock(hdev);
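/* Note that MGMT_OP_LOAD_LINK_KEYS replaces the entire link key store:
 * hci_link_keys_clear() drops all existing keys before the supplied list
 * is added, and cp->debug_keys controls whether the HCI_DEBUG_KEYS flag
 * gets set or cleared for the controller.
 */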
2265 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2266 u8 addr_type, struct sock *skip_sk)
2268 struct mgmt_ev_device_unpaired ev;
2270 bacpy(&ev.addr.bdaddr, bdaddr);
2271 ev.addr.type = addr_type;
2273 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2277 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2280 struct mgmt_cp_unpair_device *cp = data;
2281 struct mgmt_rp_unpair_device rp;
2282 struct hci_cp_disconnect dc;
2283 struct pending_cmd *cmd;
2284 struct hci_conn *conn;
2287 memset(&rp, 0, sizeof(rp));
2288 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2289 rp.addr.type = cp->addr.type;
2291 if (!bdaddr_type_is_valid(cp->addr.type))
2292 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2293 MGMT_STATUS_INVALID_PARAMS,
2296 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2297 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2298 MGMT_STATUS_INVALID_PARAMS,
2303 if (!hdev_is_powered(hdev)) {
2304 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2305 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2309 if (cp->addr.type == BDADDR_BREDR)
2310 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2312 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
2315 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2316 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2320 if (cp->disconnect) {
2321 if (cp->addr.type == BDADDR_BREDR)
2322 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2325 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2332 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2334 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2338 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2345 dc.handle = cpu_to_le16(conn->handle);
2346 dc.reason = 0x13; /* Remote User Terminated Connection */
2347 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2349 mgmt_pending_remove(cmd);
2352 hci_dev_unlock(hdev);
2356 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2359 struct mgmt_cp_disconnect *cp = data;
2360 struct mgmt_rp_disconnect rp;
2361 struct hci_cp_disconnect dc;
2362 struct pending_cmd *cmd;
2363 struct hci_conn *conn;
2368 memset(&rp, 0, sizeof(rp));
2369 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2370 rp.addr.type = cp->addr.type;
2372 if (!bdaddr_type_is_valid(cp->addr.type))
2373 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2374 MGMT_STATUS_INVALID_PARAMS,
2379 if (!test_bit(HCI_UP, &hdev->flags)) {
2380 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2381 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2385 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2386 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2387 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2391 if (cp->addr.type == BDADDR_BREDR)
2392 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2395 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2397 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2398 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2399 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2403 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2409 dc.handle = cpu_to_le16(conn->handle);
2410 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2412 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2414 mgmt_pending_remove(cmd);
2417 hci_dev_unlock(hdev);
2421 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2423 switch (link_type) {
2425 switch (addr_type) {
2426 case ADDR_LE_DEV_PUBLIC:
2427 return BDADDR_LE_PUBLIC;
2430 /* Fallback to LE Random address type */
2431 return BDADDR_LE_RANDOM;
2435 /* Fallback to BR/EDR type */
2436 return BDADDR_BREDR;
2440 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2443 struct mgmt_rp_get_connections *rp;
2453 if (!hdev_is_powered(hdev)) {
2454 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2455 MGMT_STATUS_NOT_POWERED);
2460 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2461 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2465 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2466 rp = kmalloc(rp_len, GFP_KERNEL);
2473 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2474 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2476 bacpy(&rp->addr[i].bdaddr, &c->dst);
2477 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2478 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2483 rp->conn_count = cpu_to_le16(i);
2485 /* Recalculate length in case of filtered SCO connections, etc */
2486 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2488 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2494 hci_dev_unlock(hdev);
2498 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2499 struct mgmt_cp_pin_code_neg_reply *cp)
2501 struct pending_cmd *cmd;
2504 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2509 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2510 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2512 mgmt_pending_remove(cmd);
2517 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2520 struct hci_conn *conn;
2521 struct mgmt_cp_pin_code_reply *cp = data;
2522 struct hci_cp_pin_code_reply reply;
2523 struct pending_cmd *cmd;
2530 if (!hdev_is_powered(hdev)) {
2531 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2532 MGMT_STATUS_NOT_POWERED);
2536 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2538 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2539 MGMT_STATUS_NOT_CONNECTED);
2543 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2544 struct mgmt_cp_pin_code_neg_reply ncp;
2546 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2548 BT_ERR("PIN code is not 16 bytes long");
2550 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2552 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2553 MGMT_STATUS_INVALID_PARAMS);
2558 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2564 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2565 reply.pin_len = cp->pin_len;
2566 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2568 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2570 mgmt_pending_remove(cmd);
2573 hci_dev_unlock(hdev);
2577 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2580 struct mgmt_cp_set_io_capability *cp = data;
2586 hdev->io_capability = cp->io_capability;
2588 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2589 hdev->io_capability);
2591 hci_dev_unlock(hdev);
2593 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2597 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2599 struct hci_dev *hdev = conn->hdev;
2600 struct pending_cmd *cmd;
2602 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2603 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2606 if (cmd->user_data != conn)
2615 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2617 struct mgmt_rp_pair_device rp;
2618 struct hci_conn *conn = cmd->user_data;
2620 bacpy(&rp.addr.bdaddr, &conn->dst);
2621 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2623 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2626 /* So we don't get further callbacks for this connection */
2627 conn->connect_cfm_cb = NULL;
2628 conn->security_cfm_cb = NULL;
2629 conn->disconn_cfm_cb = NULL;
2631 hci_conn_drop(conn);
2633 mgmt_pending_remove(cmd);
2636 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2638 struct pending_cmd *cmd;
2640 BT_DBG("status %u", status);
2642 cmd = find_pairing(conn);
2644 BT_DBG("Unable to find a pending command");
2646 pairing_complete(cmd, mgmt_status(status));
2649 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2651 struct pending_cmd *cmd;
2653 BT_DBG("status %u", status);
2658 cmd = find_pairing(conn);
2660 BT_DBG("Unable to find a pending command");
2662 pairing_complete(cmd, mgmt_status(status));
2665 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2668 struct mgmt_cp_pair_device *cp = data;
2669 struct mgmt_rp_pair_device rp;
2670 struct pending_cmd *cmd;
2671 u8 sec_level, auth_type;
2672 struct hci_conn *conn;
2677 memset(&rp, 0, sizeof(rp));
2678 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2679 rp.addr.type = cp->addr.type;
2681 if (!bdaddr_type_is_valid(cp->addr.type))
2682 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2683 MGMT_STATUS_INVALID_PARAMS,
2688 if (!hdev_is_powered(hdev)) {
2689 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2690 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2694 sec_level = BT_SECURITY_MEDIUM;
2695 if (cp->io_cap == 0x03)
2696 auth_type = HCI_AT_DEDICATED_BONDING;
2698 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2700 if (cp->addr.type == BDADDR_BREDR)
2701 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2702 cp->addr.type, sec_level, auth_type);
2704 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2705 cp->addr.type, sec_level, auth_type);
2710 if (PTR_ERR(conn) == -EBUSY)
2711 status = MGMT_STATUS_BUSY;
2713 status = MGMT_STATUS_CONNECT_FAILED;
2715 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2721 if (conn->connect_cfm_cb) {
2722 hci_conn_drop(conn);
2723 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2724 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2728 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2731 hci_conn_drop(conn);
/* For LE, just connecting isn't proof that the pairing finished */
2736 if (cp->addr.type == BDADDR_BREDR)
2737 conn->connect_cfm_cb = pairing_complete_cb;
2739 conn->connect_cfm_cb = le_connect_complete_cb;
2741 conn->security_cfm_cb = pairing_complete_cb;
2742 conn->disconn_cfm_cb = pairing_complete_cb;
2743 conn->io_capability = cp->io_cap;
2744 cmd->user_data = conn;
2746 if (conn->state == BT_CONNECTED &&
2747 hci_conn_security(conn, sec_level, auth_type))
2748 pairing_complete(cmd, 0);
2753 hci_dev_unlock(hdev);
2757 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2760 struct mgmt_addr_info *addr = data;
2761 struct pending_cmd *cmd;
2762 struct hci_conn *conn;
2769 if (!hdev_is_powered(hdev)) {
2770 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2771 MGMT_STATUS_NOT_POWERED);
2775 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2777 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2778 MGMT_STATUS_INVALID_PARAMS);
2782 conn = cmd->user_data;
2784 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2785 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2786 MGMT_STATUS_INVALID_PARAMS);
2790 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2792 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2793 addr, sizeof(*addr));
2795 hci_dev_unlock(hdev);
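/* Common helper for the PIN code, user confirmation and passkey
 * replies: for LE addresses the response is handed to SMP, for BR/EDR
 * it is translated into the corresponding HCI command.
 */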
2799 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2800 struct mgmt_addr_info *addr, u16 mgmt_op,
2801 u16 hci_op, __le32 passkey)
2803 struct pending_cmd *cmd;
2804 struct hci_conn *conn;
2809 if (!hdev_is_powered(hdev)) {
2810 err = cmd_complete(sk, hdev->id, mgmt_op,
2811 MGMT_STATUS_NOT_POWERED, addr,
2816 if (addr->type == BDADDR_BREDR)
2817 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2819 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2822 err = cmd_complete(sk, hdev->id, mgmt_op,
2823 MGMT_STATUS_NOT_CONNECTED, addr,
2828 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2829 /* Continue with pairing via SMP */
2830 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2833 err = cmd_complete(sk, hdev->id, mgmt_op,
2834 MGMT_STATUS_SUCCESS, addr,
2837 err = cmd_complete(sk, hdev->id, mgmt_op,
2838 MGMT_STATUS_FAILED, addr,
2844 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2850 /* Continue with pairing via HCI */
2851 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2852 struct hci_cp_user_passkey_reply cp;
2854 bacpy(&cp.bdaddr, &addr->bdaddr);
2855 cp.passkey = passkey;
2856 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2858 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2862 mgmt_pending_remove(cmd);
2865 hci_dev_unlock(hdev);
2869 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2870 void *data, u16 len)
2872 struct mgmt_cp_pin_code_neg_reply *cp = data;
2876 return user_pairing_resp(sk, hdev, &cp->addr,
2877 MGMT_OP_PIN_CODE_NEG_REPLY,
2878 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2881 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2884 struct mgmt_cp_user_confirm_reply *cp = data;
2888 if (len != sizeof(*cp))
2889 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2890 MGMT_STATUS_INVALID_PARAMS);
2892 return user_pairing_resp(sk, hdev, &cp->addr,
2893 MGMT_OP_USER_CONFIRM_REPLY,
2894 HCI_OP_USER_CONFIRM_REPLY, 0);
2897 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2898 void *data, u16 len)
2900 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2904 return user_pairing_resp(sk, hdev, &cp->addr,
2905 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2906 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2909 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2912 struct mgmt_cp_user_passkey_reply *cp = data;
2916 return user_pairing_resp(sk, hdev, &cp->addr,
2917 MGMT_OP_USER_PASSKEY_REPLY,
2918 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2921 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2922 void *data, u16 len)
2924 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2928 return user_pairing_resp(sk, hdev, &cp->addr,
2929 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2930 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2933 static void update_name(struct hci_request *req)
2935 struct hci_dev *hdev = req->hdev;
2936 struct hci_cp_write_local_name cp;
2938 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2940 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2943 static void set_name_complete(struct hci_dev *hdev, u8 status)
2945 struct mgmt_cp_set_local_name *cp;
2946 struct pending_cmd *cmd;
2948 BT_DBG("status 0x%02x", status);
2952 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2959 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2960 mgmt_status(status));
2962 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2965 mgmt_pending_remove(cmd);
2968 hci_dev_unlock(hdev);
2971 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2974 struct mgmt_cp_set_local_name *cp = data;
2975 struct pending_cmd *cmd;
2976 struct hci_request req;
2983 /* If the old values are the same as the new ones just return a
2984 * direct command complete event.
2986 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2987 !memcmp(hdev->short_name, cp->short_name,
2988 sizeof(hdev->short_name))) {
2989 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2994 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2996 if (!hdev_is_powered(hdev)) {
2997 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2999 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3004 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3010 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3016 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3018 hci_req_init(&req, hdev);
3020 if (lmp_bredr_capable(hdev)) {
3025 /* The name is stored in the scan response data and so
3026 * no need to update the advertising data here.
3028 if (lmp_le_capable(hdev))
3029 update_scan_rsp_data(&req);
3031 err = hci_req_run(&req, set_name_complete);
3033 mgmt_pending_remove(cmd);
3036 hci_dev_unlock(hdev);
3040 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3041 void *data, u16 data_len)
3043 struct pending_cmd *cmd;
3046 BT_DBG("%s", hdev->name);
3050 if (!hdev_is_powered(hdev)) {
3051 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3052 MGMT_STATUS_NOT_POWERED);
3056 if (!lmp_ssp_capable(hdev)) {
3057 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3058 MGMT_STATUS_NOT_SUPPORTED);
3062 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3063 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3068 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3074 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3076 mgmt_pending_remove(cmd);
3079 hci_dev_unlock(hdev);
3083 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3084 void *data, u16 len)
3086 struct mgmt_cp_add_remote_oob_data *cp = data;
3090 BT_DBG("%s", hdev->name);
3094 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
3097 status = MGMT_STATUS_FAILED;
3099 status = MGMT_STATUS_SUCCESS;
3101 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3102 &cp->addr, sizeof(cp->addr));
3104 hci_dev_unlock(hdev);
3108 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3109 void *data, u16 len)
3111 struct mgmt_cp_remove_remote_oob_data *cp = data;
3115 BT_DBG("%s", hdev->name);
3119 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3121 status = MGMT_STATUS_INVALID_PARAMS;
3123 status = MGMT_STATUS_SUCCESS;
3125 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3126 status, &cp->addr, sizeof(cp->addr));
3128 hci_dev_unlock(hdev);
3132 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3134 struct pending_cmd *cmd;
3138 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3140 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3144 type = hdev->discovery.type;
3146 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3147 &type, sizeof(type));
3148 mgmt_pending_remove(cmd);
3153 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3155 BT_DBG("status %d", status);
3159 mgmt_start_discovery_failed(hdev, status);
3160 hci_dev_unlock(hdev);
3165 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3166 hci_dev_unlock(hdev);
3168 switch (hdev->discovery.type) {
3169 case DISCOV_TYPE_LE:
3170 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3174 case DISCOV_TYPE_INTERLEAVED:
3175 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3176 DISCOV_INTERLEAVED_TIMEOUT);
3179 case DISCOV_TYPE_BREDR:
3183 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
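/* Start Discovery command handler: depending on the requested type
 * this issues an HCI Inquiry with the general inquiry access code for
 * BR/EDR discovery, or sets up and enables an LE scan for LE and
 * interleaved discovery.
 */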
3187 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3188 void *data, u16 len)
3190 struct mgmt_cp_start_discovery *cp = data;
3191 struct pending_cmd *cmd;
3192 struct hci_cp_le_set_scan_param param_cp;
3193 struct hci_cp_le_set_scan_enable enable_cp;
3194 struct hci_cp_inquiry inq_cp;
3195 struct hci_request req;
3196 /* General inquiry access code (GIAC) */
3197 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3201 BT_DBG("%s", hdev->name);
3205 if (!hdev_is_powered(hdev)) {
3206 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3207 MGMT_STATUS_NOT_POWERED);
3211 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3212 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3217 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3218 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3223 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3229 hdev->discovery.type = cp->type;
3231 hci_req_init(&req, hdev);
3233 switch (hdev->discovery.type) {
3234 case DISCOV_TYPE_BREDR:
3235 status = mgmt_bredr_support(hdev);
3237 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3239 mgmt_pending_remove(cmd);
3243 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3244 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3246 mgmt_pending_remove(cmd);
3250 hci_inquiry_cache_flush(hdev);
3252 memset(&inq_cp, 0, sizeof(inq_cp));
3253 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3254 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3255 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3258 case DISCOV_TYPE_LE:
3259 case DISCOV_TYPE_INTERLEAVED:
3260 status = mgmt_le_support(hdev);
3262 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3264 mgmt_pending_remove(cmd);
3268 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3269 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3270 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3271 MGMT_STATUS_NOT_SUPPORTED);
3272 mgmt_pending_remove(cmd);
3276 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3277 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3278 MGMT_STATUS_REJECTED);
3279 mgmt_pending_remove(cmd);
3283 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
3284 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3286 mgmt_pending_remove(cmd);
3290 memset(&param_cp, 0, sizeof(param_cp));
3291 param_cp.type = LE_SCAN_ACTIVE;
3292 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3293 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3294 param_cp.own_address_type = hdev->own_addr_type;
3295 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3298 memset(&enable_cp, 0, sizeof(enable_cp));
3299 enable_cp.enable = LE_SCAN_ENABLE;
3300 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3301 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3306 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3307 MGMT_STATUS_INVALID_PARAMS);
3308 mgmt_pending_remove(cmd);
3312 err = hci_req_run(&req, start_discovery_complete);
3314 mgmt_pending_remove(cmd);
3316 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3319 hci_dev_unlock(hdev);
3323 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3325 struct pending_cmd *cmd;
3328 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3332 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3333 &hdev->discovery.type, sizeof(hdev->discovery.type));
3334 mgmt_pending_remove(cmd);
3339 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3341 BT_DBG("status %d", status);
3346 mgmt_stop_discovery_failed(hdev, status);
3350 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3353 hci_dev_unlock(hdev);
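/* Stop Discovery command handler: cancel the ongoing inquiry or LE
 * scan when in the FINDING state, or cancel the pending remote name
 * request when in the RESOLVING state.
 */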
3356 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3359 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3360 struct pending_cmd *cmd;
3361 struct hci_cp_remote_name_req_cancel cp;
3362 struct inquiry_entry *e;
3363 struct hci_request req;
3364 struct hci_cp_le_set_scan_enable enable_cp;
3367 BT_DBG("%s", hdev->name);
3371 if (!hci_discovery_active(hdev)) {
3372 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3373 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3374 sizeof(mgmt_cp->type));
3378 if (hdev->discovery.type != mgmt_cp->type) {
3379 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3380 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3381 sizeof(mgmt_cp->type));
3385 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3391 hci_req_init(&req, hdev);
3393 switch (hdev->discovery.state) {
3394 case DISCOVERY_FINDING:
3395 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3396 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3398 cancel_delayed_work(&hdev->le_scan_disable);
3400 memset(&enable_cp, 0, sizeof(enable_cp));
3401 enable_cp.enable = LE_SCAN_DISABLE;
3402 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
3403 sizeof(enable_cp), &enable_cp);
3408 case DISCOVERY_RESOLVING:
3409 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3412 mgmt_pending_remove(cmd);
3413 err = cmd_complete(sk, hdev->id,
3414 MGMT_OP_STOP_DISCOVERY, 0,
3416 sizeof(mgmt_cp->type));
3417 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3421 bacpy(&cp.bdaddr, &e->data.bdaddr);
3422 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3428 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3430 mgmt_pending_remove(cmd);
3431 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3432 MGMT_STATUS_FAILED, &mgmt_cp->type,
3433 sizeof(mgmt_cp->type));
3437 err = hci_req_run(&req, stop_discovery_complete);
3439 mgmt_pending_remove(cmd);
3441 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3444 hci_dev_unlock(hdev);
3448 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3451 struct mgmt_cp_confirm_name *cp = data;
3452 struct inquiry_entry *e;
3455 BT_DBG("%s", hdev->name);
3459 if (!hci_discovery_active(hdev)) {
3460 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3461 MGMT_STATUS_FAILED);
3465 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3467 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3468 MGMT_STATUS_INVALID_PARAMS);
3472 if (cp->name_known) {
3473 e->name_state = NAME_KNOWN;
3476 e->name_state = NAME_NEEDED;
3477 hci_inquiry_cache_update_resolve(hdev, e);
3480 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3484 hci_dev_unlock(hdev);
3488 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3491 struct mgmt_cp_block_device *cp = data;
3495 BT_DBG("%s", hdev->name);
3497 if (!bdaddr_type_is_valid(cp->addr.type))
3498 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3499 MGMT_STATUS_INVALID_PARAMS,
3500 &cp->addr, sizeof(cp->addr));
3504 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3506 status = MGMT_STATUS_FAILED;
3508 status = MGMT_STATUS_SUCCESS;
3510 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3511 &cp->addr, sizeof(cp->addr));
3513 hci_dev_unlock(hdev);
3518 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3521 struct mgmt_cp_unblock_device *cp = data;
3525 BT_DBG("%s", hdev->name);
3527 if (!bdaddr_type_is_valid(cp->addr.type))
3528 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3529 MGMT_STATUS_INVALID_PARAMS,
3530 &cp->addr, sizeof(cp->addr));
3534 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3536 status = MGMT_STATUS_INVALID_PARAMS;
3538 status = MGMT_STATUS_SUCCESS;
3540 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3541 &cp->addr, sizeof(cp->addr));
3543 hci_dev_unlock(hdev);
3548 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3551 struct mgmt_cp_set_device_id *cp = data;
3552 struct hci_request req;
3556 BT_DBG("%s", hdev->name);
3558 source = __le16_to_cpu(cp->source);
3560 if (source > 0x0002)
3561 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3562 MGMT_STATUS_INVALID_PARAMS);
3566 hdev->devid_source = source;
3567 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3568 hdev->devid_product = __le16_to_cpu(cp->product);
3569 hdev->devid_version = __le16_to_cpu(cp->version);
3571 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3573 hci_req_init(&req, hdev);
3575 hci_req_run(&req, NULL);
3577 hci_dev_unlock(hdev);
3582 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3584 struct cmd_lookup match = { NULL, hdev };
3587 u8 mgmt_err = mgmt_status(status);
3589 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3590 cmd_status_rsp, &mgmt_err);
3594 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3597 new_settings(hdev, match.sk);
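/* Set Advertising command handler: if the controller is powered off,
 * already in the requested state or has an active LE connection, only
 * the HCI_ADVERTISING flag is toggled and a response sent; otherwise
 * advertising is enabled or disabled through an HCI request.
 */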
3603 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3606 struct mgmt_mode *cp = data;
3607 struct pending_cmd *cmd;
3608 struct hci_request req;
3609 u8 val, enabled, status;
3612 BT_DBG("request for %s", hdev->name);
3614 status = mgmt_le_support(hdev);
3616 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3619 if (cp->val != 0x00 && cp->val != 0x01)
3620 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3621 MGMT_STATUS_INVALID_PARAMS);
3626 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3628 /* The following conditions are ones which mean that we should
3629 * not do any HCI communication but directly send a mgmt
3630 * response to user space (after toggling the flag if necessary).
3633 if (!hdev_is_powered(hdev) || val == enabled ||
3634 hci_conn_num(hdev, LE_LINK) > 0) {
3635 bool changed = false;
3637 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3638 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3642 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3647 err = new_settings(hdev, sk);
3652 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3653 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3654 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3659 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3665 hci_req_init(&req, hdev);
3668 enable_advertising(&req);
3670 disable_advertising(&req);
3672 err = hci_req_run(&req, set_advertising_complete);
3674 mgmt_pending_remove(cmd);
3677 hci_dev_unlock(hdev);
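/* Set Static Address command handler: the address can only be changed
 * while the controller is powered off, and a non-zero address must be
 * a valid LE static random address (two most significant bits set).
 */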
3681 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3682 void *data, u16 len)
3684 struct mgmt_cp_set_static_address *cp = data;
3687 BT_DBG("%s", hdev->name);
3689 if (!lmp_le_capable(hdev))
3690 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3691 MGMT_STATUS_NOT_SUPPORTED);
3693 if (hdev_is_powered(hdev))
3694 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3695 MGMT_STATUS_REJECTED);
3697 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3698 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3699 return cmd_status(sk, hdev->id,
3700 MGMT_OP_SET_STATIC_ADDRESS,
3701 MGMT_STATUS_INVALID_PARAMS);
3703 /* Two most significant bits shall be set */
3704 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3705 return cmd_status(sk, hdev->id,
3706 MGMT_OP_SET_STATIC_ADDRESS,
3707 MGMT_STATUS_INVALID_PARAMS);
3712 bacpy(&hdev->static_addr, &cp->bdaddr);
3714 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3716 hci_dev_unlock(hdev);
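/* Set Scan Parameters command handler: interval and window are in HCI
 * units of 0.625 ms, must each lie within 0x0004-0x4000 and the window
 * may not be larger than the interval.
 */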
3721 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3722 void *data, u16 len)
3724 struct mgmt_cp_set_scan_params *cp = data;
3725 __u16 interval, window;
3728 BT_DBG("%s", hdev->name);
3730 if (!lmp_le_capable(hdev))
3731 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3732 MGMT_STATUS_NOT_SUPPORTED);
3734 interval = __le16_to_cpu(cp->interval);
3736 if (interval < 0x0004 || interval > 0x4000)
3737 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3738 MGMT_STATUS_INVALID_PARAMS);
3740 window = __le16_to_cpu(cp->window);
3742 if (window < 0x0004 || window > 0x4000)
3743 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3744 MGMT_STATUS_INVALID_PARAMS);
3746 if (window > interval)
3747 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3748 MGMT_STATUS_INVALID_PARAMS);
3752 hdev->le_scan_interval = interval;
3753 hdev->le_scan_window = window;
3755 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3757 hci_dev_unlock(hdev);
3762 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3764 struct pending_cmd *cmd;
3766 BT_DBG("status 0x%02x", status);
3770 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3775 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3776 mgmt_status(status));
3778 struct mgmt_mode *cp = cmd->param;
3781 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3783 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3785 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3786 new_settings(hdev, cmd->sk);
3789 mgmt_pending_remove(cmd);
3792 hci_dev_unlock(hdev);
3795 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3796 void *data, u16 len)
3798 struct mgmt_mode *cp = data;
3799 struct pending_cmd *cmd;
3800 struct hci_request req;
3803 BT_DBG("%s", hdev->name);
3805 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
3806 hdev->hci_ver < BLUETOOTH_VER_1_2)
3807 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3808 MGMT_STATUS_NOT_SUPPORTED);
3810 if (cp->val != 0x00 && cp->val != 0x01)
3811 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3812 MGMT_STATUS_INVALID_PARAMS);
3814 if (!hdev_is_powered(hdev))
3815 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3816 MGMT_STATUS_NOT_POWERED);
3818 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3819 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3820 MGMT_STATUS_REJECTED);
3824 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3825 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3830 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3831 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3836 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3843 hci_req_init(&req, hdev);
3845 write_fast_connectable(&req, cp->val);
3847 err = hci_req_run(&req, fast_connectable_complete);
3849 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3850 MGMT_STATUS_FAILED);
3851 mgmt_pending_remove(cmd);
3855 hci_dev_unlock(hdev);
3860 static void set_bredr_scan(struct hci_request *req)
3862 struct hci_dev *hdev = req->hdev;
3865 /* Ensure that fast connectable is disabled. This function will
3866 * not do anything if the page scan parameters are already what they should be.
3869 write_fast_connectable(req, false);
3871 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3873 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3874 scan |= SCAN_INQUIRY;
3877 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3880 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3882 struct pending_cmd *cmd;
3884 BT_DBG("status 0x%02x", status);
3888 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3893 u8 mgmt_err = mgmt_status(status);
3895 /* We need to restore the flag if related HCI commands failed.
3898 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3900 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3902 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3903 new_settings(hdev, cmd->sk);
3906 mgmt_pending_remove(cmd);
3909 hci_dev_unlock(hdev);
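/* Set BR/EDR command handler: enabling is allowed at any time, while
 * disabling is only accepted when the controller is powered off and
 * also clears the BR/EDR dependent settings.
 */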
3912 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3914 struct mgmt_mode *cp = data;
3915 struct pending_cmd *cmd;
3916 struct hci_request req;
3919 BT_DBG("request for %s", hdev->name);
3921 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
3922 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3923 MGMT_STATUS_NOT_SUPPORTED);
3925 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3926 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3927 MGMT_STATUS_REJECTED);
3929 if (cp->val != 0x00 && cp->val != 0x01)
3930 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3931 MGMT_STATUS_INVALID_PARAMS);
3935 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3936 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3940 if (!hdev_is_powered(hdev)) {
3942 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
3943 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
3944 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3945 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3946 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
3949 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3951 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3955 err = new_settings(hdev, sk);
3959 /* Reject disabling when powered on */
3961 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3962 MGMT_STATUS_REJECTED);
3966 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
3967 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3972 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
3978 /* We need to flip the bit already here so that update_adv_data
3979 * generates the correct flags.
3981 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3983 hci_req_init(&req, hdev);
3985 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3986 set_bredr_scan(&req);
3988 /* Since only the advertising data flags will change, there
3989 * is no need to update the scan response data.
3991 update_adv_data(&req);
3993 err = hci_req_run(&req, set_bredr_complete);
3995 mgmt_pending_remove(cmd);
3998 hci_dev_unlock(hdev);
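/* Validate one long term key entry from user space: the authenticated
 * and master fields must be 0 or 1 and the address type must be an LE
 * address type.
 */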
4002 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4004 if (key->authenticated != 0x00 && key->authenticated != 0x01)
4006 if (key->master != 0x00 && key->master != 0x01)
4008 if (!bdaddr_type_is_le(key->addr.type))
4013 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4014 void *cp_data, u16 len)
4016 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4017 u16 key_count, expected_len;
4020 BT_DBG("request for %s", hdev->name);
4022 if (!lmp_le_capable(hdev))
4023 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4024 MGMT_STATUS_NOT_SUPPORTED);
4026 key_count = __le16_to_cpu(cp->key_count);
4028 expected_len = sizeof(*cp) + key_count *
4029 sizeof(struct mgmt_ltk_info);
4030 if (expected_len != len) {
4031 BT_ERR("load_long_term_keys: expected %u bytes, got %u bytes",
4033 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4034 MGMT_STATUS_INVALID_PARAMS);
4037 BT_DBG("%s key_count %u", hdev->name, key_count);
4039 for (i = 0; i < key_count; i++) {
4040 struct mgmt_ltk_info *key = &cp->keys[i];
4042 if (!ltk_is_valid(key))
4043 return cmd_status(sk, hdev->id,
4044 MGMT_OP_LOAD_LONG_TERM_KEYS,
4045 MGMT_STATUS_INVALID_PARAMS);
4050 hci_smp_ltks_clear(hdev);
4052 for (i = 0; i < key_count; i++) {
4053 struct mgmt_ltk_info *key = &cp->keys[i];
4056 if (key->addr.type == BDADDR_LE_PUBLIC)
4057 addr_type = ADDR_LE_DEV_PUBLIC;
4059 addr_type = ADDR_LE_DEV_RANDOM;
4064 type = HCI_SMP_LTK_SLAVE;
4066 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
4067 type, 0, key->authenticated, key->val,
4068 key->enc_size, key->ediv, key->rand);
4071 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4074 hci_dev_unlock(hdev);
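/* Management command dispatch table, indexed by opcode: each entry
 * gives the handler, whether the command takes variable length
 * parameters and the expected (or minimum) parameter size.
 */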
4079 static const struct mgmt_handler {
4080 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4084 } mgmt_handlers[] = {
4085 { NULL }, /* 0x0000 (no command) */
4086 { read_version, false, MGMT_READ_VERSION_SIZE },
4087 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4088 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4089 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4090 { set_powered, false, MGMT_SETTING_SIZE },
4091 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4092 { set_connectable, false, MGMT_SETTING_SIZE },
4093 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4094 { set_pairable, false, MGMT_SETTING_SIZE },
4095 { set_link_security, false, MGMT_SETTING_SIZE },
4096 { set_ssp, false, MGMT_SETTING_SIZE },
4097 { set_hs, false, MGMT_SETTING_SIZE },
4098 { set_le, false, MGMT_SETTING_SIZE },
4099 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4100 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4101 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4102 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4103 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4104 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4105 { disconnect, false, MGMT_DISCONNECT_SIZE },
4106 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4107 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4108 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4109 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4110 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4111 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4112 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4113 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4114 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4115 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4116 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4117 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4118 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4119 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4120 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4121 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4122 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4123 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4124 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4125 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4126 { set_advertising, false, MGMT_SETTING_SIZE },
4127 { set_bredr, false, MGMT_SETTING_SIZE },
4128 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4129 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
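/* Entry point for management commands received on the HCI control
 * channel: the message is copied in from the iovec, the header
 * (opcode, index, length) is validated against the table above and
 * the command is dispatched to the matching handler.
 */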
4133 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4137 struct mgmt_hdr *hdr;
4138 u16 opcode, index, len;
4139 struct hci_dev *hdev = NULL;
4140 const struct mgmt_handler *handler;
4143 BT_DBG("got %zu bytes", msglen);
4145 if (msglen < sizeof(*hdr))
4148 buf = kmalloc(msglen, GFP_KERNEL);
4152 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4158 opcode = __le16_to_cpu(hdr->opcode);
4159 index = __le16_to_cpu(hdr->index);
4160 len = __le16_to_cpu(hdr->len);
4162 if (len != msglen - sizeof(*hdr)) {
4167 if (index != MGMT_INDEX_NONE) {
4168 hdev = hci_dev_get(index);
4170 err = cmd_status(sk, index, opcode,
4171 MGMT_STATUS_INVALID_INDEX);
4175 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4176 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4177 err = cmd_status(sk, index, opcode,
4178 MGMT_STATUS_INVALID_INDEX);
4183 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4184 mgmt_handlers[opcode].func == NULL) {
4185 BT_DBG("Unknown op %u", opcode);
4186 err = cmd_status(sk, index, opcode,
4187 MGMT_STATUS_UNKNOWN_COMMAND);
4191 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4192 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4193 err = cmd_status(sk, index, opcode,
4194 MGMT_STATUS_INVALID_INDEX);
4198 handler = &mgmt_handlers[opcode];
4200 if ((handler->var_len && len < handler->data_len) ||
4201 (!handler->var_len && len != handler->data_len)) {
4202 err = cmd_status(sk, index, opcode,
4203 MGMT_STATUS_INVALID_PARAMS);
4208 mgmt_init_hdev(sk, hdev);
4210 cp = buf + sizeof(*hdr);
4212 err = handler->func(sk, hdev, cp, len);
4226 void mgmt_index_added(struct hci_dev *hdev)
4228 if (hdev->dev_type != HCI_BREDR)
4231 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4234 void mgmt_index_removed(struct hci_dev *hdev)
4236 u8 status = MGMT_STATUS_INVALID_INDEX;
4238 if (hdev->dev_type != HCI_BREDR)
4241 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4243 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4246 static void powered_complete(struct hci_dev *hdev, u8 status)
4248 struct cmd_lookup match = { NULL, hdev };
4250 BT_DBG("status 0x%02x", status);
4254 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4256 new_settings(hdev, match.sk);
4258 hci_dev_unlock(hdev);
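/* Bring the controller in sync with the management settings when it
 * gets powered on: SSP mode, LE host support, static random address,
 * advertising and scan response data, authentication and page scan.
 */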
4264 static int powered_update_hci(struct hci_dev *hdev)
4266 struct hci_request req;
4269 hci_req_init(&req, hdev);
4271 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4272 !lmp_host_ssp_capable(hdev)) {
4275 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4278 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4279 lmp_bredr_capable(hdev)) {
4280 struct hci_cp_write_le_host_supported cp;
4283 cp.simul = lmp_le_br_capable(hdev);
4285 /* Check first if we already have the right
4286 * host state (host features set)
4288 if (cp.le != lmp_host_le_capable(hdev) ||
4289 cp.simul != lmp_host_le_br_capable(hdev))
4290 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4294 if (lmp_le_capable(hdev)) {
4295 /* Set random address to static address if configured */
4296 if (bacmp(&hdev->static_addr, BDADDR_ANY))
4297 hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4298 &hdev->static_addr);
4300 /* Make sure the controller has a good default for
4301 * advertising data. This also applies to the case
4302 * where BR/EDR was toggled during the AUTO_OFF phase.
4304 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4305 update_adv_data(&req);
4306 update_scan_rsp_data(&req);
4309 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4310 enable_advertising(&req);
4313 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4314 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4315 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4316 sizeof(link_sec), &link_sec);
4318 if (lmp_bredr_capable(hdev)) {
4319 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4320 set_bredr_scan(&req);
4326 return hci_req_run(&req, powered_complete);
4329 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4331 struct cmd_lookup match = { NULL, hdev };
4332 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4333 u8 zero_cod[] = { 0, 0, 0 };
4336 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4340 if (powered_update_hci(hdev) == 0)
4343 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4348 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4349 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4351 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4352 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4353 zero_cod, sizeof(zero_cod), NULL);
4356 err = new_settings(hdev, match.sk);
4364 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4366 struct pending_cmd *cmd;
4369 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4373 if (err == -ERFKILL)
4374 status = MGMT_STATUS_RFKILLED;
4376 status = MGMT_STATUS_FAILED;
4378 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4380 mgmt_pending_remove(cmd);
4383 void mgmt_discoverable_timeout(struct hci_dev *hdev)
4385 struct hci_request req;
4389 /* When the discoverable timeout triggers, just make sure
4390 * the limited discoverable flag is cleared. Even in the case
4391 * of a timeout triggered from general discoverable, it is
4392 * safe to unconditionally clear the flag.
4394 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4395 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4397 hci_req_init(&req, hdev);
4398 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4399 u8 scan = SCAN_PAGE;
4400 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
4401 sizeof(scan), &scan);
4404 update_adv_data(&req);
4405 hci_req_run(&req, NULL);
4407 hdev->discov_timeout = 0;
4409 new_settings(hdev, NULL);
4411 hci_dev_unlock(hdev);
4414 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4418 /* Nothing needed here if there's a pending command since that
4419 * command's request completion callback takes care of everything necessary.
4422 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4426 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4428 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4429 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4433 struct hci_request req;
4435 /* In case this change in discoverable was triggered by
4436 * a disabling of connectable there could be a need to
4437 * update the advertising flags.
4439 hci_req_init(&req, hdev);
4440 update_adv_data(&req);
4441 hci_req_run(&req, NULL);
4443 new_settings(hdev, NULL);
4447 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4451 /* Nothing needed here if there's a pending command since that
4452 * command's request completion callback takes care of everything necessary.
4455 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4459 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4461 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4464 new_settings(hdev, NULL);
4467 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4469 u8 mgmt_err = mgmt_status(status);
4471 if (scan & SCAN_PAGE)
4472 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4473 cmd_status_rsp, &mgmt_err);
4475 if (scan & SCAN_INQUIRY)
4476 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4477 cmd_status_rsp, &mgmt_err);
4480 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4483 struct mgmt_ev_new_link_key ev;
4485 memset(&ev, 0, sizeof(ev));
4487 ev.store_hint = persistent;
4488 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4489 ev.key.addr.type = BDADDR_BREDR;
4490 ev.key.type = key->type;
4491 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4492 ev.key.pin_len = key->pin_len;
4494 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4497 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4499 struct mgmt_ev_new_long_term_key ev;
4501 memset(&ev, 0, sizeof(ev));
4503 ev.store_hint = persistent;
4504 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4505 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4506 ev.key.authenticated = key->authenticated;
4507 ev.key.enc_size = key->enc_size;
4508 ev.key.ediv = key->ediv;
4510 if (key->type == HCI_SMP_LTK)
4513 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4514 memcpy(ev.key.val, key->val, sizeof(key->val));
4516 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
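/* Append one EIR field in length/type/data format: a length octet
 * covering type and data, the type octet and then the data itself.
 * Returns the updated EIR length.
 */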
4519 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4522 eir[eir_len++] = sizeof(type) + data_len;
4523 eir[eir_len++] = type;
4524 memcpy(&eir[eir_len], data, data_len);
4525 eir_len += data_len;
4530 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4531 u8 addr_type, u32 flags, u8 *name, u8 name_len,
4535 struct mgmt_ev_device_connected *ev = (void *) buf;
4538 bacpy(&ev->addr.bdaddr, bdaddr);
4539 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4541 ev->flags = __cpu_to_le32(flags);
4544 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
4547 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
4548 eir_len = eir_append_data(ev->eir, eir_len,
4549 EIR_CLASS_OF_DEV, dev_class, 3);
4551 ev->eir_len = cpu_to_le16(eir_len);
4553 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
4554 sizeof(*ev) + eir_len, NULL);
4557 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4559 struct mgmt_cp_disconnect *cp = cmd->param;
4560 struct sock **sk = data;
4561 struct mgmt_rp_disconnect rp;
4563 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4564 rp.addr.type = cp->addr.type;
4566 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4572 mgmt_pending_remove(cmd);
4575 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4577 struct hci_dev *hdev = data;
4578 struct mgmt_cp_unpair_device *cp = cmd->param;
4579 struct mgmt_rp_unpair_device rp;
4581 memset(&rp, 0, sizeof(rp));
4582 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4583 rp.addr.type = cp->addr.type;
4585 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4587 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4589 mgmt_pending_remove(cmd);
4592 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
4593 u8 link_type, u8 addr_type, u8 reason)
4595 struct mgmt_ev_device_disconnected ev;
4596 struct sock *sk = NULL;
4598 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
4600 bacpy(&ev.addr.bdaddr, bdaddr);
4601 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4604 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
4609 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4613 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
4614 u8 link_type, u8 addr_type, u8 status)
4616 struct mgmt_rp_disconnect rp;
4617 struct pending_cmd *cmd;
4619 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4622 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
4626 bacpy(&rp.addr.bdaddr, bdaddr);
4627 rp.addr.type = link_to_bdaddr(link_type, addr_type);
4629 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
4630 mgmt_status(status), &rp, sizeof(rp));
4632 mgmt_pending_remove(cmd);
4635 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4636 u8 addr_type, u8 status)
4638 struct mgmt_ev_connect_failed ev;
4640 bacpy(&ev.addr.bdaddr, bdaddr);
4641 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4642 ev.status = mgmt_status(status);
4644 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
4647 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4649 struct mgmt_ev_pin_code_request ev;
4651 bacpy(&ev.addr.bdaddr, bdaddr);
4652 ev.addr.type = BDADDR_BREDR;
4655 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
4658 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4661 struct pending_cmd *cmd;
4662 struct mgmt_rp_pin_code_reply rp;
4664 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4668 bacpy(&rp.addr.bdaddr, bdaddr);
4669 rp.addr.type = BDADDR_BREDR;
4671 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4672 mgmt_status(status), &rp, sizeof(rp));
4674 mgmt_pending_remove(cmd);
4677 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4680 struct pending_cmd *cmd;
4681 struct mgmt_rp_pin_code_reply rp;
4683 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4687 bacpy(&rp.addr.bdaddr, bdaddr);
4688 rp.addr.type = BDADDR_BREDR;
4690 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4691 mgmt_status(status), &rp, sizeof(rp));
4693 mgmt_pending_remove(cmd);
4696 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4697 u8 link_type, u8 addr_type, __le32 value,
4700 struct mgmt_ev_user_confirm_request ev;
4702 BT_DBG("%s", hdev->name);
4704 bacpy(&ev.addr.bdaddr, bdaddr);
4705 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4706 ev.confirm_hint = confirm_hint;
4709 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
4713 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4714 u8 link_type, u8 addr_type)
4716 struct mgmt_ev_user_passkey_request ev;
4718 BT_DBG("%s", hdev->name);
4720 bacpy(&ev.addr.bdaddr, bdaddr);
4721 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4723 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
4727 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4728 u8 link_type, u8 addr_type, u8 status,
4731 struct pending_cmd *cmd;
4732 struct mgmt_rp_user_confirm_reply rp;
4735 cmd = mgmt_pending_find(opcode, hdev);
4739 bacpy(&rp.addr.bdaddr, bdaddr);
4740 rp.addr.type = link_to_bdaddr(link_type, addr_type);
4741 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
4744 mgmt_pending_remove(cmd);
4749 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4750 u8 link_type, u8 addr_type, u8 status)
4752 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4753 status, MGMT_OP_USER_CONFIRM_REPLY);
4756 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4757 u8 link_type, u8 addr_type, u8 status)
4759 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4761 MGMT_OP_USER_CONFIRM_NEG_REPLY);
4764 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4765 u8 link_type, u8 addr_type, u8 status)
4767 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4768 status, MGMT_OP_USER_PASSKEY_REPLY);
4771 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4772 u8 link_type, u8 addr_type, u8 status)
4774 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4776 MGMT_OP_USER_PASSKEY_NEG_REPLY);
4779 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4780 u8 link_type, u8 addr_type, u32 passkey,
4783 struct mgmt_ev_passkey_notify ev;
4785 BT_DBG("%s", hdev->name);
4787 bacpy(&ev.addr.bdaddr, bdaddr);
4788 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4789 ev.passkey = __cpu_to_le32(passkey);
4790 ev.entered = entered;
4792 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
4795 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4796 u8 addr_type, u8 status)
4798 struct mgmt_ev_auth_failed ev;
4800 bacpy(&ev.addr.bdaddr, bdaddr);
4801 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4802 ev.status = mgmt_status(status);
4804 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
4807 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4809 struct cmd_lookup match = { NULL, hdev };
4813 u8 mgmt_err = mgmt_status(status);
4814 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4815 cmd_status_rsp, &mgmt_err);
4819 if (test_bit(HCI_AUTH, &hdev->flags))
4820 changed = !test_and_set_bit(HCI_LINK_SECURITY,
4823 changed = test_and_clear_bit(HCI_LINK_SECURITY,
4826 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4830 new_settings(hdev, match.sk);
4836 static void clear_eir(struct hci_request *req)
4838 struct hci_dev *hdev = req->hdev;
4839 struct hci_cp_write_eir cp;
4841 if (!lmp_ext_inq_capable(hdev))
4844 memset(hdev->eir, 0, sizeof(hdev->eir));
4846 memset(&cp, 0, sizeof(cp));
4848 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
4851 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4853 struct cmd_lookup match = { NULL, hdev };
4854 struct hci_request req;
4855 bool changed = false;
4858 u8 mgmt_err = mgmt_status(status);
4860 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4861 &hdev->dev_flags)) {
4862 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4863 new_settings(hdev, NULL);
4866 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4872 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4874 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4876 changed = test_and_clear_bit(HCI_HS_ENABLED,
4879 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4882 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4885 new_settings(hdev, match.sk);
4890 hci_req_init(&req, hdev);
4892 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4897 hci_req_run(&req, NULL);
4900 static void sk_lookup(struct pending_cmd *cmd, void *data)
4902 struct cmd_lookup *match = data;
4904 if (match->sk == NULL) {
4905 match->sk = cmd->sk;
4906 sock_hold(match->sk);
4910 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4913 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4915 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4916 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4917 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4920 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
4927 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4929 struct mgmt_cp_set_local_name ev;
4930 struct pending_cmd *cmd;
4935 memset(&ev, 0, sizeof(ev));
4936 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4937 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4939 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4941 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4943 /* If this is an HCI command related to powering on the
4944 * HCI dev, don't send any mgmt signals.
4946 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4950 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4951 cmd ? cmd->sk : NULL);
4954 void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4955 u8 *randomizer, u8 status)
4957 struct pending_cmd *cmd;
4959 BT_DBG("%s status %u", hdev->name, status);
4961 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4966 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4967 mgmt_status(status));
4969 struct mgmt_rp_read_local_oob_data rp;
4971 memcpy(rp.hash, hash, sizeof(rp.hash));
4972 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4974 cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4975 0, &rp, sizeof(rp));
4978 mgmt_pending_remove(cmd);
4981 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4982 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
4983 u8 ssp, u8 *eir, u16 eir_len)
4986 struct mgmt_ev_device_found *ev = (void *) buf;
4989 if (!hci_discovery_active(hdev))
4992 /* Leave 5 bytes for a potential CoD field */
4993 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4996 memset(buf, 0, sizeof(buf));
4998 bacpy(&ev->addr.bdaddr, bdaddr);
4999 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5002 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5004 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5007 memcpy(ev->eir, eir, eir_len);
5009 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5010 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5013 ev->eir_len = cpu_to_le16(eir_len);
5014 ev_size = sizeof(*ev) + eir_len;
5016 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
5019 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5020 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5022 struct mgmt_ev_device_found *ev;
5023 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5026 ev = (struct mgmt_ev_device_found *) buf;
5028 memset(buf, 0, sizeof(buf));
5030 bacpy(&ev->addr.bdaddr, bdaddr);
5031 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5034 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5037 ev->eir_len = cpu_to_le16(eir_len);
5039 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5042 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5044 struct mgmt_ev_discovering ev;
5045 struct pending_cmd *cmd;
5047 BT_DBG("%s discovering %u", hdev->name, discovering);
5050 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5052 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5055 u8 type = hdev->discovery.type;
5057 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5059 mgmt_pending_remove(cmd);
5062 memset(&ev, 0, sizeof(ev));
5063 ev.type = hdev->discovery.type;
5064 ev.discovering = discovering;
5066 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5069 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5071 struct pending_cmd *cmd;
5072 struct mgmt_ev_device_blocked ev;
5074 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5076 bacpy(&ev.addr.bdaddr, bdaddr);
5077 ev.addr.type = type;
5079 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5080 cmd ? cmd->sk : NULL);
5083 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5085 struct pending_cmd *cmd;
5086 struct mgmt_ev_device_unblocked ev;
5088 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5090 bacpy(&ev.addr.bdaddr, bdaddr);
5091 ev.addr.type = type;
5093 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5094 cmd ? cmd->sk : NULL);
5097 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5099 BT_DBG("%s status %u", hdev->name, status);
5101 /* Clear the advertising mgmt setting if we failed to re-enable it */
5103 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5104 new_settings(hdev, NULL);
5108 void mgmt_reenable_advertising(struct hci_dev *hdev)
5110 struct hci_request req;
5112 if (hci_conn_num(hdev, LE_LINK) > 0)
5115 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5118 hci_req_init(&req, hdev);
5119 enable_advertising(&req);
5121 /* If this fails we have no option but to let user space know
5122 * that we've disabled advertising.
5124 if (hci_req_run(&req, adv_enable_complete) < 0) {
5125 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5126 new_settings(hdev, NULL);