2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
105 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
107 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
108 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
111 struct list_head list;
119 /* HCI to MGMT error code conversion table */
120 static u8 mgmt_status_table[] = {
122 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
123 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
124 MGMT_STATUS_FAILED, /* Hardware Failure */
125 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
126 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
127 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
128 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
129 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
130 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
131 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
132 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
133 MGMT_STATUS_BUSY, /* Command Disallowed */
134 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
135 MGMT_STATUS_REJECTED, /* Rejected Security */
136 MGMT_STATUS_REJECTED, /* Rejected Personal */
137 MGMT_STATUS_TIMEOUT, /* Host Timeout */
138 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
139 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
140 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
141 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
142 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
143 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
144 MGMT_STATUS_BUSY, /* Repeated Attempts */
145 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
146 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
147 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
148 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
149 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
150 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
151 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
152 MGMT_STATUS_FAILED, /* Unspecified Error */
153 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
154 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
155 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
156 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
157 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
158 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
159 MGMT_STATUS_FAILED, /* Unit Link Key Used */
160 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
161 MGMT_STATUS_TIMEOUT, /* Instant Passed */
162 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
163 MGMT_STATUS_FAILED, /* Transaction Collision */
164 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
165 MGMT_STATUS_REJECTED, /* QoS Rejected */
166 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
167 MGMT_STATUS_REJECTED, /* Insufficient Security */
168 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
169 MGMT_STATUS_BUSY, /* Role Switch Pending */
170 MGMT_STATUS_FAILED, /* Slot Violation */
171 MGMT_STATUS_FAILED, /* Role Switch Failed */
172 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
173 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
174 MGMT_STATUS_BUSY, /* Host Busy Pairing */
175 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
176 MGMT_STATUS_BUSY, /* Controller Busy */
177 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
178 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
179 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
180 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
181 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
184 bool mgmt_valid_hdev(struct hci_dev *hdev)
186 return hdev->dev_type == HCI_BREDR;
189 static u8 mgmt_status(u8 hci_status)
191 if (hci_status < ARRAY_SIZE(mgmt_status_table))
192 return mgmt_status_table[hci_status];
194 return MGMT_STATUS_FAILED;
197 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
200 struct mgmt_hdr *hdr;
201 struct mgmt_ev_cmd_status *ev;
204 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
206 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
210 hdr = (void *) skb_put(skb, sizeof(*hdr));
212 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
213 hdr->index = cpu_to_le16(index);
214 hdr->len = cpu_to_le16(sizeof(*ev));
216 ev = (void *) skb_put(skb, sizeof(*ev));
218 ev->opcode = cpu_to_le16(cmd);
220 err = sock_queue_rcv_skb(sk, skb);
227 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
228 void *rp, size_t rp_len)
231 struct mgmt_hdr *hdr;
232 struct mgmt_ev_cmd_complete *ev;
235 BT_DBG("sock %p", sk);
237 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
241 hdr = (void *) skb_put(skb, sizeof(*hdr));
243 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
244 hdr->index = cpu_to_le16(index);
245 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
247 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
248 ev->opcode = cpu_to_le16(cmd);
252 memcpy(ev->data, rp, rp_len);
254 err = sock_queue_rcv_skb(sk, skb);
261 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
264 struct mgmt_rp_read_version rp;
266 BT_DBG("sock %p", sk);
268 rp.version = MGMT_VERSION;
269 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
271 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
275 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
278 struct mgmt_rp_read_commands *rp;
279 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
280 const u16 num_events = ARRAY_SIZE(mgmt_events);
285 BT_DBG("sock %p", sk);
287 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
289 rp = kmalloc(rp_size, GFP_KERNEL);
293 rp->num_commands = __constant_cpu_to_le16(num_commands);
294 rp->num_events = __constant_cpu_to_le16(num_events);
296 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
297 put_unaligned_le16(mgmt_commands[i], opcode);
299 for (i = 0; i < num_events; i++, opcode++)
300 put_unaligned_le16(mgmt_events[i], opcode);
302 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
309 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
312 struct mgmt_rp_read_index_list *rp;
318 BT_DBG("sock %p", sk);
320 read_lock(&hci_dev_list_lock);
323 list_for_each_entry(d, &hci_dev_list, list) {
324 if (!mgmt_valid_hdev(d))
330 rp_len = sizeof(*rp) + (2 * count);
331 rp = kmalloc(rp_len, GFP_ATOMIC);
333 read_unlock(&hci_dev_list_lock);
338 list_for_each_entry(d, &hci_dev_list, list) {
339 if (test_bit(HCI_SETUP, &d->dev_flags))
342 if (!mgmt_valid_hdev(d))
345 rp->index[count++] = cpu_to_le16(d->id);
346 BT_DBG("Added hci%u", d->id);
349 rp->num_controllers = cpu_to_le16(count);
350 rp_len = sizeof(*rp) + (2 * count);
352 read_unlock(&hci_dev_list_lock);
354 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
362 static u32 get_supported_settings(struct hci_dev *hdev)
366 settings |= MGMT_SETTING_POWERED;
367 settings |= MGMT_SETTING_PAIRABLE;
369 if (lmp_ssp_capable(hdev))
370 settings |= MGMT_SETTING_SSP;
372 if (lmp_bredr_capable(hdev)) {
373 settings |= MGMT_SETTING_CONNECTABLE;
374 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
375 settings |= MGMT_SETTING_FAST_CONNECTABLE;
376 settings |= MGMT_SETTING_DISCOVERABLE;
377 settings |= MGMT_SETTING_BREDR;
378 settings |= MGMT_SETTING_LINK_SECURITY;
382 settings |= MGMT_SETTING_HS;
384 if (lmp_le_capable(hdev))
385 settings |= MGMT_SETTING_LE;
390 static u32 get_current_settings(struct hci_dev *hdev)
394 if (hdev_is_powered(hdev))
395 settings |= MGMT_SETTING_POWERED;
397 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
398 settings |= MGMT_SETTING_CONNECTABLE;
400 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
401 settings |= MGMT_SETTING_FAST_CONNECTABLE;
403 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
404 settings |= MGMT_SETTING_DISCOVERABLE;
406 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
407 settings |= MGMT_SETTING_PAIRABLE;
409 if (lmp_bredr_capable(hdev))
410 settings |= MGMT_SETTING_BREDR;
412 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
413 settings |= MGMT_SETTING_LE;
415 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
416 settings |= MGMT_SETTING_LINK_SECURITY;
418 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
419 settings |= MGMT_SETTING_SSP;
421 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_HS;
427 #define PNP_INFO_SVCLASS_ID 0x1200
429 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
431 u8 *ptr = data, *uuids_start = NULL;
432 struct bt_uuid *uuid;
437 list_for_each_entry(uuid, &hdev->uuids, list) {
440 if (uuid->size != 16)
443 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
447 if (uuid16 == PNP_INFO_SVCLASS_ID)
453 uuids_start[1] = EIR_UUID16_ALL;
457 /* Stop if not enough space to put next UUID */
458 if ((ptr - data) + sizeof(u16) > len) {
459 uuids_start[1] = EIR_UUID16_SOME;
463 *ptr++ = (uuid16 & 0x00ff);
464 *ptr++ = (uuid16 & 0xff00) >> 8;
465 uuids_start[0] += sizeof(uuid16);
471 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
473 u8 *ptr = data, *uuids_start = NULL;
474 struct bt_uuid *uuid;
479 list_for_each_entry(uuid, &hdev->uuids, list) {
480 if (uuid->size != 32)
486 uuids_start[1] = EIR_UUID32_ALL;
490 /* Stop if not enough space to put next UUID */
491 if ((ptr - data) + sizeof(u32) > len) {
492 uuids_start[1] = EIR_UUID32_SOME;
496 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
498 uuids_start[0] += sizeof(u32);
504 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
506 u8 *ptr = data, *uuids_start = NULL;
507 struct bt_uuid *uuid;
512 list_for_each_entry(uuid, &hdev->uuids, list) {
513 if (uuid->size != 128)
519 uuids_start[1] = EIR_UUID128_ALL;
523 /* Stop if not enough space to put next UUID */
524 if ((ptr - data) + 16 > len) {
525 uuids_start[1] = EIR_UUID128_SOME;
529 memcpy(ptr, uuid->uuid, 16);
531 uuids_start[0] += 16;
537 static void create_eir(struct hci_dev *hdev, u8 *data)
542 name_len = strlen(hdev->dev_name);
548 ptr[1] = EIR_NAME_SHORT;
550 ptr[1] = EIR_NAME_COMPLETE;
552 /* EIR Data length */
553 ptr[0] = name_len + 1;
555 memcpy(ptr + 2, hdev->dev_name, name_len);
557 ptr += (name_len + 2);
560 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
562 ptr[1] = EIR_TX_POWER;
563 ptr[2] = (u8) hdev->inq_tx_power;
568 if (hdev->devid_source > 0) {
570 ptr[1] = EIR_DEVICE_ID;
572 put_unaligned_le16(hdev->devid_source, ptr + 2);
573 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
574 put_unaligned_le16(hdev->devid_product, ptr + 6);
575 put_unaligned_le16(hdev->devid_version, ptr + 8);
580 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
581 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
582 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
585 static void update_eir(struct hci_request *req)
587 struct hci_dev *hdev = req->hdev;
588 struct hci_cp_write_eir cp;
590 if (!hdev_is_powered(hdev))
593 if (!lmp_ext_inq_capable(hdev))
596 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
599 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
602 memset(&cp, 0, sizeof(cp));
604 create_eir(hdev, cp.data);
606 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
609 memcpy(hdev->eir, cp.data, sizeof(cp.data));
611 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
614 static u8 get_service_classes(struct hci_dev *hdev)
616 struct bt_uuid *uuid;
619 list_for_each_entry(uuid, &hdev->uuids, list)
620 val |= uuid->svc_hint;
625 static void update_class(struct hci_request *req)
627 struct hci_dev *hdev = req->hdev;
630 BT_DBG("%s", hdev->name);
632 if (!hdev_is_powered(hdev))
635 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
638 cod[0] = hdev->minor_class;
639 cod[1] = hdev->major_class;
640 cod[2] = get_service_classes(hdev);
642 if (memcmp(cod, hdev->dev_class, 3) == 0)
645 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
648 static void service_cache_off(struct work_struct *work)
650 struct hci_dev *hdev = container_of(work, struct hci_dev,
652 struct hci_request req;
654 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
657 hci_req_init(&req, hdev);
664 hci_dev_unlock(hdev);
666 hci_req_run(&req, NULL);
669 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
671 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
674 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
676 /* Non-mgmt controlled devices get this bit set
677 * implicitly so that pairing works for them, however
678 * for mgmt we require user-space to explicitly enable
681 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
684 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
685 void *data, u16 data_len)
687 struct mgmt_rp_read_info rp;
689 BT_DBG("sock %p %s", sk, hdev->name);
693 memset(&rp, 0, sizeof(rp));
695 bacpy(&rp.bdaddr, &hdev->bdaddr);
697 rp.version = hdev->hci_ver;
698 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
700 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
701 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
703 memcpy(rp.dev_class, hdev->dev_class, 3);
705 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
706 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
708 hci_dev_unlock(hdev);
710 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
714 static void mgmt_pending_free(struct pending_cmd *cmd)
721 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
722 struct hci_dev *hdev, void *data,
725 struct pending_cmd *cmd;
727 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
731 cmd->opcode = opcode;
732 cmd->index = hdev->id;
734 cmd->param = kmalloc(len, GFP_KERNEL);
741 memcpy(cmd->param, data, len);
746 list_add(&cmd->list, &hdev->mgmt_pending);
751 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
752 void (*cb)(struct pending_cmd *cmd,
756 struct pending_cmd *cmd, *tmp;
758 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
759 if (opcode > 0 && cmd->opcode != opcode)
766 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
768 struct pending_cmd *cmd;
770 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
771 if (cmd->opcode == opcode)
778 static void mgmt_pending_remove(struct pending_cmd *cmd)
780 list_del(&cmd->list);
781 mgmt_pending_free(cmd);
784 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
786 __le32 settings = cpu_to_le32(get_current_settings(hdev));
788 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
792 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
795 struct mgmt_mode *cp = data;
796 struct pending_cmd *cmd;
799 BT_DBG("request for %s", hdev->name);
801 if (cp->val != 0x00 && cp->val != 0x01)
802 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
803 MGMT_STATUS_INVALID_PARAMS);
807 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
808 cancel_delayed_work(&hdev->power_off);
811 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
813 err = mgmt_powered(hdev, 1);
818 if (!!cp->val == hdev_is_powered(hdev)) {
819 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
823 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
824 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
829 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
836 queue_work(hdev->req_workqueue, &hdev->power_on);
838 queue_work(hdev->req_workqueue, &hdev->power_off.work);
843 hci_dev_unlock(hdev);
847 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
848 struct sock *skip_sk)
851 struct mgmt_hdr *hdr;
853 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
857 hdr = (void *) skb_put(skb, sizeof(*hdr));
858 hdr->opcode = cpu_to_le16(event);
860 hdr->index = cpu_to_le16(hdev->id);
862 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
863 hdr->len = cpu_to_le16(data_len);
866 memcpy(skb_put(skb, data_len), data, data_len);
869 __net_timestamp(skb);
871 hci_send_to_control(skb, skip_sk);
877 static int new_settings(struct hci_dev *hdev, struct sock *skip)
881 ev = cpu_to_le32(get_current_settings(hdev));
883 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
886 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
889 struct mgmt_cp_set_discoverable *cp = data;
890 struct pending_cmd *cmd;
895 BT_DBG("request for %s", hdev->name);
897 if (!lmp_bredr_capable(hdev))
898 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
899 MGMT_STATUS_NOT_SUPPORTED);
901 if (cp->val != 0x00 && cp->val != 0x01)
902 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
903 MGMT_STATUS_INVALID_PARAMS);
905 timeout = __le16_to_cpu(cp->timeout);
906 if (!cp->val && timeout > 0)
907 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
908 MGMT_STATUS_INVALID_PARAMS);
912 if (!hdev_is_powered(hdev) && timeout > 0) {
913 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
914 MGMT_STATUS_NOT_POWERED);
918 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
919 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
920 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
925 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
926 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
927 MGMT_STATUS_REJECTED);
931 if (!hdev_is_powered(hdev)) {
932 bool changed = false;
934 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
935 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
939 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
944 err = new_settings(hdev, sk);
949 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
950 if (hdev->discov_timeout > 0) {
951 cancel_delayed_work(&hdev->discov_off);
952 hdev->discov_timeout = 0;
955 if (cp->val && timeout > 0) {
956 hdev->discov_timeout = timeout;
957 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
958 msecs_to_jiffies(hdev->discov_timeout * 1000));
961 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
965 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
974 scan |= SCAN_INQUIRY;
976 cancel_delayed_work(&hdev->discov_off);
978 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
980 mgmt_pending_remove(cmd);
983 hdev->discov_timeout = timeout;
986 hci_dev_unlock(hdev);
990 static void write_fast_connectable(struct hci_request *req, bool enable)
992 struct hci_dev *hdev = req->hdev;
993 struct hci_cp_write_page_scan_activity acp;
996 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1000 type = PAGE_SCAN_TYPE_INTERLACED;
1002 /* 160 msec page scan interval */
1003 acp.interval = __constant_cpu_to_le16(0x0100);
1005 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1007 /* default 1.28 sec page scan */
1008 acp.interval = __constant_cpu_to_le16(0x0800);
1011 acp.window = __constant_cpu_to_le16(0x0012);
1013 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1014 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1015 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1018 if (hdev->page_scan_type != type)
1019 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1022 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1024 struct pending_cmd *cmd;
1026 BT_DBG("status 0x%02x", status);
1030 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1034 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1036 mgmt_pending_remove(cmd);
1039 hci_dev_unlock(hdev);
1042 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1045 struct mgmt_mode *cp = data;
1046 struct pending_cmd *cmd;
1047 struct hci_request req;
1051 BT_DBG("request for %s", hdev->name);
1053 if (!lmp_bredr_capable(hdev))
1054 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1055 MGMT_STATUS_NOT_SUPPORTED);
1057 if (cp->val != 0x00 && cp->val != 0x01)
1058 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1059 MGMT_STATUS_INVALID_PARAMS);
1063 if (!hdev_is_powered(hdev)) {
1064 bool changed = false;
1066 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1070 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1072 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1073 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1076 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1081 err = new_settings(hdev, sk);
1086 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1087 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1088 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1093 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1094 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1098 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1109 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1110 hdev->discov_timeout > 0)
1111 cancel_delayed_work(&hdev->discov_off);
1114 hci_req_init(&req, hdev);
1116 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1118 /* If we're going from non-connectable to connectable or
1119 * vice-versa when fast connectable is enabled ensure that fast
1120 * connectable gets disabled. write_fast_connectable won't do
1121 * anything if the page scan parameters are already what they
1124 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1125 write_fast_connectable(&req, false);
1127 err = hci_req_run(&req, set_connectable_complete);
1129 mgmt_pending_remove(cmd);
1132 hci_dev_unlock(hdev);
1136 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1139 struct mgmt_mode *cp = data;
1142 BT_DBG("request for %s", hdev->name);
1144 if (cp->val != 0x00 && cp->val != 0x01)
1145 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1146 MGMT_STATUS_INVALID_PARAMS);
1151 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1153 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1155 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1159 err = new_settings(hdev, sk);
1162 hci_dev_unlock(hdev);
1166 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1169 struct mgmt_mode *cp = data;
1170 struct pending_cmd *cmd;
1174 BT_DBG("request for %s", hdev->name);
1176 if (!lmp_bredr_capable(hdev))
1177 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1178 MGMT_STATUS_NOT_SUPPORTED);
1180 if (cp->val != 0x00 && cp->val != 0x01)
1181 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1182 MGMT_STATUS_INVALID_PARAMS);
1186 if (!hdev_is_powered(hdev)) {
1187 bool changed = false;
1189 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1190 &hdev->dev_flags)) {
1191 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1195 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1200 err = new_settings(hdev, sk);
1205 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1206 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1213 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1214 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1218 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1224 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1226 mgmt_pending_remove(cmd);
1231 hci_dev_unlock(hdev);
1235 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1237 struct mgmt_mode *cp = data;
1238 struct pending_cmd *cmd;
1242 BT_DBG("request for %s", hdev->name);
1244 if (!lmp_ssp_capable(hdev))
1245 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1246 MGMT_STATUS_NOT_SUPPORTED);
1248 if (cp->val != 0x00 && cp->val != 0x01)
1249 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1250 MGMT_STATUS_INVALID_PARAMS);
1256 if (!hdev_is_powered(hdev)) {
1257 bool changed = false;
1259 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1260 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1264 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1269 err = new_settings(hdev, sk);
1274 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1275 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1280 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1281 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1285 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1291 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1293 mgmt_pending_remove(cmd);
1298 hci_dev_unlock(hdev);
1302 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1304 struct mgmt_mode *cp = data;
1306 BT_DBG("request for %s", hdev->name);
1309 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1310 MGMT_STATUS_NOT_SUPPORTED);
1312 if (cp->val != 0x00 && cp->val != 0x01)
1313 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1314 MGMT_STATUS_INVALID_PARAMS);
1317 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1319 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1321 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1324 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1326 struct mgmt_mode *cp = data;
1327 struct hci_cp_write_le_host_supported hci_cp;
1328 struct pending_cmd *cmd;
1332 BT_DBG("request for %s", hdev->name);
1334 if (!lmp_le_capable(hdev))
1335 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1336 MGMT_STATUS_NOT_SUPPORTED);
1338 if (cp->val != 0x00 && cp->val != 0x01)
1339 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1340 MGMT_STATUS_INVALID_PARAMS);
1342 /* LE-only devices do not allow toggling LE on/off */
1343 if (!lmp_bredr_capable(hdev))
1344 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1345 MGMT_STATUS_REJECTED);
1350 enabled = lmp_host_le_capable(hdev);
1352 if (!hdev_is_powered(hdev) || val == enabled) {
1353 bool changed = false;
1355 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1356 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1360 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1365 err = new_settings(hdev, sk);
1370 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1371 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1376 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1382 memset(&hci_cp, 0, sizeof(hci_cp));
1386 hci_cp.simul = lmp_le_br_capable(hdev);
1389 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1392 mgmt_pending_remove(cmd);
1395 hci_dev_unlock(hdev);
1399 /* This is a helper function to test for pending mgmt commands that can
1400 * cause CoD or EIR HCI commands. We can only allow one such pending
1401 * mgmt command at a time since otherwise we cannot easily track what
1402 * the current values are, will be, and based on that calculate if a new
1403 * HCI command needs to be sent and if yes with what value.
1405 static bool pending_eir_or_class(struct hci_dev *hdev)
1407 struct pending_cmd *cmd;
1409 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1410 switch (cmd->opcode) {
1411 case MGMT_OP_ADD_UUID:
1412 case MGMT_OP_REMOVE_UUID:
1413 case MGMT_OP_SET_DEV_CLASS:
1414 case MGMT_OP_SET_POWERED:
1422 static const u8 bluetooth_base_uuid[] = {
1423 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1424 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1427 static u8 get_uuid_size(const u8 *uuid)
1431 if (memcmp(uuid, bluetooth_base_uuid, 12))
1434 val = get_unaligned_le32(&uuid[12]);
1441 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1443 struct pending_cmd *cmd;
1447 cmd = mgmt_pending_find(mgmt_op, hdev);
1451 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1452 hdev->dev_class, 3);
1454 mgmt_pending_remove(cmd);
1457 hci_dev_unlock(hdev);
1460 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1462 BT_DBG("status 0x%02x", status);
1464 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1467 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1469 struct mgmt_cp_add_uuid *cp = data;
1470 struct pending_cmd *cmd;
1471 struct hci_request req;
1472 struct bt_uuid *uuid;
1475 BT_DBG("request for %s", hdev->name);
1479 if (pending_eir_or_class(hdev)) {
1480 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1485 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1491 memcpy(uuid->uuid, cp->uuid, 16);
1492 uuid->svc_hint = cp->svc_hint;
1493 uuid->size = get_uuid_size(cp->uuid);
1495 list_add_tail(&uuid->list, &hdev->uuids);
1497 hci_req_init(&req, hdev);
1502 err = hci_req_run(&req, add_uuid_complete);
1504 if (err != -ENODATA)
1507 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1508 hdev->dev_class, 3);
1512 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1521 hci_dev_unlock(hdev);
1525 static bool enable_service_cache(struct hci_dev *hdev)
1527 if (!hdev_is_powered(hdev))
1530 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1531 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1539 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1541 BT_DBG("status 0x%02x", status);
1543 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Handle MGMT_OP_REMOVE_UUID: remove one UUID, or — when the all-zero
 * wildcard UUID is given — clear every stored UUID and re-enable the
 * service cache. An unknown UUID yields INVALID_PARAMS. */
1546 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1549 struct mgmt_cp_remove_uuid *cp = data;
1550 struct pending_cmd *cmd;
1551 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as a "remove everything" wildcard. */
1552 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1553 struct hci_request req;
1556 BT_DBG("request for %s", hdev->name);
1560 if (pending_eir_or_class(hdev)) {
1561 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1566 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1567 err = hci_uuids_clear(hdev);
/* When service caching could be re-armed, the class/EIR rewrite is
 * deferred, so answer with the current device class immediately. */
1569 if (enable_service_cache(hdev)) {
1570 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1571 0, hdev->dev_class, 3);
/* Single-UUID removal: unlink every list entry matching the 128-bit
 * value (safe iteration since entries are deleted in-loop). */
1580 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1581 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1584 list_del(&match->list);
1590 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1591 MGMT_STATUS_INVALID_PARAMS);
1596 hci_req_init(&req, hdev);
1601 err = hci_req_run(&req, remove_uuid_complete);
/* -ENODATA: nothing to send to the controller — complete synchronously. */
1603 if (err != -ENODATA)
1606 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1607 hdev->dev_class, 3);
1611 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1620 hci_dev_unlock(hdev);
/* hci_request completion callback for MGMT_OP_SET_DEV_CLASS. */
1624 static void set_class_complete(struct hci_dev *hdev, u8 status)
1626 BT_DBG("status 0x%02x", status);
1628 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handle MGMT_OP_SET_DEV_CLASS: update major/minor device class.
 * BR/EDR-only; rejects reserved bits (low 2 bits of minor, high 3 bits of
 * major must be zero per the Class of Device format). If powered down the
 * new class is stored and completed immediately. */
1631 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1634 struct mgmt_cp_set_dev_class *cp = data;
1635 struct pending_cmd *cmd;
1636 struct hci_request req;
1639 BT_DBG("request for %s", hdev->name);
1641 if (!lmp_bredr_capable(hdev))
1642 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1643 MGMT_STATUS_NOT_SUPPORTED);
1647 if (pending_eir_or_class(hdev)) {
1648 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved-bit validation of the CoD fields. */
1653 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1654 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1655 MGMT_STATUS_INVALID_PARAMS);
1659 hdev->major_class = cp->major;
1660 hdev->minor_class = cp->minor;
1662 if (!hdev_is_powered(hdev)) {
1663 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1664 hdev->dev_class, 3);
1668 hci_req_init(&req, hdev);
/* If the service cache was armed, flush it now; the lock must be dropped
 * around cancel_delayed_work_sync() to avoid deadlocking with the work. */
1670 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1671 hci_dev_unlock(hdev);
1672 cancel_delayed_work_sync(&hdev->service_cache);
1679 err = hci_req_run(&req, set_class_complete);
1681 if (err != -ENODATA)
1684 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1685 hdev->dev_class, 3);
1689 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1698 hci_dev_unlock(hdev);
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the stored BR/EDR link keys with
 * the list supplied by userspace and set/clear the debug-keys flag.
 * The payload length must exactly match sizeof(header) + N * key size. */
1702 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1705 struct mgmt_cp_load_link_keys *cp = data;
1706 u16 key_count, expected_len;
1709 key_count = __le16_to_cpu(cp->key_count);
/* NOTE(review): expected_len is u16 — a huge key_count would wrap this
 * multiplication; confirm the mgmt transport bounds len before here. */
1711 expected_len = sizeof(*cp) + key_count *
1712 sizeof(struct mgmt_link_key_info);
1713 if (expected_len != len) {
1714 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1716 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1717 MGMT_STATUS_INVALID_PARAMS);
1720 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1721 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1722 MGMT_STATUS_INVALID_PARAMS);
1724 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate every address type before touching state, so a
 * bad entry cannot leave the key store half-replaced. */
1727 for (i = 0; i < key_count; i++) {
1728 struct mgmt_link_key_info *key = &cp->keys[i];
1730 if (key->addr.type != BDADDR_BREDR)
1731 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1732 MGMT_STATUS_INVALID_PARAMS);
1737 hci_link_keys_clear(hdev);
1740 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1742 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
/* Second pass: store the validated keys. */
1744 for (i = 0; i < key_count; i++) {
1745 struct mgmt_link_key_info *key = &cp->keys[i];
1747 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1748 key->type, key->pin_len);
1751 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1753 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_UNPAIRED event for the given peer address,
 * skipping the socket that initiated the unpair (it gets a direct reply). */
1758 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1759 u8 addr_type, struct sock *skip_sk)
1761 struct mgmt_ev_device_unpaired ev;
1763 bacpy(&ev.addr.bdaddr, bdaddr);
1764 ev.addr.type = addr_type;
1766 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handle MGMT_OP_UNPAIR_DEVICE: delete the link key (BR/EDR) or LTK (LE)
 * for the peer and, when cp->disconnect is set and a connection exists,
 * issue an HCI Disconnect whose completion finishes the mgmt command. */
1770 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1773 struct mgmt_cp_unpair_device *cp = data;
1774 struct mgmt_rp_unpair_device rp;
1775 struct hci_cp_disconnect dc;
1776 struct pending_cmd *cmd;
1777 struct hci_conn *conn;
/* Reply always echoes the requested address. */
1780 memset(&rp, 0, sizeof(rp));
1781 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1782 rp.addr.type = cp->addr.type;
1784 if (!bdaddr_type_is_valid(cp->addr.type))
1785 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1786 MGMT_STATUS_INVALID_PARAMS,
1789 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1790 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1791 MGMT_STATUS_INVALID_PARAMS,
1796 if (!hdev_is_powered(hdev)) {
1797 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1798 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Key removal is type-specific: link key for BR/EDR, LTK otherwise. */
1802 if (cp->addr.type == BDADDR_BREDR)
1803 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1805 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1808 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1809 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1813 if (cp->disconnect) {
1814 if (cp->addr.type == BDADDR_BREDR)
1815 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1818 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection (or no disconnect requested): reply now and
 * broadcast the unpaired event to other mgmt sockets. */
1825 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1827 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1831 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1838 dc.handle = cpu_to_le16(conn->handle);
1839 dc.reason = 0x13; /* Remote User Terminated Connection */
1840 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1842 mgmt_pending_remove(cmd);
1845 hci_dev_unlock(hdev);
/* Handle MGMT_OP_DISCONNECT: terminate the ACL/LE connection to the peer.
 * Only one disconnect may be pending at a time; completion is reported
 * asynchronously when the HCI disconnect completes. */
1849 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1852 struct mgmt_cp_disconnect *cp = data;
1853 struct mgmt_rp_disconnect rp;
1854 struct hci_cp_disconnect dc;
1855 struct pending_cmd *cmd;
1856 struct hci_conn *conn;
1861 memset(&rp, 0, sizeof(rp));
1862 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1863 rp.addr.type = cp->addr.type;
1865 if (!bdaddr_type_is_valid(cp->addr.type))
1866 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1867 MGMT_STATUS_INVALID_PARAMS,
1872 if (!test_bit(HCI_UP, &hdev->flags)) {
1873 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1874 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Serialize: a second disconnect while one is in flight is rejected. */
1878 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1879 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1880 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1884 if (cp->addr.type == BDADDR_BREDR)
1885 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1888 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED states mean there is nothing to tear down. */
1890 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1891 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1892 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1896 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1902 dc.handle = cpu_to_le16(conn->handle);
1903 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1905 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1907 mgmt_pending_remove(cmd);
1910 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type pair onto the mgmt BDADDR_*
 * address-type encoding used on the wire towards userspace. */
1914 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1916 switch (link_type) {
1918 switch (addr_type) {
1919 case ADDR_LE_DEV_PUBLIC:
1920 return BDADDR_LE_PUBLIC;
1923 /* Fallback to LE Random address type */
1924 return BDADDR_LE_RANDOM;
1928 /* Fallback to BR/EDR type */
1929 return BDADDR_BREDR;
/* Handle MGMT_OP_GET_CONNECTIONS: report every mgmt-visible connection
 * (address + type). SCO/eSCO links are filtered out of the reply. */
1933 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1936 struct mgmt_rp_get_connections *rp;
1946 if (!hdev_is_powered(hdev)) {
1947 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1948 MGMT_STATUS_NOT_POWERED);
/* First pass: count connected entries to size the reply buffer. */
1953 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1954 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1958 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1959 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
1966 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1967 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1969 bacpy(&rp->addr[i].bdaddr, &c->dst);
1970 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1971 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1976 rp->conn_count = cpu_to_le16(i);
1978 /* Recalculate length in case of filtered SCO connections, etc */
1979 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1981 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1987 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY mgmt command and send the matching
 * HCI negative reply; the pending entry is dropped if the send fails. */
1991 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1992 struct mgmt_cp_pin_code_neg_reply *cp)
1994 struct pending_cmd *cmd;
1997 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2002 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2003 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2005 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PIN_CODE_REPLY: forward the user-supplied PIN to the
 * controller. A high-security pairing demands a full 16-byte PIN; a short
 * one is auto-converted into a negative reply. */
2010 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2013 struct hci_conn *conn;
2014 struct mgmt_cp_pin_code_reply *cp = data;
2015 struct hci_cp_pin_code_reply reply;
2016 struct pending_cmd *cmd;
2023 if (!hdev_is_powered(hdev)) {
2024 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2025 MGMT_STATUS_NOT_POWERED);
2029 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2031 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2032 MGMT_STATUS_NOT_CONNECTED);
/* BT_SECURITY_HIGH requires a 16-digit PIN; otherwise reject the
 * pairing on the peer's behalf with a negative reply. */
2036 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2037 struct mgmt_cp_pin_code_neg_reply ncp;
2039 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2041 BT_ERR("PIN code is not 16 bytes long");
2043 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2045 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2046 MGMT_STATUS_INVALID_PARAMS);
2051 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2057 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2058 reply.pin_len = cp->pin_len;
2059 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2061 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2063 mgmt_pending_remove(cmd);
2066 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability used for
 * future pairings; always succeeds synchronously. */
2070 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2073 struct mgmt_cp_set_io_capability *cp = data;
2079 hdev->io_capability = cp->io_capability;
2081 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2082 hdev->io_capability);
2084 hci_dev_unlock(hdev);
2086 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is this
 * connection, or NULL if no pairing is in progress for it. */
2090 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2092 struct hci_dev *hdev = conn->hdev;
2093 struct pending_cmd *cmd;
2095 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2096 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2099 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: send the reply, detach all connection
 * callbacks, drop the connection reference taken at pair time, and free
 * the pending command. */
2108 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2110 struct mgmt_rp_pair_device rp;
2111 struct hci_conn *conn = cmd->user_data;
2113 bacpy(&rp.addr.bdaddr, &conn->dst);
2114 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2116 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2119 /* So we don't get further callbacks for this connection */
2120 conn->connect_cfm_cb = NULL;
2121 conn->security_cfm_cb = NULL;
2122 conn->disconn_cfm_cb = NULL;
/* Balances the reference held while the pairing was pending. */
2124 hci_conn_drop(conn);
2126 mgmt_pending_remove(cmd);
/* Connection/security/disconnect callback for BR/EDR pairing: resolve the
 * matching pending PAIR_DEVICE command with the translated status. */
2129 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2131 struct pending_cmd *cmd;
2133 BT_DBG("status %u", status);
2135 cmd = find_pairing(conn);
2137 BT_DBG("Unable to find a pending command");
2139 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback for pairing. Unlike BR/EDR, a successful
 * LE connection alone does not finish the pairing (SMP follows), so this
 * only resolves the command on the paths visible here. */
2142 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2144 struct pending_cmd *cmd;
2146 BT_DBG("status %u", status);
2151 cmd = find_pairing(conn);
2153 BT_DBG("Unable to find a pending command");
2155 pairing_complete(cmd, mgmt_status(status));
/* Handle MGMT_OP_PAIR_DEVICE: initiate an ACL or LE connection with
 * dedicated-bonding authentication and hook the connection callbacks so
 * the command completes when pairing (not just connecting) finishes. */
2158 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2161 struct mgmt_cp_pair_device *cp = data;
2162 struct mgmt_rp_pair_device rp;
2163 struct pending_cmd *cmd;
2164 u8 sec_level, auth_type;
2165 struct hci_conn *conn;
2170 memset(&rp, 0, sizeof(rp));
2171 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2172 rp.addr.type = cp->addr.type;
2174 if (!bdaddr_type_is_valid(cp->addr.type))
2175 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2176 MGMT_STATUS_INVALID_PARAMS,
2181 if (!hdev_is_powered(hdev)) {
2182 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2183 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 = NoInputNoOutput: MITM protection is impossible, so
 * plain dedicated bonding is requested instead of the MITM variant. */
2187 sec_level = BT_SECURITY_MEDIUM;
2188 if (cp->io_cap == 0x03)
2189 auth_type = HCI_AT_DEDICATED_BONDING;
2191 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2193 if (cp->addr.type == BDADDR_BREDR)
2194 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2195 cp->addr.type, sec_level, auth_type);
2197 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2198 cp->addr.type, sec_level, auth_type);
2203 if (PTR_ERR(conn) == -EBUSY)
2204 status = MGMT_STATUS_BUSY;
2206 status = MGMT_STATUS_CONNECT_FAILED;
2208 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* An existing connect_cfm_cb means another pairing already owns this
 * connection — release our extra ref and report busy. */
2214 if (conn->connect_cfm_cb) {
2215 hci_conn_drop(conn);
2216 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2217 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2221 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2224 hci_conn_drop(conn);
2228 /* For LE, just connecting isn't a proof that the pairing finished */
2229 if (cp->addr.type == BDADDR_BREDR)
2230 conn->connect_cfm_cb = pairing_complete_cb;
2232 conn->connect_cfm_cb = le_connect_complete_cb;
2234 conn->security_cfm_cb = pairing_complete_cb;
2235 conn->disconn_cfm_cb = pairing_complete_cb;
2236 conn->io_capability = cp->io_cap;
2237 cmd->user_data = conn;
/* Already connected and secure enough: complete immediately. */
2239 if (conn->state == BT_CONNECTED &&
2240 hci_conn_security(conn, sec_level, auth_type))
2241 pairing_complete(cmd, 0);
2246 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the in-flight PAIR_DEVICE
 * command for the given address with MGMT_STATUS_CANCELLED. */
2250 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2253 struct mgmt_addr_info *addr = data;
2254 struct pending_cmd *cmd;
2255 struct hci_conn *conn;
2262 if (!hdev_is_powered(hdev)) {
2263 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2264 MGMT_STATUS_NOT_POWERED);
2268 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2270 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2271 MGMT_STATUS_INVALID_PARAMS);
2275 conn = cmd->user_data;
/* The cancel must target the same peer the pairing was started for. */
2277 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2278 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2279 MGMT_STATUS_INVALID_PARAMS);
2283 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2285 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2286 addr, sizeof(*addr));
2288 hci_dev_unlock(hdev);
/* Common backend for all user confirm/passkey (neg) replies: routes LE
 * responses through SMP and BR/EDR responses through the given HCI
 * opcode, tracking the latter with a pending command. */
2292 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2293 struct mgmt_addr_info *addr, u16 mgmt_op,
2294 u16 hci_op, __le32 passkey)
2296 struct pending_cmd *cmd;
2297 struct hci_conn *conn;
2302 if (!hdev_is_powered(hdev)) {
2303 err = cmd_complete(sk, hdev->id, mgmt_op,
2304 MGMT_STATUS_NOT_POWERED, addr,
2309 if (addr->type == BDADDR_BREDR)
2310 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2312 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2315 err = cmd_complete(sk, hdev->id, mgmt_op,
2316 MGMT_STATUS_NOT_CONNECTED, addr,
2321 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2322 /* Continue with pairing via SMP */
2323 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2326 err = cmd_complete(sk, hdev->id, mgmt_op,
2327 MGMT_STATUS_SUCCESS, addr,
2330 err = cmd_complete(sk, hdev->id, mgmt_op,
2331 MGMT_STATUS_FAILED, addr,
2337 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2343 /* Continue with pairing via HCI */
/* Passkey replies carry the numeric passkey; all other replies send
 * only the peer address. */
2344 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2345 struct hci_cp_user_passkey_reply cp;
2347 bacpy(&cp.bdaddr, &addr->bdaddr);
2348 cp.passkey = passkey;
2349 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2351 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2355 mgmt_pending_remove(cmd);
2358 hci_dev_unlock(hdev);
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY via the common user-pairing path. */
2362 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2363 void *data, u16 len)
2365 struct mgmt_cp_pin_code_neg_reply *cp = data;
2369 return user_pairing_resp(sk, hdev, &cp->addr,
2370 MGMT_OP_PIN_CODE_NEG_REPLY,
2371 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_REPLY; the explicit length check exists
 * because this handler is registered with variable-length enabled. */
2374 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2377 struct mgmt_cp_user_confirm_reply *cp = data;
2381 if (len != sizeof(*cp))
2382 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2383 MGMT_STATUS_INVALID_PARAMS);
2385 return user_pairing_resp(sk, hdev, &cp->addr,
2386 MGMT_OP_USER_CONFIRM_REPLY,
2387 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY via the common pairing path. */
2390 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2391 void *data, u16 len)
2393 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2397 return user_pairing_resp(sk, hdev, &cp->addr,
2398 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2399 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_PASSKEY_REPLY, forwarding the numeric passkey. */
2402 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2405 struct mgmt_cp_user_passkey_reply *cp = data;
2409 return user_pairing_resp(sk, hdev, &cp->addr,
2410 MGMT_OP_USER_PASSKEY_REPLY,
2411 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY via the common pairing path. */
2414 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2415 void *data, u16 len)
2417 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2421 return user_pairing_resp(sk, hdev, &cp->addr,
2422 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2423 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (with the current hdev name) to
 * the given request. */
2426 static void update_name(struct hci_request *req)
2428 struct hci_dev *hdev = req->hdev;
2429 struct hci_cp_write_local_name cp;
2431 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2433 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* hci_request completion for SET_LOCAL_NAME: report status or success to
 * the pending command's socket and drop the pending entry. */
2436 static void set_name_complete(struct hci_dev *hdev, u8 status)
2438 struct mgmt_cp_set_local_name *cp;
2439 struct pending_cmd *cmd;
2441 BT_DBG("status 0x%02x", status);
2445 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2452 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2453 mgmt_status(status));
2455 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2458 mgmt_pending_remove(cmd);
2461 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_LOCAL_NAME: update the long and short local names.
 * No-op if unchanged; applied only in memory when powered off; otherwise
 * written to the controller (and advertising data for LE) via a request. */
2464 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2467 struct mgmt_cp_set_local_name *cp = data;
2468 struct pending_cmd *cmd;
2469 struct hci_request req;
2476 /* If the old values are the same as the new ones just return a
2477 * direct command complete event.
2479 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2480 !memcmp(hdev->short_name, cp->short_name,
2481 sizeof(hdev->short_name))) {
2482 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2487 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: store the name and notify listeners; nothing to send. */
2489 if (!hdev_is_powered(hdev)) {
2490 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2492 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2497 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2503 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2509 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2511 hci_req_init(&req, hdev);
2513 if (lmp_bredr_capable(hdev)) {
/* Keep LE advertising data in sync with the new name. */
2518 if (lmp_le_capable(hdev))
2519 hci_update_ad(&req);
2521 err = hci_req_run(&req, set_name_complete);
2523 mgmt_pending_remove(cmd);
2526 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its SSP OOB
 * hash/randomizer. Requires power and SSP support; only one request may
 * be outstanding. */
2530 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2531 void *data, u16 data_len)
2533 struct pending_cmd *cmd;
2536 BT_DBG("%s", hdev->name);
2540 if (!hdev_is_powered(hdev)) {
2541 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2542 MGMT_STATUS_NOT_POWERED);
2546 if (!lmp_ssp_capable(hdev)) {
2547 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2548 MGMT_STATUS_NOT_SUPPORTED);
2552 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2553 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2558 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2564 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2566 mgmt_pending_remove(cmd);
2569 hci_dev_unlock(hdev);
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store the peer's OOB hash and
 * randomizer for later SSP pairing; completes synchronously. */
2573 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2574 void *data, u16 len)
2576 struct mgmt_cp_add_remote_oob_data *cp = data;
2580 BT_DBG("%s ", hdev->name);
2584 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2587 status = MGMT_STATUS_FAILED;
2589 status = MGMT_STATUS_SUCCESS;
2591 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2592 &cp->addr, sizeof(cp->addr));
2594 hci_dev_unlock(hdev);
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: drop stored OOB data for the
 * peer; an unknown address is reported as INVALID_PARAMS. */
2598 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2599 void *data, u16 len)
2601 struct mgmt_cp_remove_remote_oob_data *cp = data;
2605 BT_DBG("%s", hdev->name);
2609 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2611 status = MGMT_STATUS_INVALID_PARAMS;
2613 status = MGMT_STATUS_SUCCESS;
2615 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2616 status, &cp->addr, sizeof(cp->addr));
2618 hci_dev_unlock(hdev);
/* Abort a failed discovery start: reset discovery state to STOPPED and
 * complete the pending START_DISCOVERY command with the failure status. */
2622 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
2624 struct pending_cmd *cmd;
2628 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2630 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2634 type = hdev->discovery.type;
2636 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2637 &type, sizeof(type));
2638 mgmt_pending_remove(cmd);
/* hci_request completion for START_DISCOVERY: on success move to FINDING
 * and, for LE/interleaved scans, arm the timer that later disables the
 * LE scan. On failure, unwind via mgmt_start_discovery_failed(). */
2643 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
2645 BT_DBG("status %d", status);
2649 mgmt_start_discovery_failed(hdev, status);
2650 hci_dev_unlock(hdev);
2655 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2656 hci_dev_unlock(hdev);
2658 switch (hdev->discovery.type) {
2659 case DISCOV_TYPE_LE:
2660 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2664 case DISCOV_TYPE_INTERLEAVED:
2665 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2666 DISCOV_INTERLEAVED_TIMEOUT);
/* BR/EDR inquiry has its own completion path; no timer needed. */
2669 case DISCOV_TYPE_BREDR:
2673 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
/* Handle MGMT_OP_START_DISCOVERY: kick off BR/EDR inquiry, LE scanning,
 * or interleaved discovery depending on cp->type. Validates power state,
 * no periodic inquiry, and that no discovery is already running; builds
 * an hci_request whose completion (start_discovery_complete) advances the
 * discovery state machine.
 * Fix: the memset of param_cp had its '&' mangled by an HTML-entity
 * corruption ("&para" -> U+00B6 pilcrow), which would not compile;
 * restored to memset(&param_cp, ...). */
2677 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2678 void *data, u16 len)
2680 struct mgmt_cp_start_discovery *cp = data;
2681 struct pending_cmd *cmd;
2682 struct hci_cp_le_set_scan_param param_cp;
2683 struct hci_cp_le_set_scan_enable enable_cp;
2684 struct hci_cp_inquiry inq_cp;
2685 struct hci_request req;
2686 /* General inquiry access code (GIAC) */
2687 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2690 BT_DBG("%s", hdev->name);
2694 if (!hdev_is_powered(hdev)) {
2695 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2696 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry owns the baseband; discovery cannot coexist. */
2700 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2701 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2706 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2707 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2712 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2718 hdev->discovery.type = cp->type;
2720 hci_req_init(&req, hdev);
2722 switch (hdev->discovery.type) {
2723 case DISCOV_TYPE_BREDR:
2724 if (!lmp_bredr_capable(hdev)) {
2725 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2726 MGMT_STATUS_NOT_SUPPORTED);
2727 mgmt_pending_remove(cmd);
2731 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2732 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2734 mgmt_pending_remove(cmd);
2738 hci_inquiry_cache_flush(hdev);
2740 memset(&inq_cp, 0, sizeof(inq_cp));
2741 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
2742 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
2743 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
2746 case DISCOV_TYPE_LE:
2747 case DISCOV_TYPE_INTERLEAVED:
2748 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2749 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2750 MGMT_STATUS_NOT_SUPPORTED);
2751 mgmt_pending_remove(cmd);
/* Interleaved discovery additionally needs BR/EDR support. */
2755 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
2756 !lmp_bredr_capable(hdev)) {
2757 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2758 MGMT_STATUS_NOT_SUPPORTED);
2759 mgmt_pending_remove(cmd);
/* An LE peripheral must not become a scanner. */
2763 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
2764 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2765 MGMT_STATUS_REJECTED);
2766 mgmt_pending_remove(cmd);
2770 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
2771 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2773 mgmt_pending_remove(cmd);
2777 memset(&param_cp, 0, sizeof(param_cp));
2778 param_cp.type = LE_SCAN_ACTIVE;
2779 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
2780 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2781 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2784 memset(&enable_cp, 0, sizeof(enable_cp));
2785 enable_cp.enable = LE_SCAN_ENABLE;
2786 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2787 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2792 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2793 MGMT_STATUS_INVALID_PARAMS);
2794 mgmt_pending_remove(cmd);
2798 err = hci_req_run(&req, start_discovery_complete);
2800 mgmt_pending_remove(cmd);
2802 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2805 hci_dev_unlock(hdev);
/* Complete the pending STOP_DISCOVERY command with the failure status,
 * echoing the current discovery type. */
2809 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2811 struct pending_cmd *cmd;
2814 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
2818 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2819 &hdev->discovery.type, sizeof(hdev->discovery.type));
2820 mgmt_pending_remove(cmd);
/* hci_request completion for STOP_DISCOVERY: report failure or mark the
 * discovery state machine as STOPPED. */
2825 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
2827 BT_DBG("status %d", status);
2832 mgmt_stop_discovery_failed(hdev, status);
2836 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2839 hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: cancel whatever phase discovery is in —
 * inquiry/LE scan while FINDING, or an outstanding remote-name request
 * while RESOLVING. The supplied type must match the active discovery. */
2842 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2845 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2846 struct pending_cmd *cmd;
2847 struct hci_cp_remote_name_req_cancel cp;
2848 struct inquiry_entry *e;
2849 struct hci_request req;
2850 struct hci_cp_le_set_scan_enable enable_cp;
2853 BT_DBG("%s", hdev->name);
2857 if (!hci_discovery_active(hdev)) {
2858 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2859 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2860 sizeof(mgmt_cp->type));
2864 if (hdev->discovery.type != mgmt_cp->type) {
2865 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2866 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2867 sizeof(mgmt_cp->type));
2871 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2877 hci_req_init(&req, hdev);
2879 switch (hdev->discovery.state) {
/* FINDING: cancel BR/EDR inquiry if running, otherwise disable the
 * LE scan and drop its auto-disable timer. */
2880 case DISCOVERY_FINDING:
2881 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2882 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2884 cancel_delayed_work(&hdev->le_scan_disable);
2886 memset(&enable_cp, 0, sizeof(enable_cp));
2887 enable_cp.enable = LE_SCAN_DISABLE;
2888 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
2889 sizeof(enable_cp), &enable_cp);
/* RESOLVING: cancel the in-flight remote-name request, if any entry
 * is still pending name resolution. */
2894 case DISCOVERY_RESOLVING:
2895 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2898 mgmt_pending_remove(cmd);
2899 err = cmd_complete(sk, hdev->id,
2900 MGMT_OP_STOP_DISCOVERY, 0,
2902 sizeof(mgmt_cp->type));
2903 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2907 bacpy(&cp.bdaddr, &e->data.bdaddr);
2908 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2914 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2916 mgmt_pending_remove(cmd);
2917 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2918 MGMT_STATUS_FAILED, &mgmt_cp->type,
2919 sizeof(mgmt_cp->type));
2923 err = hci_req_run(&req, stop_discovery_complete);
2925 mgmt_pending_remove(cmd);
2927 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2930 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CONFIRM_NAME: userspace tells us whether the name of a
 * discovered device is already known, steering name-resolution priority. */
2934 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2937 struct mgmt_cp_confirm_name *cp = data;
2938 struct inquiry_entry *e;
2941 BT_DBG("%s", hdev->name);
2945 if (!hci_discovery_active(hdev)) {
2946 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2947 MGMT_STATUS_FAILED);
2951 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2953 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2954 MGMT_STATUS_INVALID_PARAMS);
/* Known name: no resolution needed. Unknown: mark it NEEDED and let the
 * resolve list re-prioritize the entry. */
2958 if (cp->name_known) {
2959 e->name_state = NAME_KNOWN;
2962 e->name_state = NAME_NEEDED;
2963 hci_inquiry_cache_update_resolve(hdev, e);
2966 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2970 hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add the address to the blacklist;
 * completes synchronously with the echoed address. */
2974 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2977 struct mgmt_cp_block_device *cp = data;
2981 BT_DBG("%s", hdev->name);
2983 if (!bdaddr_type_is_valid(cp->addr.type))
2984 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2985 MGMT_STATUS_INVALID_PARAMS,
2986 &cp->addr, sizeof(cp->addr));
2990 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2992 status = MGMT_STATUS_FAILED;
2994 status = MGMT_STATUS_SUCCESS;
2996 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2997 &cp->addr, sizeof(cp->addr));
2999 hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the address from the blacklist;
 * a miss is reported as INVALID_PARAMS. */
3004 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3007 struct mgmt_cp_unblock_device *cp = data;
3011 BT_DBG("%s", hdev->name);
3013 if (!bdaddr_type_is_valid(cp->addr.type))
3014 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3015 MGMT_STATUS_INVALID_PARAMS,
3016 &cp->addr, sizeof(cp->addr));
3020 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3022 status = MGMT_STATUS_INVALID_PARAMS;
3024 status = MGMT_STATUS_SUCCESS;
3026 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3027 &cp->addr, sizeof(cp->addr));
3029 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: store DI profile source/vendor/product/
 * version and trigger a request (e.g. EIR refresh) to publish them.
 * source > 0x0002 is outside the DI-assigned range and rejected. */
3034 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3037 struct mgmt_cp_set_device_id *cp = data;
3038 struct hci_request req;
3042 BT_DBG("%s", hdev->name);
3044 source = __le16_to_cpu(cp->source);
3046 if (source > 0x0002)
3047 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3048 MGMT_STATUS_INVALID_PARAMS);
3052 hdev->devid_source = source;
3053 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3054 hdev->devid_product = __le16_to_cpu(cp->product);
3055 hdev->devid_version = __le16_to_cpu(cp->version);
3057 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
/* Fire-and-forget request: no completion callback needed. */
3059 hci_req_init(&req, hdev);
3061 hci_req_run(&req, NULL);
3063 hci_dev_unlock(hdev);
/* hci_request completion for SET_FAST_CONNECTABLE: on success flip the
 * HCI_FAST_CONNECTABLE flag to match the requested mode and broadcast
 * the new settings; on failure report the HCI status. */
3068 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3070 struct pending_cmd *cmd;
3072 BT_DBG("status 0x%02x", status);
3076 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3081 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3082 mgmt_status(status));
3084 struct mgmt_mode *cp = cmd->param;
3087 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3089 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3091 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3092 new_settings(hdev, cmd->sk);
3095 mgmt_pending_remove(cmd);
3098 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle fast-connectable page scan
 * parameters. Requires BR/EDR, controller >= v1.2, power on, and the
 * CONNECTABLE setting; a no-op request returns the current settings. */
3101 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3102 void *data, u16 len)
3104 struct mgmt_mode *cp = data;
3105 struct pending_cmd *cmd;
3106 struct hci_request req;
3109 BT_DBG("%s", hdev->name);
/* Fast connectable page-scan parameters require Bluetooth 1.2+. */
3111 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
3112 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3113 MGMT_STATUS_NOT_SUPPORTED);
3115 if (cp->val != 0x00 && cp->val != 0x01)
3116 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3117 MGMT_STATUS_INVALID_PARAMS);
3119 if (!hdev_is_powered(hdev))
3120 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3121 MGMT_STATUS_NOT_POWERED);
3123 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3124 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3125 MGMT_STATUS_REJECTED);
3129 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3130 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested mode already active: just echo the current settings. */
3135 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3136 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3141 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3148 hci_req_init(&req, hdev);
3150 write_fast_connectable(&req, cp->val);
3152 err = hci_req_run(&req, fast_connectable_complete);
3154 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3155 MGMT_STATUS_FAILED);
3156 mgmt_pending_remove(cmd);
3160 hci_dev_unlock(hdev);
/* Validate a single mgmt_ltk_info entry from userspace:
 * authenticated and master must be strict booleans and the address
 * type must be an LE type. Return statements are missing from this
 * extraction; the visible conditions are the rejection criteria. */
3165 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3167 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3169 if (key->master != 0x00 && key->master != 0x01)
3171 if (!bdaddr_type_is_le(key->addr.type))
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 * Verifies that the payload length matches the advertised key_count,
 * validates every key via ltk_is_valid(), then replaces the stored
 * SMP LTK list wholesale (clear + re-add each key). */
3176 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3177 void *cp_data, u16 len)
3179 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3180 u16 key_count, expected_len;
3183 key_count = __le16_to_cpu(cp->key_count);
/* NOTE(review): expected_len is u16, so for a sufficiently large
 * key_count this multiplication wraps and a malicious length could
 * pass the check below. Consider bounding key_count against the
 * maximum that fits in a valid message — verify against upstream. */
3185 expected_len = sizeof(*cp) + key_count *
3186 sizeof(struct mgmt_ltk_info);
3187 if (expected_len != len) {
3188 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3190 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3191 MGMT_STATUS_INVALID_PARAMS);
3194 BT_DBG("%s key_count %u", hdev->name, key_count);
/* First pass: reject the whole load if any single key is malformed,
 * before touching the existing key store. */
3196 for (i = 0; i < key_count; i++) {
3197 struct mgmt_ltk_info *key = &cp->keys[i];
3199 if (!ltk_is_valid(key))
3200 return cmd_status(sk, hdev->id,
3201 MGMT_OP_LOAD_LONG_TERM_KEYS,
3202 MGMT_STATUS_INVALID_PARAMS);
/* Second pass: replace the stored LTKs with the validated set. */
3207 hci_smp_ltks_clear(hdev);
3209 for (i = 0; i < key_count; i++) {
3210 struct mgmt_ltk_info *key = &cp->keys[i];
3216 type = HCI_SMP_LTK_SLAVE;
3218 hci_add_ltk(hdev, &key->addr.bdaddr,
3219 bdaddr_to_le(key->addr.type),
3220 type, 0, key->authenticated, key->val,
3221 key->enc_size, key->ediv, key->rand);
3224 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3227 hci_dev_unlock(hdev);
/* Dispatch table for management commands, indexed directly by mgmt
 * opcode (entry 0 is a NULL placeholder since opcode 0 is unused).
 * Each entry carries the handler function plus (fields elided in this
 * extraction) a variable-length flag and the expected parameter size,
 * which mgmt_control() checks before dispatching. Order must match
 * the MGMT_OP_* opcode numbering exactly. */
3232 static const struct mgmt_handler {
3233 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3237 } mgmt_handlers[] = {
3238 { NULL }, /* 0x0000 (no command) */
3239 { read_version, false, MGMT_READ_VERSION_SIZE },
3240 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3241 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3242 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3243 { set_powered, false, MGMT_SETTING_SIZE },
3244 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3245 { set_connectable, false, MGMT_SETTING_SIZE },
3246 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3247 { set_pairable, false, MGMT_SETTING_SIZE },
3248 { set_link_security, false, MGMT_SETTING_SIZE },
3249 { set_ssp, false, MGMT_SETTING_SIZE },
3250 { set_hs, false, MGMT_SETTING_SIZE },
3251 { set_le, false, MGMT_SETTING_SIZE },
3252 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3253 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3254 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3255 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
/* The two key-load commands are variable-length: the size constant is
 * a minimum, with the actual length validated by the handler. */
3256 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3257 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3258 { disconnect, false, MGMT_DISCONNECT_SIZE },
3259 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3260 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3261 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3262 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3263 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3264 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3265 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3266 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3267 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3268 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3269 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3270 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3271 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3272 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3273 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3274 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3275 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3276 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3277 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3278 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for management messages arriving on an HCI control
 * socket. Copies the message from the iovec, parses the mgmt_hdr
 * (opcode/index/len, little-endian on the wire), resolves the target
 * hci_dev for indexed commands, validates opcode range and parameter
 * length against mgmt_handlers[], then dispatches to the handler.
 * Returns the handler result or a negative errno; error paths reply
 * to the socket with an appropriate MGMT_STATUS_* code. */
3282 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3286 struct mgmt_hdr *hdr;
3287 u16 opcode, index, len;
3288 struct hci_dev *hdev = NULL;
3289 const struct mgmt_handler *handler;
3292 BT_DBG("got %zu bytes", msglen);
3294 if (msglen < sizeof(*hdr))
3297 buf = kmalloc(msglen, GFP_KERNEL);
3301 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
/* Header fields are little-endian on the wire. */
3307 opcode = __le16_to_cpu(hdr->opcode);
3308 index = __le16_to_cpu(hdr->index);
3309 len = __le16_to_cpu(hdr->len);
/* The declared parameter length must account for the whole payload. */
3311 if (len != msglen - sizeof(*hdr)) {
3316 if (index != MGMT_INDEX_NONE) {
3317 hdev = hci_dev_get(index);
3319 err = cmd_status(sk, index, opcode,
3320 MGMT_STATUS_INVALID_INDEX);
3325 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3326 mgmt_handlers[opcode].func == NULL) {
3327 BT_DBG("Unknown op %u", opcode);
3328 err = cmd_status(sk, index, opcode,
3329 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below MGMT_OP_READ_INFO are global (no index); the rest
 * require a controller index. Reject mismatches. */
3333 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3334 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3335 err = cmd_status(sk, index, opcode,
3336 MGMT_STATUS_INVALID_INDEX);
3340 handler = &mgmt_handlers[opcode];
/* Variable-length commands need at least data_len bytes; fixed-length
 * commands need exactly data_len. */
3342 if ((handler->var_len && len < handler->data_len) ||
3343 (!handler->var_len && len != handler->data_len)) {
3344 err = cmd_status(sk, index, opcode,
3345 MGMT_STATUS_INVALID_PARAMS);
3350 mgmt_init_hdev(sk, hdev);
3352 cp = buf + sizeof(*hdr);
3354 err = handler->func(sk, hdev, cp, len);
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and drop it from the pending list. */
3368 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3372 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3373 mgmt_pending_remove(cmd);
/* Emit an Index Added event for a newly registered controller,
 * skipping controllers mgmt_valid_hdev() rejects. */
3376 int mgmt_index_added(struct hci_dev *hdev)
3378 if (!mgmt_valid_hdev(hdev))
3381 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Emit an Index Removed event when a controller goes away. All
 * pending commands (opcode 0 = wildcard) are failed first with
 * MGMT_STATUS_INVALID_INDEX so no socket is left waiting. */
3384 int mgmt_index_removed(struct hci_dev *hdev)
3386 u8 status = MGMT_STATUS_INVALID_INDEX;
3388 if (!mgmt_valid_hdev(hdev))
3391 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3393 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
/* NOTE(review): field of struct cmd_lookup; the struct's opening
 * lines are missing from this extraction. Used by settings_rsp() and
 * sk_lookup() to carry the target hdev and the first responder's
 * socket across mgmt_pending_foreach() iterations. */
3398 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings, remember the first responder's socket in
 * the cmd_lookup (holding a ref so it survives the loop), and free
 * the pending entry. */
3402 static void settings_rsp(struct pending_cmd *cmd, void *data)
3404 struct cmd_lookup *match = data;
3406 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3408 list_del(&cmd->list);
/* Keep only the first socket; it is used later to skip the event
 * echo back to the originator. */
3410 if (match->sk == NULL) {
3411 match->sk = cmd->sk;
3412 sock_hold(match->sk);
3415 mgmt_pending_free(cmd);
/* Queue HCI commands that bring BR/EDR scanning in line with the
 * CONNECTABLE/DISCOVERABLE flags: page scan when connectable,
 * inquiry scan additionally when discoverable. */
3418 static void set_bredr_scan(struct hci_request *req)
3420 struct hci_dev *hdev = req->hdev;
3423 /* Ensure that fast connectable is disabled. This function will
3424 * not do anything if the page scan parameters are already what
3427 write_fast_connectable(req, false);
3429 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3431 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3432 scan |= SCAN_INQUIRY;
3435 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Completion callback for the power-up HCI request built by
 * powered_update_hci(): answer all pending SET_POWERED commands and
 * broadcast the new settings (skipping the first responder). */
3438 static void powered_complete(struct hci_dev *hdev, u8 status)
3440 struct cmd_lookup match = { NULL, hdev };
3442 BT_DBG("status 0x%02x", status);
3446 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3448 new_settings(hdev, match.sk);
3450 hci_dev_unlock(hdev);
/* Build and run the HCI request that pushes the mgmt-configured state
 * (SSP, LE host support, link security, scan mode) to a controller
 * that just powered on. Each command is only queued when the
 * controller's current state differs from the desired one, so an
 * empty request is possible. Returns hci_req_run()'s result;
 * powered_complete() handles the finishing. */
3456 static int powered_update_hci(struct hci_dev *hdev)
3458 struct hci_request req;
3461 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host side
 * does not yet have it on. */
3463 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3464 !lmp_host_ssp_capable(hdev)) {
3467 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3470 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
3471 lmp_bredr_capable(hdev)) {
3472 struct hci_cp_write_le_host_supported cp;
3475 cp.simul = lmp_le_br_capable(hdev);
3477 /* Check first if we already have the right
3478 * host state (host features set)
3480 if (cp.le != lmp_host_le_capable(hdev) ||
3481 cp.simul != lmp_host_le_br_capable(hdev))
3482 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Sync authentication enable with the LINK_SECURITY flag. */
3486 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3487 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3488 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3489 sizeof(link_sec), &link_sec);
3491 if (lmp_bredr_capable(hdev)) {
3492 set_bredr_scan(&req);
3498 return hci_req_run(&req, powered_complete);
/* Called from the HCI core when a controller's power state changed.
 * Power-on: hand off to powered_update_hci() (its completion callback
 * answers the pending commands); if that queued nothing, answer the
 * pending SET_POWERED commands directly. Power-off: answer pending
 * SET_POWERED commands, fail all other pending commands with
 * NOT_POWERED, and emit a Class of Device Changed event (zero CoD) if
 * a class was set. Finally broadcasts New Settings. */
3501 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3503 struct cmd_lookup match = { NULL, hdev };
3504 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3505 u8 zero_cod[] = { 0, 0, 0 };
3508 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Power-on: push configured state to the controller first. */
3512 if (powered_update_hci(hdev) == 0)
3515 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-off path. */
3520 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3521 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3523 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3524 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3525 zero_cod, sizeof(zero_cod), NULL);
3528 err = new_settings(hdev, match.sk);
/* Report a failed power-on to the socket that issued SET_POWERED.
 * -ERFKILL maps to MGMT_STATUS_RFKILLED; everything else to
 * MGMT_STATUS_FAILED. */
3536 int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3538 struct pending_cmd *cmd;
3541 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3545 if (err == -ERFKILL)
3546 status = MGMT_STATUS_RFKILLED;
3548 status = MGMT_STATUS_FAILED;
3550 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3552 mgmt_pending_remove(cmd);
/* Sync the HCI_DISCOVERABLE flag with the controller-reported state,
 * answer any pending SET_DISCOVERABLE commands and, if the flag
 * actually changed, broadcast New Settings. */
3557 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3559 struct cmd_lookup match = { NULL, hdev };
3560 bool changed = false;
/* test_and_* report whether the flag flipped, avoiding spurious
 * New Settings events. */
3564 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3567 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3571 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3575 err = new_settings(hdev, match.sk);
/* Sync the HCI_CONNECTABLE flag with the controller-reported state
 * and, on an actual change, broadcast New Settings — skipping the
 * socket of any pending SET_CONNECTABLE originator. */
3583 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3585 struct pending_cmd *cmd;
3586 bool changed = false;
3590 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3593 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3597 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3600 err = new_settings(hdev, cmd ? cmd->sk : NULL);
/* A Write Scan Enable HCI command failed: fail the pending
 * SET_CONNECTABLE and/or SET_DISCOVERABLE commands that correspond to
 * the scan bits being written. */
3605 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3607 u8 mgmt_err = mgmt_status(status);
3609 if (scan & SCAN_PAGE)
3610 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3611 cmd_status_rsp, &mgmt_err);
3613 if (scan & SCAN_INQUIRY)
3614 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3615 cmd_status_rsp, &mgmt_err);
/* Emit a New Link Key event for a BR/EDR link key, with @persistent
 * as the store hint telling userspace whether to keep the key. */
3620 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3623 struct mgmt_ev_new_link_key ev;
3625 memset(&ev, 0, sizeof(ev));
3627 ev.store_hint = persistent;
3628 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3629 ev.key.addr.type = BDADDR_BREDR;
3630 ev.key.type = key->type;
3631 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3632 ev.key.pin_len = key->pin_len;
3634 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Long Term Key event for an SMP LTK. The event address
 * type is derived from the LE key's bdaddr_type; @persistent is the
 * userspace store hint. */
3637 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3639 struct mgmt_ev_new_long_term_key ev;
3641 memset(&ev, 0, sizeof(ev));
3643 ev.store_hint = persistent;
3644 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3645 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3646 ev.key.authenticated = key->authenticated;
3647 ev.key.enc_size = key->enc_size;
3648 ev.key.ediv = key->ediv;
/* HCI_SMP_LTK marks the master-role key; the line setting the event's
 * master field is elided in this extraction. */
3650 if (key->type == HCI_SMP_LTK)
3653 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3654 memcpy(ev.key.val, key->val, sizeof(key->val));
3656 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* Emit a Device Connected event. The remote name (if provided) and a
 * non-zero class of device are packed as EIR fields after the fixed
 * event header, so only sizeof(*ev) + eir_len bytes are sent. */
3660 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3661 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3665 struct mgmt_ev_device_connected *ev = (void *) buf;
3668 bacpy(&ev->addr.bdaddr, bdaddr);
3669 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3671 ev->flags = __cpu_to_le32(flags);
3674 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include the class of device when it is non-zero. */
3677 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3678 eir_len = eir_append_data(ev->eir, eir_len,
3679 EIR_CLASS_OF_DEV, dev_class, 3);
3681 ev->eir_len = cpu_to_le16(eir_len);
3683 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3684 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback used during disconnection: complete
 * the pending MGMT_OP_DISCONNECT with the address from its own
 * parameters and stash the socket in *(struct sock **)data so the
 * caller can skip it when broadcasting the disconnect event. */
3687 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3689 struct mgmt_cp_disconnect *cp = cmd->param;
3690 struct sock **sk = data;
3691 struct mgmt_rp_disconnect rp;
3693 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3694 rp.addr.type = cp->addr.type;
3696 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3702 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command once its connection is gone — emit Device Unpaired to other
 * sockets, then complete the command back to the originator. */
3705 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3707 struct hci_dev *hdev = data;
3708 struct mgmt_cp_unpair_device *cp = cmd->param;
3709 struct mgmt_rp_unpair_device rp;
3711 memset(&rp, 0, sizeof(rp));
3712 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3713 rp.addr.type = cp->addr.type;
3715 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3717 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3719 mgmt_pending_remove(cmd);
/* Handle a completed disconnection: answer pending DISCONNECT
 * commands (collecting the originator's socket via disconnect_rsp so
 * the event is not echoed back to it), emit Device Disconnected, then
 * finish any UNPAIR_DEVICE commands waiting on this link. */
3722 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3723 u8 link_type, u8 addr_type, u8 reason)
3725 struct mgmt_ev_device_disconnected ev;
3726 struct sock *sk = NULL;
3729 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3731 bacpy(&ev.addr.bdaddr, bdaddr);
3732 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3735 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3741 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Report a failed disconnect attempt: finish waiting UNPAIR_DEVICE
 * commands, then complete the pending DISCONNECT command with the
 * translated HCI status. */
3747 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3748 u8 link_type, u8 addr_type, u8 status)
3750 struct mgmt_rp_disconnect rp;
3751 struct pending_cmd *cmd;
3754 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3757 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3761 bacpy(&rp.addr.bdaddr, bdaddr);
3762 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3764 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3765 mgmt_status(status), &rp, sizeof(rp));
3767 mgmt_pending_remove(cmd);
/* Broadcast a Connect Failed event with the translated HCI status. */
3772 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3773 u8 addr_type, u8 status)
3775 struct mgmt_ev_connect_failed ev;
3777 bacpy(&ev.addr.bdaddr, bdaddr);
3778 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3779 ev.status = mgmt_status(status);
3781 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a PIN Code Request event for a BR/EDR device. The line
 * copying @secure into the event is elided in this extraction. */
3784 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3786 struct mgmt_ev_pin_code_request ev;
3788 bacpy(&ev.addr.bdaddr, bdaddr);
3789 ev.addr.type = BDADDR_BREDR;
3792 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* Complete the pending PIN_CODE_REPLY command with the translated HCI
 * status once the controller acknowledged the reply. */
3796 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3799 struct pending_cmd *cmd;
3800 struct mgmt_rp_pin_code_reply rp;
3803 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3807 bacpy(&rp.addr.bdaddr, bdaddr);
3808 rp.addr.type = BDADDR_BREDR;
3810 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3811 mgmt_status(status), &rp, sizeof(rp));
3813 mgmt_pending_remove(cmd);
/* Complete the pending PIN_CODE_NEG_REPLY command with the translated
 * HCI status; mirror image of mgmt_pin_code_reply_complete(). */
3818 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3821 struct pending_cmd *cmd;
3822 struct mgmt_rp_pin_code_reply rp;
3825 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3829 bacpy(&rp.addr.bdaddr, bdaddr);
3830 rp.addr.type = BDADDR_BREDR;
3832 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3833 mgmt_status(status), &rp, sizeof(rp));
3835 mgmt_pending_remove(cmd);
/* Broadcast a User Confirmation Request event so an agent can compare
 * and confirm the passkey @value. The line copying @value into the
 * event is elided in this extraction. */
3840 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3841 u8 link_type, u8 addr_type, __le32 value,
3844 struct mgmt_ev_user_confirm_request ev;
3846 BT_DBG("%s", hdev->name);
3848 bacpy(&ev.addr.bdaddr, bdaddr);
3849 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3850 ev.confirm_hint = confirm_hint;
3853 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Broadcast a User Passkey Request event asking an agent to enter the
 * passkey for the given remote device. */
3857 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3858 u8 link_type, u8 addr_type)
3860 struct mgmt_ev_user_passkey_request ev;
3862 BT_DBG("%s", hdev->name);
3864 bacpy(&ev.addr.bdaddr, bdaddr);
3865 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3867 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey
 * (neg-)reply commands: find the pending command for @opcode and
 * complete it with the translated HCI status and the remote address. */
3871 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3872 u8 link_type, u8 addr_type, u8 status,
3875 struct pending_cmd *cmd;
3876 struct mgmt_rp_user_confirm_reply rp;
3879 cmd = mgmt_pending_find(opcode, hdev);
3883 bacpy(&rp.addr.bdaddr, bdaddr);
3884 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3885 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3888 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
3893 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3894 u8 link_type, u8 addr_type, u8 status)
3896 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3897 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
3900 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3901 u8 link_type, u8 addr_type, u8 status)
3903 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3905 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
3908 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3909 u8 link_type, u8 addr_type, u8 status)
3911 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3912 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
3915 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3916 u8 link_type, u8 addr_type, u8 status)
3918 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3920 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Broadcast a Passkey Notify event carrying the passkey to display
 * and whether a digit has already been entered remotely. */
3923 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3924 u8 link_type, u8 addr_type, u32 passkey,
3927 struct mgmt_ev_passkey_notify ev;
3929 BT_DBG("%s", hdev->name);
3931 bacpy(&ev.addr.bdaddr, bdaddr);
3932 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3933 ev.passkey = __cpu_to_le32(passkey);
3934 ev.entered = entered;
3936 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Broadcast an Authentication Failed event with the translated HCI
 * status. */
3939 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3940 u8 addr_type, u8 status)
3942 struct mgmt_ev_auth_failed ev;
3944 bacpy(&ev.addr.bdaddr, bdaddr);
3945 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3946 ev.status = mgmt_status(status);
3948 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Completion of a Write Auth Enable command. On failure, fail all
 * pending SET_LINK_SECURITY commands with the translated status; on
 * success, sync HCI_LINK_SECURITY with the controller's HCI_AUTH flag,
 * answer pending commands, and broadcast New Settings if changed. */
3951 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3953 struct cmd_lookup match = { NULL, hdev };
3954 bool changed = false;
3958 u8 mgmt_err = mgmt_status(status);
3959 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3960 cmd_status_rsp, &mgmt_err);
/* Success: mirror the controller's authentication state. */
3964 if (test_bit(HCI_AUTH, &hdev->flags)) {
3965 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3968 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3972 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3976 err = new_settings(hdev, match.sk);
/* Queue a Write EIR command with all-zero data to clear the extended
 * inquiry response; no-op on controllers without EIR support. Also
 * clears the cached copy in hdev->eir. */
3984 static void clear_eir(struct hci_request *req)
3986 struct hci_dev *hdev = req->hdev;
3987 struct hci_cp_write_eir cp;
3989 if (!lmp_ext_inq_capable(hdev))
3992 memset(hdev->eir, 0, sizeof(hdev->eir));
3994 memset(&cp, 0, sizeof(cp));
3996 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of a Write SSP Mode command. On failure, roll back the
 * HCI_SSP_ENABLED flag if it was speculatively set (broadcasting New
 * Settings when it actually flips) and fail pending SET_SSP commands.
 * On success, sync the flag with @enable, answer pending commands,
 * broadcast New Settings if changed, and queue a follow-up request
 * (EIR update or clear — intermediate lines elided here). */
3999 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4001 struct cmd_lookup match = { NULL, hdev };
4002 struct hci_request req;
4003 bool changed = false;
4007 u8 mgmt_err = mgmt_status(status);
/* Failure: the flag was set optimistically; undo and notify. */
4009 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4011 err = new_settings(hdev, NULL);
4013 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4020 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4023 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4027 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4030 err = new_settings(hdev, match.sk);
4035 hci_req_init(&req, hdev);
4037 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4042 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending
 * command's socket into the cmd_lookup (with a reference) without
 * completing or removing the command. */
4047 static void sk_lookup(struct pending_cmd *cmd, void *data)
4049 struct cmd_lookup *match = data;
4051 if (match->sk == NULL) {
4052 match->sk = cmd->sk;
4053 sock_hold(match->sk);
/* Completion of a class-of-device update: find the socket that
 * triggered it (SET_DEV_CLASS, ADD_UUID or REMOVE_UUID) so the Class
 * of Device Changed event is not echoed back to it. */
4057 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4060 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4063 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4064 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4065 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4068 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* Completion of a local name change. Builds a Local Name Changed
 * event (name + short name), caches the new name in hdev->dev_name,
 * suppresses the event while a power-on sequence is in progress, and
 * skips echoing it to the originating SET_LOCAL_NAME socket. */
4077 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4079 struct mgmt_cp_set_local_name ev;
4080 struct pending_cmd *cmd;
4085 memset(&ev, 0, sizeof(ev));
4086 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4087 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4089 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4091 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4093 /* If this is a HCI command related to powering on the
4094 * HCI dev don't send any mgmt signals.
4096 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4100 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4101 cmd ? cmd->sk : NULL);
/* Completion of a Read Local OOB Data HCI command: on failure send a
 * command status, otherwise return the hash and randomizer to the
 * socket that issued READ_LOCAL_OOB_DATA. */
4104 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4105 u8 *randomizer, u8 status)
4107 struct pending_cmd *cmd;
4110 BT_DBG("%s status %u", hdev->name, status);
4112 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4117 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4118 mgmt_status(status));
4120 struct mgmt_rp_read_local_oob_data rp;
4122 memcpy(rp.hash, hash, sizeof(rp.hash));
4123 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4125 err = cmd_complete(cmd->sk, hdev->id,
4126 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
4130 mgmt_pending_remove(cmd);
/* Completion of a Write LE Host Supported command. Structured like
 * mgmt_ssp_enable_complete(): on failure, roll back an optimistically
 * set HCI_LE_ENABLED flag and fail pending SET_LE commands; on
 * success, sync the flag with @enable, answer pending commands and
 * broadcast New Settings if the flag flipped. */
4135 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4137 struct cmd_lookup match = { NULL, hdev };
4138 bool changed = false;
4142 u8 mgmt_err = mgmt_status(status);
/* Failure: undo the speculative flag set and notify. */
4144 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
4146 err = new_settings(hdev, NULL);
4148 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
4155 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4158 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4162 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
4165 err = new_settings(hdev, match.sk);
/* Emit a Device Found event during discovery. Drops the result when
 * no discovery is active or when the EIR data (plus room for an
 * appended CoD field) would not fit the stack buffer. Confirm-name
 * and legacy-pairing hints are encoded in the event flags; a class of
 * device is appended to the EIR only when not already present. */
4173 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4174 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4175 ssp, u8 *eir, u16 eir_len)
4178 struct mgmt_ev_device_found *ev = (void *) buf;
4181 if (!hci_discovery_active(hdev))
4184 /* Leave 5 bytes for a potential CoD field */
4185 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4188 memset(buf, 0, sizeof(buf));
4190 bacpy(&ev->addr.bdaddr, bdaddr);
4191 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4194 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4196 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4199 memcpy(ev->eir, eir, eir_len);
/* Append the class of device unless the EIR already carries one. */
4201 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4202 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4205 ev->eir_len = cpu_to_le16(eir_len);
4206 ev_size = sizeof(*ev) + eir_len;
4208 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a Device Found event carrying only the resolved remote name
 * (as an EIR Complete Name field) after a remote name request. */
4211 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4212 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4214 struct mgmt_ev_device_found *ev;
4215 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4218 ev = (struct mgmt_ev_device_found *) buf;
4220 memset(buf, 0, sizeof(buf));
4222 bacpy(&ev->addr.bdaddr, bdaddr);
4223 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4226 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4229 ev->eir_len = cpu_to_le16(eir_len);
4231 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4232 sizeof(*ev) + eir_len, NULL);
/* Report a discovery state change: complete whichever of
 * START_DISCOVERY/STOP_DISCOVERY is pending (returning the discovery
 * type), then broadcast a Discovering event with the new state. */
4235 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4237 struct mgmt_ev_discovering ev;
4238 struct pending_cmd *cmd;
4240 BT_DBG("%s discovering %u", hdev->name, discovering);
4243 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4245 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4248 u8 type = hdev->discovery.type;
4250 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4252 mgmt_pending_remove(cmd);
4255 memset(&ev, 0, sizeof(ev));
4256 ev.type = hdev->discovery.type;
4257 ev.discovering = discovering;
4259 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a Device Blocked event, skipping the socket of the
 * pending BLOCK_DEVICE originator if there is one. */
4262 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4264 struct pending_cmd *cmd;
4265 struct mgmt_ev_device_blocked ev;
4267 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4269 bacpy(&ev.addr.bdaddr, bdaddr);
4270 ev.addr.type = type;
4272 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4273 cmd ? cmd->sk : NULL);
/* Broadcast a Device Unblocked event, skipping the socket of the
 * pending UNBLOCK_DEVICE originator if there is one. */
4276 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4278 struct pending_cmd *cmd;
4279 struct mgmt_ev_device_unblocked ev;
4281 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4283 bacpy(&ev.addr.bdaddr, bdaddr);
4284 ev.addr.type = type;
4286 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4287 cmd ? cmd->sk : NULL);
/* Module parameter (writable at runtime via sysfs, mode 0644) gating
 * High Speed support; enable_hs itself is defined elsewhere. */
4290 module_param(enable_hs, bool, 0644);
4291 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");