2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* Management opcodes this kernel supports; reported verbatim to user space
 * by read_commands(). NOTE(review): several entries visible in upstream
 * mgmt.c appear to be missing from this view — confirm against the tree.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Management events this kernel can emit; reported to user space by
 * read_commands() alongside mgmt_commands[].
 */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
/* Service-cache expiry: 2 seconds, expressed in jiffies. */
105 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* A controller counts as "powered" for mgmt purposes only when HCI is up
 * AND it is not merely in the transient auto-power-on (HCI_AUTO_OFF) state.
 */
107 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
108 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* NOTE(review): the following member appears to belong to struct pending_cmd,
 * whose surrounding declaration is missing from this view — confirm.
 */
111 struct list_head list;
119 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte; see mgmt_status() below.
 * Entries past the table bound fall back to MGMT_STATUS_FAILED.
 */
120 static u8 mgmt_status_table[] = {
122 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
123 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
124 MGMT_STATUS_FAILED, /* Hardware Failure */
125 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
126 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
127 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
128 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
129 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
130 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
131 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
132 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
133 MGMT_STATUS_BUSY, /* Command Disallowed */
134 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
135 MGMT_STATUS_REJECTED, /* Rejected Security */
136 MGMT_STATUS_REJECTED, /* Rejected Personal */
137 MGMT_STATUS_TIMEOUT, /* Host Timeout */
138 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
139 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
140 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
141 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
142 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
143 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
144 MGMT_STATUS_BUSY, /* Repeated Attempts */
145 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
146 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
147 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
148 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
149 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
150 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
151 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
152 MGMT_STATUS_FAILED, /* Unspecified Error */
153 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
154 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
155 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
156 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
157 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
158 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
159 MGMT_STATUS_FAILED, /* Unit Link Key Used */
160 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
161 MGMT_STATUS_TIMEOUT, /* Instant Passed */
162 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
163 MGMT_STATUS_FAILED, /* Transaction Collision */
164 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
165 MGMT_STATUS_REJECTED, /* QoS Rejected */
166 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
167 MGMT_STATUS_REJECTED, /* Insufficient Security */
168 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
169 MGMT_STATUS_BUSY, /* Role Switch Pending */
170 MGMT_STATUS_FAILED, /* Slot Violation */
171 MGMT_STATUS_FAILED, /* Role Switch Failed */
172 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
173 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
174 MGMT_STATUS_BUSY, /* Host Busy Pairing */
175 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
176 MGMT_STATUS_BUSY, /* Controller Busy */
177 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
178 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
179 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
180 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
181 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Return true if @hdev is managed by the mgmt interface: only primary
 * BR/EDR controllers are; AMP controllers are not.
 */
184 bool mgmt_valid_hdev(struct hci_dev *hdev)
186 return hdev->dev_type == HCI_BREDR;
/* Map an HCI status byte to the corresponding MGMT status code via
 * mgmt_status_table[]; unknown/out-of-range codes become MGMT_STATUS_FAILED.
 */
189 static u8 mgmt_status(u8 hci_status)
191 if (hci_status < ARRAY_SIZE(mgmt_status_table))
192 return mgmt_status_table[hci_status];
194 return MGMT_STATUS_FAILED;
/* Queue an MGMT_EV_CMD_STATUS event on @sk's receive queue, reporting
 * @status for command @cmd on controller @index.
 * Returns 0 on success or a negative errno from sock_queue_rcv_skb().
 * NOTE(review): the alloc-failure check and skb cleanup lines appear to be
 * missing from this view — confirm against the full source.
 */
197 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
200 struct mgmt_hdr *hdr;
201 struct mgmt_ev_cmd_status *ev;
204 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
206 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
210 hdr = (void *) skb_put(skb, sizeof(*hdr));
212 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
213 hdr->index = cpu_to_le16(index);
214 hdr->len = cpu_to_le16(sizeof(*ev));
216 ev = (void *) skb_put(skb, sizeof(*ev));
218 ev->opcode = cpu_to_le16(cmd);
220 err = sock_queue_rcv_skb(sk, skb);
/* Queue an MGMT_EV_CMD_COMPLETE event on @sk carrying @rp_len bytes of
 * response payload @rp for command @cmd on controller @index.
 * Returns 0 on success or a negative errno.
 * NOTE(review): alloc-failure handling lines are not visible here — confirm.
 */
227 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
228 void *rp, size_t rp_len)
231 struct mgmt_hdr *hdr;
232 struct mgmt_ev_cmd_complete *ev;
235 BT_DBG("sock %p", sk);
237 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
241 hdr = (void *) skb_put(skb, sizeof(*hdr));
243 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
244 hdr->index = cpu_to_le16(index);
245 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
247 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
248 ev->opcode = cpu_to_le16(cmd);
252 memcpy(ev->data, rp, rp_len);
254 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the compiled-in mgmt
 * interface version/revision. Controller-independent (MGMT_INDEX_NONE).
 */
261 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
264 struct mgmt_rp_read_version rp;
266 BT_DBG("sock %p", sk);
268 rp.version = MGMT_VERSION;
269 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
271 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: reply with the supported command and
 * event opcode lists (mgmt_commands[] then mgmt_events[], little-endian,
 * written unaligned into the flexible opcodes array).
 */
275 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
278 struct mgmt_rp_read_commands *rp;
279 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
280 const u16 num_events = ARRAY_SIZE(mgmt_events);
285 BT_DBG("sock %p", sk);
287 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
289 rp = kmalloc(rp_size, GFP_KERNEL);
293 rp->num_commands = __constant_cpu_to_le16(num_commands);
294 rp->num_events = __constant_cpu_to_le16(num_events);
/* Commands first, then events, in one contiguous u16 array. */
296 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
297 put_unaligned_le16(mgmt_commands[i], opcode);
299 for (i = 0; i < num_events; i++, opcode++)
300 put_unaligned_le16(mgmt_events[i], opcode);
302 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all mgmt-valid
 * controllers. Two passes over hci_dev_list under the read lock: first to
 * size the reply, then to fill it, skipping devices still in setup
 * (HCI_SETUP) or bound to a user channel (HCI_USER_CHANNEL).
 * GFP_ATOMIC because the allocation happens while holding the read lock.
 */
309 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
312 struct mgmt_rp_read_index_list *rp;
318 BT_DBG("sock %p", sk);
320 read_lock(&hci_dev_list_lock);
323 list_for_each_entry(d, &hci_dev_list, list) {
324 if (!mgmt_valid_hdev(d))
330 rp_len = sizeof(*rp) + (2 * count);
331 rp = kmalloc(rp_len, GFP_ATOMIC);
333 read_unlock(&hci_dev_list_lock);
338 list_for_each_entry(d, &hci_dev_list, list) {
339 if (test_bit(HCI_SETUP, &d->dev_flags))
342 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
345 if (!mgmt_valid_hdev(d))
348 rp->index[count++] = cpu_to_le16(d->id);
349 BT_DBG("Added hci%u", d->id);
/* Recompute length: the second pass may have skipped devices. */
352 rp->num_controllers = cpu_to_le16(count);
353 rp_len = sizeof(*rp) + (2 * count);
355 read_unlock(&hci_dev_list_lock);
357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the MGMT "supported settings" bitmask from the controller's LMP
 * feature bits: POWERED/PAIRABLE always, plus BR/EDR-, SSP- and
 * LE-dependent settings. FAST_CONNECTABLE additionally requires HCI >= 1.2.
 */
365 static u32 get_supported_settings(struct hci_dev *hdev)
369 settings |= MGMT_SETTING_POWERED;
370 settings |= MGMT_SETTING_PAIRABLE;
372 if (lmp_ssp_capable(hdev))
373 settings |= MGMT_SETTING_SSP;
375 if (lmp_bredr_capable(hdev)) {
376 settings |= MGMT_SETTING_CONNECTABLE;
377 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
378 settings |= MGMT_SETTING_FAST_CONNECTABLE;
379 settings |= MGMT_SETTING_DISCOVERABLE;
380 settings |= MGMT_SETTING_BREDR;
381 settings |= MGMT_SETTING_LINK_SECURITY;
385 settings |= MGMT_SETTING_HS;
387 if (lmp_le_capable(hdev)) {
388 settings |= MGMT_SETTING_LE;
389 settings |= MGMT_SETTING_ADVERTISING;
/* Build the MGMT "current settings" bitmask from the controller's live
 * state: power state plus the per-device HCI_* flag bits.
 */
395 static u32 get_current_settings(struct hci_dev *hdev)
399 if (hdev_is_powered(hdev))
400 settings |= MGMT_SETTING_POWERED;
402 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
403 settings |= MGMT_SETTING_CONNECTABLE;
405 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
406 settings |= MGMT_SETTING_FAST_CONNECTABLE;
408 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
409 settings |= MGMT_SETTING_DISCOVERABLE;
411 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
412 settings |= MGMT_SETTING_PAIRABLE;
414 if (lmp_bredr_capable(hdev))
415 settings |= MGMT_SETTING_BREDR;
417 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
418 settings |= MGMT_SETTING_LE;
420 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
421 settings |= MGMT_SETTING_LINK_SECURITY;
423 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
424 settings |= MGMT_SETTING_SSP;
426 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
427 settings |= MGMT_SETTING_HS;
429 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
430 settings |= MGMT_SETTING_ADVERTISING;
/* PnP Information service class; excluded from EIR UUID lists below. */
435 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR UUID16 field for every 16-bit UUID in hdev->uuids to
 * @data, at most @len bytes. uuids_start points at the field header so
 * its length byte [0] and type byte [1] can be patched as UUIDs are
 * added; on overflow the type is downgraded from _ALL to _SOME.
 * Returns the advanced write pointer. NOTE(review): the return statement
 * and the header-initialization lines are not visible in this view.
 */
437 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
439 u8 *ptr = data, *uuids_start = NULL;
440 struct bt_uuid *uuid;
445 list_for_each_entry(uuid, &hdev->uuids, list) {
448 if (uuid->size != 16)
/* 16-bit alias lives in bytes 12..13 of the 128-bit form. */
451 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
455 if (uuid16 == PNP_INFO_SVCLASS_ID)
461 uuids_start[1] = EIR_UUID16_ALL;
465 /* Stop if not enough space to put next UUID */
466 if ((ptr - data) + sizeof(u16) > len) {
467 uuids_start[1] = EIR_UUID16_SOME;
471 *ptr++ = (uuid16 & 0x00ff);
472 *ptr++ = (uuid16 & 0xff00) >> 8;
473 uuids_start[0] += sizeof(uuid16);
/* Append an EIR UUID32 field for every 32-bit UUID in hdev->uuids to
 * @data (at most @len bytes); same header-patching scheme as
 * create_uuid16_list(). Returns the advanced write pointer.
 */
479 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
481 u8 *ptr = data, *uuids_start = NULL;
482 struct bt_uuid *uuid;
487 list_for_each_entry(uuid, &hdev->uuids, list) {
488 if (uuid->size != 32)
494 uuids_start[1] = EIR_UUID32_ALL;
498 /* Stop if not enough space to put next UUID */
499 if ((ptr - data) + sizeof(u32) > len) {
500 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit alias occupies bytes 12..15 of the 128-bit form. */
504 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
506 uuids_start[0] += sizeof(u32);
/* Append an EIR UUID128 field for every 128-bit UUID in hdev->uuids to
 * @data (at most @len bytes); same header-patching scheme as the 16/32-bit
 * variants. Returns the advanced write pointer.
 */
512 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
514 u8 *ptr = data, *uuids_start = NULL;
515 struct bt_uuid *uuid;
520 list_for_each_entry(uuid, &hdev->uuids, list) {
521 if (uuid->size != 128)
527 uuids_start[1] = EIR_UUID128_ALL;
531 /* Stop if not enough space to put next UUID */
532 if ((ptr - data) + 16 > len) {
533 uuids_start[1] = EIR_UUID128_SOME;
537 memcpy(ptr, uuid->uuid, 16);
539 uuids_start[0] += 16;
/* Fill @data with the Extended Inquiry Response payload: local name
 * (complete or shortened), inquiry TX power (if valid), Device ID record
 * (if a devid source is set), then the 16/32/128-bit UUID lists up to
 * HCI_MAX_EIR_LENGTH. NOTE(review): several field-setup lines (length
 * bytes, ptr advances) are not visible in this view.
 */
545 static void create_eir(struct hci_dev *hdev, u8 *data)
550 name_len = strlen(hdev->dev_name);
556 ptr[1] = EIR_NAME_SHORT;
558 ptr[1] = EIR_NAME_COMPLETE;
560 /* EIR Data length */
561 ptr[0] = name_len + 1;
563 memcpy(ptr + 2, hdev->dev_name, name_len);
565 ptr += (name_len + 2);
568 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
570 ptr[1] = EIR_TX_POWER;
571 ptr[2] = (u8) hdev->inq_tx_power;
576 if (hdev->devid_source > 0) {
578 ptr[1] = EIR_DEVICE_ID;
/* DI record: source, vendor, product, version — all little-endian. */
580 put_unaligned_le16(hdev->devid_source, ptr + 2);
581 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
582 put_unaligned_le16(hdev->devid_product, ptr + 6);
583 put_unaligned_le16(hdev->devid_version, ptr + 8);
588 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
589 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
590 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write_EIR command on @req if the freshly built EIR data
 * differs from the cached hdev->eir. Skipped when the controller is off,
 * lacks extended-inquiry support, has SSP disabled, or while the service
 * cache is active.
 */
593 static void update_eir(struct hci_request *req)
595 struct hci_dev *hdev = req->hdev;
596 struct hci_cp_write_eir cp;
598 if (!hdev_is_powered(hdev))
601 if (!lmp_ext_inq_capable(hdev))
604 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
607 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
610 memset(&cp, 0, sizeof(cp));
612 create_eir(hdev, cp.data);
/* No-op if the EIR payload has not changed. */
614 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
617 memcpy(hdev->eir, cp.data, sizeof(cp.data));
619 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hint bits of all registered UUIDs; used
 * as the top byte of the Class of Device.
 */
622 static u8 get_service_classes(struct hci_dev *hdev)
624 struct bt_uuid *uuid;
627 list_for_each_entry(uuid, &hdev->uuids, list)
628 val |= uuid->svc_hint;
/* Queue an HCI Write_Class_of_Device command on @req if the computed
 * class (minor, major, service bits) differs from hdev->dev_class.
 * Skipped when powered off or while the service cache is active.
 */
633 static void update_class(struct hci_request *req)
635 struct hci_dev *hdev = req->hdev;
638 BT_DBG("%s", hdev->name);
640 if (!hdev_is_powered(hdev))
643 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
646 cod[0] = hdev->minor_class;
647 cod[1] = hdev->major_class;
648 cod[2] = get_service_classes(hdev);
650 if (memcmp(cod, hdev->dev_class, 3) == 0)
653 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed-work handler that expires the service cache: clears
 * HCI_SERVICE_CACHE and pushes the now-uncached class/EIR to the
 * controller via an hci_request. NOTE(review): the update_class/update_eir
 * calls and the hci_dev_lock pairing are not visible in this view.
 */
656 static void service_cache_off(struct work_struct *work)
658 struct hci_dev *hdev = container_of(work, struct hci_dev,
660 struct hci_request req;
662 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
665 hci_req_init(&req, hdev);
672 hci_dev_unlock(hdev);
674 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialization, triggered the first time a
 * mgmt socket touches @hdev (HCI_MGMT flag guards re-entry).
 */
677 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
679 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
682 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off)
684 /* Non-mgmt controlled devices get this bit set
685 * implicitly so that pairing works for them, however
686 * for mgmt we require user-space to explicitly enable
689 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: reply with the controller's address, HCI
 * version/manufacturer, supported and current settings bitmasks, class of
 * device, and (short) name. State is snapshotted under hci_dev_lock.
 */
692 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
693 void *data, u16 data_len)
695 struct mgmt_rp_read_info rp;
697 BT_DBG("sock %p %s", sk, hdev->name);
701 memset(&rp, 0, sizeof(rp));
703 bacpy(&rp.bdaddr, &hdev->bdaddr);
705 rp.version = hdev->hci_ver;
706 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
708 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
709 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
711 memcpy(rp.dev_class, hdev->dev_class, 3);
713 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
714 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
716 hci_dev_unlock(hdev);
718 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command's resources (socket ref, param buffer, cmd).
 * NOTE(review): the body is not visible in this view.
 */
722 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate and register a pending mgmt command for @hdev: a copy of the
 * request parameters is kept so the response can be built once the HCI
 * operation completes. Returns the new entry (NULL on allocation failure —
 * the error paths are not visible in this view).
 */
729 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
730 struct hci_dev *hdev, void *data,
733 struct pending_cmd *cmd;
735 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
739 cmd->opcode = opcode;
740 cmd->index = hdev->id;
742 cmd->param = kmalloc(len, GFP_KERNEL);
749 memcpy(cmd->param, data, len);
754 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command for @hdev, or only those matching
 * @opcode when it is non-zero. Safe against @cb removing entries
 * (list_for_each_entry_safe).
 */
759 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
760 void (*cb)(struct pending_cmd *cmd,
764 struct pending_cmd *cmd, *tmp;
766 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
767 if (opcode > 0 && cmd->opcode != opcode)
/* Return the first pending command for @hdev with the given opcode, or
 * NULL if none (NULL return path not visible in this view).
 */
774 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
776 struct pending_cmd *cmd;
778 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
779 if (cmd->opcode == opcode)
/* Unlink a pending command from its device list and free it. */
786 static void mgmt_pending_remove(struct pending_cmd *cmd)
788 list_del(&cmd->list);
789 mgmt_pending_free(cmd);
/* Reply to a settings-changing command with the device's current
 * settings bitmask (little-endian u32) as the cmd_complete payload.
 */
792 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
794 __le32 settings = cpu_to_le32(get_current_settings(hdev));
796 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler. Validates val is 0/1, rejects a second
 * concurrent power toggle (BUSY), short-circuits the auto-power-off case
 * by cancelling the pending power_off work, answers immediately when the
 * requested state already holds, and otherwise registers a pending command
 * and queues power_on/power_off work. NOTE(review): several error/cleanup
 * lines (failed label, BUSY status argument) are not visible in this view.
 */
800 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
803 struct mgmt_mode *cp = data;
804 struct pending_cmd *cmd;
807 BT_DBG("request for %s", hdev->name);
809 if (cp->val != 0x00 && cp->val != 0x01)
810 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
811 MGMT_STATUS_INVALID_PARAMS);
815 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
816 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Device was about to auto-power-off; keep it on instead. */
821 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
822 cancel_delayed_work(&hdev->power_off);
825 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
827 err = mgmt_powered(hdev, 1);
832 if (!!cp->val == hdev_is_powered(hdev)) {
833 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
837 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
844 queue_work(hdev->req_workqueue, &hdev->power_on);
846 queue_work(hdev->req_workqueue, &hdev->power_off.work);
851 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control sockets except @skip_sk.
 * @hdev may be NULL, in which case the event is tagged MGMT_INDEX_NONE.
 */
855 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
856 struct sock *skip_sk)
859 struct mgmt_hdr *hdr;
861 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
865 hdr = (void *) skb_put(skb, sizeof(*hdr));
866 hdr->opcode = cpu_to_le16(event);
868 hdr->index = cpu_to_le16(hdev->id);
870 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
871 hdr->len = cpu_to_le16(data_len);
874 memcpy(skb_put(skb, data_len), data, data_len);
876 /* Time stamp */
877 __net_timestamp(skb);
879 hci_send_to_control(skb, skip_sk);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to all
 * control sockets except @skip.
 */
885 static int new_settings(struct hci_dev *hdev, struct sock *skip)
889 ev = cpu_to_le32(get_current_settings(hdev));
891 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* NOTE(review): the line below appears to be a member of struct cmd_lookup,
 * whose surrounding declaration is missing from this view — confirm.
 */
896 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer a pending settings command,
 * detach it from the list, and remember the first responder's socket in
 * the cmd_lookup match (with a hold) so the caller can skip it when
 * broadcasting new_settings().
 */
900 static void settings_rsp(struct pending_cmd *cmd, void *data)
902 struct cmd_lookup *match = data;
904 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
906 list_del(&cmd->list);
908 if (match->sk == NULL) {
910 sock_hold(match->sk);
913 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail a pending command with the status
 * pointed to by @data and remove it.
 */
916 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
920 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
921 mgmt_pending_remove(cmd);
/* MGMT_OP_SET_DISCOVERABLE handler. Validation: BR/EDR capable, val is
 * 0/1, timeout only meaningful when enabling, timeout requires power.
 * Rejected while a discoverable/connectable change is pending, and while
 * not connectable. When powered off the flag is just toggled and
 * broadcast; when the state already matches, only the discov_off timer is
 * adjusted. Otherwise a Write_Scan_Enable is sent and a pending command
 * registered. NOTE(review): multiple error-path and scan-setup lines are
 * not visible in this view — do not assume the failure handling shown is
 * complete.
 */
924 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
927 struct mgmt_cp_set_discoverable *cp = data;
928 struct pending_cmd *cmd;
933 BT_DBG("request for %s", hdev->name);
935 if (!lmp_bredr_capable(hdev))
936 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
937 MGMT_STATUS_NOT_SUPPORTED);
939 if (cp->val != 0x00 && cp->val != 0x01)
940 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
941 MGMT_STATUS_INVALID_PARAMS);
943 timeout = __le16_to_cpu(cp->timeout);
944 if (!cp->val && timeout > 0)
945 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
946 MGMT_STATUS_INVALID_PARAMS);
950 if (!hdev_is_powered(hdev) && timeout > 0) {
951 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
952 MGMT_STATUS_NOT_POWERED);
956 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
957 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
958 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable; refuse otherwise. */
963 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
964 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
965 MGMT_STATUS_REJECTED);
969 if (!hdev_is_powered(hdev)) {
970 bool changed = false;
972 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
973 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
977 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
982 err = new_settings(hdev, sk);
987 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
988 if (hdev->discov_timeout > 0) {
989 cancel_delayed_work(&hdev->discov_off);
990 hdev->discov_timeout = 0;
993 if (cp->val && timeout > 0) {
994 hdev->discov_timeout = timeout;
995 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
996 msecs_to_jiffies(hdev->discov_timeout * 1000));
999 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1003 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1012 scan |= SCAN_INQUIRY;
1014 cancel_delayed_work(&hdev->discov_off);
1016 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1018 mgmt_pending_remove(cmd);
1021 hdev->discov_timeout = timeout;
1024 hci_dev_unlock(hdev);
/* Queue page-scan parameter updates on @req: interlaced scan with a
 * 160 ms interval when @enable, otherwise the standard 1.28 s default.
 * Window is 11.25 ms (0x0012 slots) in both cases. Commands are only
 * queued when the values actually change; requires HCI >= 1.2.
 */
1028 static void write_fast_connectable(struct hci_request *req, bool enable)
1030 struct hci_dev *hdev = req->hdev;
1031 struct hci_cp_write_page_scan_activity acp;
1034 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1038 type = PAGE_SCAN_TYPE_INTERLACED;
1040 /* 160 msec page scan interval */
1041 acp.interval = __constant_cpu_to_le16(0x0100);
1043 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1045 /* default 1.28 sec page scan */
1046 acp.interval = __constant_cpu_to_le16(0x0800);
1049 acp.window = __constant_cpu_to_le16(0x0012);
1051 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1052 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1053 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1056 if (hdev->page_scan_type != type)
1057 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* hci_request completion callback for set_connectable(): answer and
 * retire the pending MGMT_OP_SET_CONNECTABLE command, under hci_dev_lock.
 */
1060 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1062 struct pending_cmd *cmd;
1064 BT_DBG("status 0x%02x", status);
1068 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1072 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1074 mgmt_pending_remove(cmd);
1077 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler. Validates BR/EDR support and val 0/1.
 * Powered off: toggle the flag (disabling connectable also clears
 * discoverable) and broadcast. Powered: reject if a connectable/
 * discoverable change is pending, no-op if page scan already matches,
 * otherwise build an hci_request updating scan enable and, where needed,
 * fast-connectable page-scan parameters. NOTE(review): the scan-value
 * computation and several error-path lines are not visible in this view.
 */
1080 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1083 struct mgmt_mode *cp = data;
1084 struct pending_cmd *cmd;
1085 struct hci_request req;
1089 BT_DBG("request for %s", hdev->name);
1091 if (!lmp_bredr_capable(hdev))
1092 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1093 MGMT_STATUS_NOT_SUPPORTED);
1095 if (cp->val != 0x00 && cp->val != 0x01)
1096 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1097 MGMT_STATUS_INVALID_PARAMS);
1101 if (!hdev_is_powered(hdev)) {
1102 bool changed = false;
1104 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1108 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1110 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Not connectable implies not discoverable. */
1111 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1114 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1119 err = new_settings(hdev, sk);
1124 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1125 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1126 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1131 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1132 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1136 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1147 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1148 hdev->discov_timeout > 0)
1149 cancel_delayed_work(&hdev->discov_off);
1152 hci_req_init(&req, hdev);
1154 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1156 /* If we're going from non-connectable to connectable or
1157 * vice-versa when fast connectable is enabled ensure that fast
1158 * connectable gets disabled. write_fast_connectable won't do
1159 * anything if the page scan parameters are already what they
1162 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1163 write_fast_connectable(&req, false);
1165 err = hci_req_run(&req, set_connectable_complete);
1167 mgmt_pending_remove(cmd);
1170 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: pure host-side flag — toggle
 * HCI_PAIRABLE, reply with the new settings, and broadcast new_settings.
 * No HCI command is required.
 */
1174 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1177 struct mgmt_mode *cp = data;
1180 BT_DBG("request for %s", hdev->name);
1182 if (cp->val != 0x00 && cp->val != 0x01)
1183 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1184 MGMT_STATUS_INVALID_PARAMS);
1189 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1191 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1193 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1197 err = new_settings(hdev, sk);
1200 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler. Powered off: toggle the
 * HCI_LINK_SECURITY flag and broadcast. Powered: reject a duplicate
 * pending request (BUSY), no-op if the HCI_AUTH flag already matches,
 * otherwise send Write_Auth_Enable with a registered pending command.
 */
1204 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1207 struct mgmt_mode *cp = data;
1208 struct pending_cmd *cmd;
1212 BT_DBG("request for %s", hdev->name);
1214 if (!lmp_bredr_capable(hdev))
1215 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1216 MGMT_STATUS_NOT_SUPPORTED);
1218 if (cp->val != 0x00 && cp->val != 0x01)
1219 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1220 MGMT_STATUS_INVALID_PARAMS);
1224 if (!hdev_is_powered(hdev)) {
1225 bool changed = false;
1227 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1228 &hdev->dev_flags)) {
1229 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1233 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1238 err = new_settings(hdev, sk);
1243 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1244 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1251 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1252 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1256 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1262 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1264 mgmt_pending_remove(cmd);
1269 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler (Secure Simple Pairing). Same shape as
 * set_link_security: flag-toggle path when powered off, duplicate-request
 * rejection, no-op when HCI_SSP_ENABLED already matches, else
 * Write_Simple_Pairing_Mode with a registered pending command.
 */
1273 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1275 struct mgmt_mode *cp = data;
1276 struct pending_cmd *cmd;
1280 BT_DBG("request for %s", hdev->name);
1282 if (!lmp_ssp_capable(hdev))
1283 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1284 MGMT_STATUS_NOT_SUPPORTED);
1286 if (cp->val != 0x00 && cp->val != 0x01)
1287 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1288 MGMT_STATUS_INVALID_PARAMS);
1294 if (!hdev_is_powered(hdev)) {
1295 bool changed = false;
1297 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1298 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1302 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1307 err = new_settings(hdev, sk);
1312 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1313 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1318 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1319 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1323 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1329 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1331 mgmt_pending_remove(cmd);
1336 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler (High Speed / AMP): host-side flag only — toggle
 * HCI_HS_ENABLED and reply with the new settings. NOTE(review): the
 * capability condition guarding the NOT_SUPPORTED return is not visible
 * in this view.
 */
1340 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1342 struct mgmt_mode *cp = data;
1344 BT_DBG("request for %s", hdev->name);
1347 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1348 MGMT_STATUS_NOT_SUPPORTED);
1350 if (cp->val != 0x00 && cp->val != 0x01)
1351 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1352 MGMT_STATUS_INVALID_PARAMS);
1355 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1357 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1359 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* hci_request completion callback for set_le(): on failure, fail every
 * pending SET_LE command with the mapped mgmt status; on success, answer
 * them all via settings_rsp and broadcast new_settings (skipping the
 * first responder's socket recorded in match.sk).
 */
1362 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1364 struct cmd_lookup match = { NULL, hdev };
1367 u8 mgmt_err = mgmt_status(status);
1369 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1374 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1376 new_settings(hdev, match.sk);
/* MGMT_OP_SET_LE handler. Requires LE hardware; LE-only (no BR/EDR)
 * controllers may not toggle LE off. When powered off or the host-LE
 * state already matches, just toggle HCI_LE_ENABLED (clearing
 * HCI_LE_PERIPHERAL when disabling) and broadcast. Otherwise build a
 * request: disable advertising first if needed, then
 * Write_LE_Host_Supported, completing via le_enable_complete().
 */
1382 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1384 struct mgmt_mode *cp = data;
1385 struct hci_cp_write_le_host_supported hci_cp;
1386 struct pending_cmd *cmd;
1387 struct hci_request req;
1391 BT_DBG("request for %s", hdev->name);
1393 if (!lmp_le_capable(hdev))
1394 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1395 MGMT_STATUS_NOT_SUPPORTED);
1397 if (cp->val != 0x00 && cp->val != 0x01)
1398 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1399 MGMT_STATUS_INVALID_PARAMS);
1401 /* LE-only devices do not allow toggling LE on/off */
1402 if (!lmp_bredr_capable(hdev))
1403 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1404 MGMT_STATUS_REJECTED);
1409 enabled = lmp_host_le_capable(hdev);
1411 if (!hdev_is_powered(hdev) || val == enabled) {
1412 bool changed = false;
1414 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1415 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1419 if (!val && test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
1420 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
1424 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1429 err = new_settings(hdev, sk);
1434 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1435 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1440 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1446 memset(&hci_cp, 0, sizeof(hci_cp));
1450 hci_cp.simul = lmp_le_br_capable(hdev);
1453 hci_req_init(&req, hdev);
/* Turn off advertising before disabling LE host support. */
1455 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags) && !val)
1456 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(val), &val);
1458 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1461 err = hci_req_run(&req, le_enable_complete);
1463 mgmt_pending_remove(cmd);
1466 hci_dev_unlock(hdev);
1470 /* This is a helper function to test for pending mgmt commands that can
1471 * cause CoD or EIR HCI commands. We can only allow one such pending
1472 * mgmt command at a time since otherwise we cannot easily track what
1473 * the current values are, will be, and based on that calculate if a new
1474 * HCI command needs to be sent and if yes with what value.
/* Returns true when any UUID/class/powered command is still pending. */
1476 static bool pending_eir_or_class(struct hci_dev *hdev)
1478 struct pending_cmd *cmd;
1480 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1481 switch (cmd->opcode) {
1482 case MGMT_OP_ADD_UUID:
1483 case MGMT_OP_REMOVE_UUID:
1484 case MGMT_OP_SET_DEV_CLASS:
1485 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID 00000000-0000-1000-8000-00805F9B34FB, stored
 * little-endian; 16/32-bit UUIDs are aliases whose first 12 bytes match.
 */
1493 static const u8 bluetooth_base_uuid[] = {
1494 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1495 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit (little-endian) UUID as a 16-, 32- or 128-bit alias:
 * anything not built on the Bluetooth Base UUID is a true 128-bit UUID;
 * otherwise the alias width depends on the value in bytes 12..15.
 * NOTE(review): the return statements are not visible in this view.
 */
1498 static u8 get_uuid_size(const u8 *uuid)
1502 if (memcmp(uuid, bluetooth_base_uuid, 12))
1505 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for class/EIR-affecting commands: answer the
 * pending @mgmt_op command with the (possibly updated) 3-byte device
 * class and retire it, under hci_dev_lock.
 */
1512 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1514 struct pending_cmd *cmd;
1518 cmd = mgmt_pending_find(mgmt_op, hdev);
1522 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1523 hdev->dev_class, 3);
1525 mgmt_pending_remove(cmd);
1528 hci_dev_unlock(hdev);
/* hci_request completion callback for add_uuid(). */
1531 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1533 BT_DBG("status 0x%02x", status);
1535 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: register a new service UUID, then queue
 * class + EIR updates in one hci_request. Rejected (BUSY) while another
 * class/EIR-affecting command is pending. If the request issues no HCI
 * commands (-ENODATA), complete immediately with the current class;
 * otherwise register a pending command completed by add_uuid_complete().
 * NOTE(review): the update_class/update_eir queueing lines are not
 * visible in this view.
 */
1538 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1540 struct mgmt_cp_add_uuid *cp = data;
1541 struct pending_cmd *cmd;
1542 struct hci_request req;
1543 struct bt_uuid *uuid;
1546 BT_DBG("request for %s", hdev->name);
1550 if (pending_eir_or_class(hdev)) {
1551 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1556 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1562 memcpy(uuid->uuid, cp->uuid, 16);
1563 uuid->svc_hint = cp->svc_hint;
1564 uuid->size = get_uuid_size(cp->uuid);
1566 list_add_tail(&uuid->list, &hdev->uuids);
1568 hci_req_init(&req, hdev);
1573 err = hci_req_run(&req, add_uuid_complete);
1575 if (err != -ENODATA)
/* Nothing to send to the controller: reply right away. */
1578 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1579 hdev->dev_class, 3);
1583 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1592 hci_dev_unlock(hdev);
1596 static bool enable_service_cache(struct hci_dev *hdev)
1598 if (!hdev_is_powered(hdev))
1601 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1602 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1610 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1612 BT_DBG("status 0x%02x", status);
1614 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
1617 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1620 struct mgmt_cp_remove_uuid *cp = data;
1621 struct pending_cmd *cmd;
1622 struct bt_uuid *match, *tmp;
1623 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1624 struct hci_request req;
1627 BT_DBG("request for %s", hdev->name);
1631 if (pending_eir_or_class(hdev)) {
1632 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1637 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1638 err = hci_uuids_clear(hdev);
1640 if (enable_service_cache(hdev)) {
1641 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1642 0, hdev->dev_class, 3);
1651 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1652 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1655 list_del(&match->list);
1661 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1662 MGMT_STATUS_INVALID_PARAMS);
1667 hci_req_init(&req, hdev);
1672 err = hci_req_run(&req, remove_uuid_complete);
1674 if (err != -ENODATA)
1677 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1678 hdev->dev_class, 3);
1682 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1691 hci_dev_unlock(hdev);
1695 static void set_class_complete(struct hci_dev *hdev, u8 status)
1697 BT_DBG("status 0x%02x", status);
1699 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
1702 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1705 struct mgmt_cp_set_dev_class *cp = data;
1706 struct pending_cmd *cmd;
1707 struct hci_request req;
1710 BT_DBG("request for %s", hdev->name);
1712 if (!lmp_bredr_capable(hdev))
1713 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1714 MGMT_STATUS_NOT_SUPPORTED);
1718 if (pending_eir_or_class(hdev)) {
1719 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1724 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1725 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1726 MGMT_STATUS_INVALID_PARAMS);
1730 hdev->major_class = cp->major;
1731 hdev->minor_class = cp->minor;
1733 if (!hdev_is_powered(hdev)) {
1734 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1735 hdev->dev_class, 3);
1739 hci_req_init(&req, hdev);
1741 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1742 hci_dev_unlock(hdev);
1743 cancel_delayed_work_sync(&hdev->service_cache);
1750 err = hci_req_run(&req, set_class_complete);
1752 if (err != -ENODATA)
1755 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1756 hdev->dev_class, 3);
1760 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1769 hci_dev_unlock(hdev);
1773 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1776 struct mgmt_cp_load_link_keys *cp = data;
1777 u16 key_count, expected_len;
1780 key_count = __le16_to_cpu(cp->key_count);
1782 expected_len = sizeof(*cp) + key_count *
1783 sizeof(struct mgmt_link_key_info);
1784 if (expected_len != len) {
1785 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1787 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1788 MGMT_STATUS_INVALID_PARAMS);
1791 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1792 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1793 MGMT_STATUS_INVALID_PARAMS);
1795 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1798 for (i = 0; i < key_count; i++) {
1799 struct mgmt_link_key_info *key = &cp->keys[i];
1801 if (key->addr.type != BDADDR_BREDR)
1802 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1803 MGMT_STATUS_INVALID_PARAMS);
1808 hci_link_keys_clear(hdev);
1811 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1813 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1815 for (i = 0; i < key_count; i++) {
1816 struct mgmt_link_key_info *key = &cp->keys[i];
1818 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1819 key->type, key->pin_len);
1822 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1824 hci_dev_unlock(hdev);
1829 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1830 u8 addr_type, struct sock *skip_sk)
1832 struct mgmt_ev_device_unpaired ev;
1834 bacpy(&ev.addr.bdaddr, bdaddr);
1835 ev.addr.type = addr_type;
1837 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
1841 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1844 struct mgmt_cp_unpair_device *cp = data;
1845 struct mgmt_rp_unpair_device rp;
1846 struct hci_cp_disconnect dc;
1847 struct pending_cmd *cmd;
1848 struct hci_conn *conn;
1851 memset(&rp, 0, sizeof(rp));
1852 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1853 rp.addr.type = cp->addr.type;
1855 if (!bdaddr_type_is_valid(cp->addr.type))
1856 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1857 MGMT_STATUS_INVALID_PARAMS,
1860 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1861 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1862 MGMT_STATUS_INVALID_PARAMS,
1867 if (!hdev_is_powered(hdev)) {
1868 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1869 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1873 if (cp->addr.type == BDADDR_BREDR)
1874 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1876 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1879 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1880 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1884 if (cp->disconnect) {
1885 if (cp->addr.type == BDADDR_BREDR)
1886 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1889 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1896 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1898 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1902 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1909 dc.handle = cpu_to_le16(conn->handle);
1910 dc.reason = 0x13; /* Remote User Terminated Connection */
1911 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1913 mgmt_pending_remove(cmd);
1916 hci_dev_unlock(hdev);
1920 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1923 struct mgmt_cp_disconnect *cp = data;
1924 struct mgmt_rp_disconnect rp;
1925 struct hci_cp_disconnect dc;
1926 struct pending_cmd *cmd;
1927 struct hci_conn *conn;
1932 memset(&rp, 0, sizeof(rp));
1933 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1934 rp.addr.type = cp->addr.type;
1936 if (!bdaddr_type_is_valid(cp->addr.type))
1937 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1938 MGMT_STATUS_INVALID_PARAMS,
1943 if (!test_bit(HCI_UP, &hdev->flags)) {
1944 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1945 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1949 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1950 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1951 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1955 if (cp->addr.type == BDADDR_BREDR)
1956 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1959 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1961 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1962 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1963 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1967 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1973 dc.handle = cpu_to_le16(conn->handle);
1974 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1976 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1978 mgmt_pending_remove(cmd);
1981 hci_dev_unlock(hdev);
1985 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1987 switch (link_type) {
1989 switch (addr_type) {
1990 case ADDR_LE_DEV_PUBLIC:
1991 return BDADDR_LE_PUBLIC;
1994 /* Fallback to LE Random address type */
1995 return BDADDR_LE_RANDOM;
1999 /* Fallback to BR/EDR type */
2000 return BDADDR_BREDR;
2004 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2007 struct mgmt_rp_get_connections *rp;
2017 if (!hdev_is_powered(hdev)) {
2018 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2019 MGMT_STATUS_NOT_POWERED);
2024 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2025 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2029 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2030 rp = kmalloc(rp_len, GFP_KERNEL);
2037 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2038 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2040 bacpy(&rp->addr[i].bdaddr, &c->dst);
2041 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2042 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2047 rp->conn_count = cpu_to_le16(i);
2049 /* Recalculate length in case of filtered SCO connections, etc */
2050 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2052 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2058 hci_dev_unlock(hdev);
2062 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2063 struct mgmt_cp_pin_code_neg_reply *cp)
2065 struct pending_cmd *cmd;
2068 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2073 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2074 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2076 mgmt_pending_remove(cmd);
2081 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2084 struct hci_conn *conn;
2085 struct mgmt_cp_pin_code_reply *cp = data;
2086 struct hci_cp_pin_code_reply reply;
2087 struct pending_cmd *cmd;
2094 if (!hdev_is_powered(hdev)) {
2095 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2096 MGMT_STATUS_NOT_POWERED);
2100 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2102 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2103 MGMT_STATUS_NOT_CONNECTED);
2107 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2108 struct mgmt_cp_pin_code_neg_reply ncp;
2110 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2112 BT_ERR("PIN code is not 16 bytes long");
2114 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2116 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2117 MGMT_STATUS_INVALID_PARAMS);
2122 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2128 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2129 reply.pin_len = cp->pin_len;
2130 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2132 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2134 mgmt_pending_remove(cmd);
2137 hci_dev_unlock(hdev);
2141 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2144 struct mgmt_cp_set_io_capability *cp = data;
2150 hdev->io_capability = cp->io_capability;
2152 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2153 hdev->io_capability);
2155 hci_dev_unlock(hdev);
2157 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2161 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2163 struct hci_dev *hdev = conn->hdev;
2164 struct pending_cmd *cmd;
2166 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2167 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2170 if (cmd->user_data != conn)
2179 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2181 struct mgmt_rp_pair_device rp;
2182 struct hci_conn *conn = cmd->user_data;
2184 bacpy(&rp.addr.bdaddr, &conn->dst);
2185 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2187 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2190 /* So we don't get further callbacks for this connection */
2191 conn->connect_cfm_cb = NULL;
2192 conn->security_cfm_cb = NULL;
2193 conn->disconn_cfm_cb = NULL;
2195 hci_conn_drop(conn);
2197 mgmt_pending_remove(cmd);
2200 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2202 struct pending_cmd *cmd;
2204 BT_DBG("status %u", status);
2206 cmd = find_pairing(conn);
2208 BT_DBG("Unable to find a pending command");
2210 pairing_complete(cmd, mgmt_status(status));
2213 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2215 struct pending_cmd *cmd;
2217 BT_DBG("status %u", status);
2222 cmd = find_pairing(conn);
2224 BT_DBG("Unable to find a pending command");
2226 pairing_complete(cmd, mgmt_status(status));
2229 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2232 struct mgmt_cp_pair_device *cp = data;
2233 struct mgmt_rp_pair_device rp;
2234 struct pending_cmd *cmd;
2235 u8 sec_level, auth_type;
2236 struct hci_conn *conn;
2241 memset(&rp, 0, sizeof(rp));
2242 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2243 rp.addr.type = cp->addr.type;
2245 if (!bdaddr_type_is_valid(cp->addr.type))
2246 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2247 MGMT_STATUS_INVALID_PARAMS,
2252 if (!hdev_is_powered(hdev)) {
2253 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2254 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2258 sec_level = BT_SECURITY_MEDIUM;
2259 if (cp->io_cap == 0x03)
2260 auth_type = HCI_AT_DEDICATED_BONDING;
2262 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2264 if (cp->addr.type == BDADDR_BREDR)
2265 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2266 cp->addr.type, sec_level, auth_type);
2268 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2269 cp->addr.type, sec_level, auth_type);
2274 if (PTR_ERR(conn) == -EBUSY)
2275 status = MGMT_STATUS_BUSY;
2277 status = MGMT_STATUS_CONNECT_FAILED;
2279 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2285 if (conn->connect_cfm_cb) {
2286 hci_conn_drop(conn);
2287 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2288 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2292 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2295 hci_conn_drop(conn);
2299 /* For LE, just connecting isn't a proof that the pairing finished */
2300 if (cp->addr.type == BDADDR_BREDR)
2301 conn->connect_cfm_cb = pairing_complete_cb;
2303 conn->connect_cfm_cb = le_connect_complete_cb;
2305 conn->security_cfm_cb = pairing_complete_cb;
2306 conn->disconn_cfm_cb = pairing_complete_cb;
2307 conn->io_capability = cp->io_cap;
2308 cmd->user_data = conn;
2310 if (conn->state == BT_CONNECTED &&
2311 hci_conn_security(conn, sec_level, auth_type))
2312 pairing_complete(cmd, 0);
2317 hci_dev_unlock(hdev);
2321 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2324 struct mgmt_addr_info *addr = data;
2325 struct pending_cmd *cmd;
2326 struct hci_conn *conn;
2333 if (!hdev_is_powered(hdev)) {
2334 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2335 MGMT_STATUS_NOT_POWERED);
2339 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2341 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2342 MGMT_STATUS_INVALID_PARAMS);
2346 conn = cmd->user_data;
2348 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2349 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2350 MGMT_STATUS_INVALID_PARAMS);
2354 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2356 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2357 addr, sizeof(*addr));
2359 hci_dev_unlock(hdev);
2363 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2364 struct mgmt_addr_info *addr, u16 mgmt_op,
2365 u16 hci_op, __le32 passkey)
2367 struct pending_cmd *cmd;
2368 struct hci_conn *conn;
2373 if (!hdev_is_powered(hdev)) {
2374 err = cmd_complete(sk, hdev->id, mgmt_op,
2375 MGMT_STATUS_NOT_POWERED, addr,
2380 if (addr->type == BDADDR_BREDR)
2381 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2383 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2386 err = cmd_complete(sk, hdev->id, mgmt_op,
2387 MGMT_STATUS_NOT_CONNECTED, addr,
2392 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2393 /* Continue with pairing via SMP */
2394 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2397 err = cmd_complete(sk, hdev->id, mgmt_op,
2398 MGMT_STATUS_SUCCESS, addr,
2401 err = cmd_complete(sk, hdev->id, mgmt_op,
2402 MGMT_STATUS_FAILED, addr,
2408 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2414 /* Continue with pairing via HCI */
2415 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2416 struct hci_cp_user_passkey_reply cp;
2418 bacpy(&cp.bdaddr, &addr->bdaddr);
2419 cp.passkey = passkey;
2420 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2422 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2426 mgmt_pending_remove(cmd);
2429 hci_dev_unlock(hdev);
2433 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2434 void *data, u16 len)
2436 struct mgmt_cp_pin_code_neg_reply *cp = data;
2440 return user_pairing_resp(sk, hdev, &cp->addr,
2441 MGMT_OP_PIN_CODE_NEG_REPLY,
2442 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2445 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2448 struct mgmt_cp_user_confirm_reply *cp = data;
2452 if (len != sizeof(*cp))
2453 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2454 MGMT_STATUS_INVALID_PARAMS);
2456 return user_pairing_resp(sk, hdev, &cp->addr,
2457 MGMT_OP_USER_CONFIRM_REPLY,
2458 HCI_OP_USER_CONFIRM_REPLY, 0);
2461 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2462 void *data, u16 len)
2464 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2468 return user_pairing_resp(sk, hdev, &cp->addr,
2469 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2470 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2473 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2476 struct mgmt_cp_user_passkey_reply *cp = data;
2480 return user_pairing_resp(sk, hdev, &cp->addr,
2481 MGMT_OP_USER_PASSKEY_REPLY,
2482 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2485 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2486 void *data, u16 len)
2488 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2492 return user_pairing_resp(sk, hdev, &cp->addr,
2493 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2494 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2497 static void update_name(struct hci_request *req)
2499 struct hci_dev *hdev = req->hdev;
2500 struct hci_cp_write_local_name cp;
2502 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2504 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2507 static void set_name_complete(struct hci_dev *hdev, u8 status)
2509 struct mgmt_cp_set_local_name *cp;
2510 struct pending_cmd *cmd;
2512 BT_DBG("status 0x%02x", status);
2516 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2523 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2524 mgmt_status(status));
2526 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2529 mgmt_pending_remove(cmd);
2532 hci_dev_unlock(hdev);
2535 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2538 struct mgmt_cp_set_local_name *cp = data;
2539 struct pending_cmd *cmd;
2540 struct hci_request req;
2547 /* If the old values are the same as the new ones just return a
2548 * direct command complete event.
2550 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2551 !memcmp(hdev->short_name, cp->short_name,
2552 sizeof(hdev->short_name))) {
2553 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2558 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2560 if (!hdev_is_powered(hdev)) {
2561 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2563 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2568 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2574 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2580 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2582 hci_req_init(&req, hdev);
2584 if (lmp_bredr_capable(hdev)) {
2589 if (lmp_le_capable(hdev))
2590 hci_update_ad(&req);
2592 err = hci_req_run(&req, set_name_complete);
2594 mgmt_pending_remove(cmd);
2597 hci_dev_unlock(hdev);
2601 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2602 void *data, u16 data_len)
2604 struct pending_cmd *cmd;
2607 BT_DBG("%s", hdev->name);
2611 if (!hdev_is_powered(hdev)) {
2612 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2613 MGMT_STATUS_NOT_POWERED);
2617 if (!lmp_ssp_capable(hdev)) {
2618 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2619 MGMT_STATUS_NOT_SUPPORTED);
2623 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2624 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2629 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2635 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2637 mgmt_pending_remove(cmd);
2640 hci_dev_unlock(hdev);
2644 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2645 void *data, u16 len)
2647 struct mgmt_cp_add_remote_oob_data *cp = data;
2651 BT_DBG("%s ", hdev->name);
2655 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2658 status = MGMT_STATUS_FAILED;
2660 status = MGMT_STATUS_SUCCESS;
2662 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2663 &cp->addr, sizeof(cp->addr));
2665 hci_dev_unlock(hdev);
2669 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2670 void *data, u16 len)
2672 struct mgmt_cp_remove_remote_oob_data *cp = data;
2676 BT_DBG("%s", hdev->name);
2680 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2682 status = MGMT_STATUS_INVALID_PARAMS;
2684 status = MGMT_STATUS_SUCCESS;
2686 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2687 status, &cp->addr, sizeof(cp->addr));
2689 hci_dev_unlock(hdev);
2693 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
2695 struct pending_cmd *cmd;
2699 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2701 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2705 type = hdev->discovery.type;
2707 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2708 &type, sizeof(type));
2709 mgmt_pending_remove(cmd);
2714 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
2716 BT_DBG("status %d", status);
2720 mgmt_start_discovery_failed(hdev, status);
2721 hci_dev_unlock(hdev);
2726 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2727 hci_dev_unlock(hdev);
2729 switch (hdev->discovery.type) {
2730 case DISCOV_TYPE_LE:
2731 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2735 case DISCOV_TYPE_INTERLEAVED:
2736 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2737 DISCOV_INTERLEAVED_TIMEOUT);
2740 case DISCOV_TYPE_BREDR:
2744 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
2748 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2749 void *data, u16 len)
2751 struct mgmt_cp_start_discovery *cp = data;
2752 struct pending_cmd *cmd;
2753 struct hci_cp_le_set_scan_param param_cp;
2754 struct hci_cp_le_set_scan_enable enable_cp;
2755 struct hci_cp_inquiry inq_cp;
2756 struct hci_request req;
2757 /* General inquiry access code (GIAC) */
2758 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2761 BT_DBG("%s", hdev->name);
2765 if (!hdev_is_powered(hdev)) {
2766 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2767 MGMT_STATUS_NOT_POWERED);
2771 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2772 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2777 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2778 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2783 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2789 hdev->discovery.type = cp->type;
2791 hci_req_init(&req, hdev);
2793 switch (hdev->discovery.type) {
2794 case DISCOV_TYPE_BREDR:
2795 if (!lmp_bredr_capable(hdev)) {
2796 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2797 MGMT_STATUS_NOT_SUPPORTED);
2798 mgmt_pending_remove(cmd);
2802 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2803 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2805 mgmt_pending_remove(cmd);
2809 hci_inquiry_cache_flush(hdev);
2811 memset(&inq_cp, 0, sizeof(inq_cp));
2812 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
2813 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
2814 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
2817 case DISCOV_TYPE_LE:
2818 case DISCOV_TYPE_INTERLEAVED:
2819 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2820 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2821 MGMT_STATUS_NOT_SUPPORTED);
2822 mgmt_pending_remove(cmd);
2826 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
2827 !lmp_bredr_capable(hdev)) {
2828 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2829 MGMT_STATUS_NOT_SUPPORTED);
2830 mgmt_pending_remove(cmd);
2834 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
2835 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2836 MGMT_STATUS_REJECTED);
2837 mgmt_pending_remove(cmd);
2841 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
2842 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2844 mgmt_pending_remove(cmd);
2848 memset(¶m_cp, 0, sizeof(param_cp));
2849 param_cp.type = LE_SCAN_ACTIVE;
2850 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
2851 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2852 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2855 memset(&enable_cp, 0, sizeof(enable_cp));
2856 enable_cp.enable = LE_SCAN_ENABLE;
2857 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2858 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2863 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2864 MGMT_STATUS_INVALID_PARAMS);
2865 mgmt_pending_remove(cmd);
2869 err = hci_req_run(&req, start_discovery_complete);
2871 mgmt_pending_remove(cmd);
2873 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2876 hci_dev_unlock(hdev);
2880 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2882 struct pending_cmd *cmd;
2885 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
2889 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2890 &hdev->discovery.type, sizeof(hdev->discovery.type));
2891 mgmt_pending_remove(cmd);
2896 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
2898 BT_DBG("status %d", status);
2903 mgmt_stop_discovery_failed(hdev, status);
2907 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2910 hci_dev_unlock(hdev);
2913 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2916 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2917 struct pending_cmd *cmd;
2918 struct hci_cp_remote_name_req_cancel cp;
2919 struct inquiry_entry *e;
2920 struct hci_request req;
2921 struct hci_cp_le_set_scan_enable enable_cp;
2924 BT_DBG("%s", hdev->name);
2928 if (!hci_discovery_active(hdev)) {
2929 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2930 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2931 sizeof(mgmt_cp->type));
2935 if (hdev->discovery.type != mgmt_cp->type) {
2936 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2937 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2938 sizeof(mgmt_cp->type));
2942 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2948 hci_req_init(&req, hdev);
2950 switch (hdev->discovery.state) {
2951 case DISCOVERY_FINDING:
2952 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2953 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2955 cancel_delayed_work(&hdev->le_scan_disable);
2957 memset(&enable_cp, 0, sizeof(enable_cp));
2958 enable_cp.enable = LE_SCAN_DISABLE;
2959 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
2960 sizeof(enable_cp), &enable_cp);
2965 case DISCOVERY_RESOLVING:
2966 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2969 mgmt_pending_remove(cmd);
2970 err = cmd_complete(sk, hdev->id,
2971 MGMT_OP_STOP_DISCOVERY, 0,
2973 sizeof(mgmt_cp->type));
2974 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2978 bacpy(&cp.bdaddr, &e->data.bdaddr);
2979 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2985 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2987 mgmt_pending_remove(cmd);
2988 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2989 MGMT_STATUS_FAILED, &mgmt_cp->type,
2990 sizeof(mgmt_cp->type));
2994 err = hci_req_run(&req, stop_discovery_complete);
2996 mgmt_pending_remove(cmd);
2998 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3001 hci_dev_unlock(hdev);
3005 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3008 struct mgmt_cp_confirm_name *cp = data;
3009 struct inquiry_entry *e;
3012 BT_DBG("%s", hdev->name);
3016 if (!hci_discovery_active(hdev)) {
3017 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3018 MGMT_STATUS_FAILED);
3022 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3024 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3025 MGMT_STATUS_INVALID_PARAMS);
3029 if (cp->name_known) {
3030 e->name_state = NAME_KNOWN;
3033 e->name_state = NAME_NEEDED;
3034 hci_inquiry_cache_update_resolve(hdev, e);
3037 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3041 hci_dev_unlock(hdev);
3045 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3048 struct mgmt_cp_block_device *cp = data;
3052 BT_DBG("%s", hdev->name);
3054 if (!bdaddr_type_is_valid(cp->addr.type))
3055 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3056 MGMT_STATUS_INVALID_PARAMS,
3057 &cp->addr, sizeof(cp->addr));
3061 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3063 status = MGMT_STATUS_FAILED;
3065 status = MGMT_STATUS_SUCCESS;
3067 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3068 &cp->addr, sizeof(cp->addr));
3070 hci_dev_unlock(hdev);
3075 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3078 struct mgmt_cp_unblock_device *cp = data;
3082 BT_DBG("%s", hdev->name);
3084 if (!bdaddr_type_is_valid(cp->addr.type))
3085 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3086 MGMT_STATUS_INVALID_PARAMS,
3087 &cp->addr, sizeof(cp->addr));
3091 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3093 status = MGMT_STATUS_INVALID_PARAMS;
3095 status = MGMT_STATUS_SUCCESS;
3097 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3098 &cp->addr, sizeof(cp->addr));
3100 hci_dev_unlock(hdev);
3105 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3108 struct mgmt_cp_set_device_id *cp = data;
3109 struct hci_request req;
3113 BT_DBG("%s", hdev->name);
3115 source = __le16_to_cpu(cp->source);
3117 if (source > 0x0002)
3118 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3119 MGMT_STATUS_INVALID_PARAMS);
3123 hdev->devid_source = source;
3124 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3125 hdev->devid_product = __le16_to_cpu(cp->product);
3126 hdev->devid_version = __le16_to_cpu(cp->version);
3128 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3130 hci_req_init(&req, hdev);
3132 hci_req_run(&req, NULL);
3134 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_FAST_CONNECTABLE.
 * On failure, a cmd_status is sent; on success the HCI_FAST_CONNECTABLE
 * dev_flag is updated to match the requested mode and a settings response
 * plus New Settings event are emitted.
 * NOTE(review): lossy extract -- the status/else branching and early-exit
 * path when no pending command is found are partially elided.
 */
3139 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3141 	struct pending_cmd *cmd;
3143 	BT_DBG("status 0x%02x", status);
3147 	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
/* Failure path: report the mapped HCI status to the requester. */
3152 		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3153 			   mgmt_status(status));
/* Success path: mirror the requested value into the dev_flags bit. */
3155 		struct mgmt_mode *cp = cmd->param;
3158 			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3160 			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3162 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3163 		new_settings(hdev, cmd->sk);
3166 	mgmt_pending_remove(cmd);
3169 	hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggles fast-connectable page scan
 * parameters.  Requires a powered, connectable BR/EDR >= 1.2 controller.
 * Sends BUSY if a same-op command is pending and short-circuits with a
 * settings response when the flag already matches the request.
 * NOTE(review): lossy extract -- hci_dev_lock(), the BUSY status argument,
 * the mgmt_pending_add() failure check and unlock/return are elided.
 */
3172 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3173 				void *data, u16 len)
3175 	struct mgmt_mode *cp = data;
3176 	struct pending_cmd *cmd;
3177 	struct hci_request req;
3180 	BT_DBG("%s", hdev->name);
/* Fast connectable needs BR/EDR and interlaced scan (HCI >= 1.2). */
3182 	if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
3183 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3184 				  MGMT_STATUS_NOT_SUPPORTED);
/* The mode parameter is strictly boolean. */
3186 	if (cp->val != 0x00 && cp->val != 0x01)
3187 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3188 				  MGMT_STATUS_INVALID_PARAMS);
3190 	if (!hdev_is_powered(hdev))
3191 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3192 				  MGMT_STATUS_NOT_POWERED);
/* Page scan parameters only matter while the device is connectable. */
3194 	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3195 		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3196 			          MGMT_STATUS_REJECTED);
3200 	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3201 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested state already matches the current flag. */
3206 	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3207 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3212 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3219 	hci_req_init(&req, hdev);
3221 	write_fast_connectable(&req, cp->val);
3223 	err = hci_req_run(&req, fast_connectable_complete);
/* If the request could not be queued, fail and drop the pending cmd. */
3225 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3226 				 MGMT_STATUS_FAILED);
3227 		mgmt_pending_remove(cmd);
3231 	hci_dev_unlock(hdev);
/* Validate one mgmt_ltk_info entry: authenticated and master must be
 * boolean (0x00/0x01) and the address type must be an LE type.
 * NOTE(review): lossy extract -- the "return false"/"return true" lines
 * belonging to these checks are elided.
 */
3236 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3238 	if (key->authenticated != 0x00 && key->authenticated != 0x01)
3240 	if (key->master != 0x00 && key->master != 0x01)
3242 	if (!bdaddr_type_is_le(key->addr.type))
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: validates the variable-length key
 * list (header count must match payload size and each entry must pass
 * ltk_is_valid()), then replaces the stored SMP LTKs wholesale.
 * NOTE(review): lossy extract -- hci_dev_lock(), the master/slave type
 * selection preceding HCI_SMP_LTK_SLAVE, and the final return are elided.
 */
3247 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3248 			       void *cp_data, u16 len)
3250 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
3251 	u16 key_count, expected_len;
3254 	key_count = __le16_to_cpu(cp->key_count);
/* The payload must be exactly header + key_count fixed-size entries. */
3256 	expected_len = sizeof(*cp) + key_count *
3257 					sizeof(struct mgmt_ltk_info);
3258 	if (expected_len != len) {
3259 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
3261 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3262 				  MGMT_STATUS_INVALID_PARAMS);
3265 	BT_DBG("%s key_count %u", hdev->name, key_count);
/* First pass: reject the whole command if any single key is malformed. */
3267 	for (i = 0; i < key_count; i++) {
3268 		struct mgmt_ltk_info *key = &cp->keys[i];
3270 		if (!ltk_is_valid(key))
3271 			return cmd_status(sk, hdev->id,
3272 					  MGMT_OP_LOAD_LONG_TERM_KEYS,
3273 					  MGMT_STATUS_INVALID_PARAMS);
/* Drop all previously stored LTKs before loading the new set. */
3278 	hci_smp_ltks_clear(hdev);
3280 	for (i = 0; i < key_count; i++) {
3281 		struct mgmt_ltk_info *key = &cp->keys[i];
3287 			type = HCI_SMP_LTK_SLAVE;
3289 		hci_add_ltk(hdev, &key->addr.bdaddr,
3290 			    bdaddr_to_le(key->addr.type),
3291 			    type, 0, key->authenticated, key->val,
3292 			    key->enc_size, key->ediv, key->rand);
3295 	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3298 	hci_dev_unlock(hdev);
/* Dispatch table indexed by MGMT opcode (see mgmt_control()).  Each entry
 * carries the handler, a var_len flag (true = data_len is a minimum for a
 * variable-length payload, false = exact size required) and the expected
 * parameter size.  Entry order must match the MGMT_OP_* numbering.
 */
3303 static const struct mgmt_handler {
3304 	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3308 } mgmt_handlers[] = {
3309 	{ NULL }, /* 0x0000 (no command) */
3310 	{ read_version,           false, MGMT_READ_VERSION_SIZE },
3311 	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
3312 	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
3313 	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
3314 	{ set_powered,            false, MGMT_SETTING_SIZE },
3315 	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
3316 	{ set_connectable,        false, MGMT_SETTING_SIZE },
3317 	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
3318 	{ set_pairable,           false, MGMT_SETTING_SIZE },
3319 	{ set_link_security,      false, MGMT_SETTING_SIZE },
3320 	{ set_ssp,                false, MGMT_SETTING_SIZE },
3321 	{ set_hs,                 false, MGMT_SETTING_SIZE },
3322 	{ set_le,                 false, MGMT_SETTING_SIZE },
3323 	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
3324 	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
3325 	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
3326 	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	/* Key-load commands carry a trailing array, hence var_len = true. */
3327 	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
3328 	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3329 	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
3330 	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
3331 	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
3332 	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3333 	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
3334 	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
3335 	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3336 	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
3337 	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
3338 	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3339 	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
3340 	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3341 	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3342 	{ add_remote_oob_data,    false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3343 	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3344 	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
3345 	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
3346 	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
3347 	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
3348 	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
3349 	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for management commands arriving on an HCI control socket.
 * Copies the message into a kernel buffer, parses the mgmt_hdr (opcode,
 * controller index, payload length), resolves the target hdev, validates
 * the opcode against mgmt_handlers[] and the payload size against the
 * table entry, then dispatches to the handler.
 * NOTE(review): lossy extract -- error returns for short messages, the
 * failed-kmalloc check, hdr assignment, hci_dev_put() and the final
 * cleanup/return are elided.
 */
3353 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3357 	struct mgmt_hdr *hdr;
3358 	u16 opcode, index, len;
3359 	struct hci_dev *hdev = NULL;
3360 	const struct mgmt_handler *handler;
3363 	BT_DBG("got %zu bytes", msglen);
/* Must at least contain a full management header. */
3365 	if (msglen < sizeof(*hdr))
3368 	buf = kmalloc(msglen, GFP_KERNEL);
3372 	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3378 	opcode = __le16_to_cpu(hdr->opcode);
3379 	index = __le16_to_cpu(hdr->index);
3380 	len = __le16_to_cpu(hdr->len);
/* Header-declared length must match the actual payload size. */
3382 	if (len != msglen - sizeof(*hdr)) {
3387 	if (index != MGMT_INDEX_NONE) {
3388 		hdev = hci_dev_get(index);
3390 			err = cmd_status(sk, index, opcode,
3391 					 MGMT_STATUS_INVALID_INDEX);
/* Controllers claimed by a user channel are invisible to mgmt. */
3395 		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3396 			err = cmd_status(sk, index, opcode,
3397 					 MGMT_STATUS_INVALID_INDEX);
3402 	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3403 	    mgmt_handlers[opcode].func == NULL) {
3404 		BT_DBG("Unknown op %u", opcode);
3405 		err = cmd_status(sk, index, opcode,
3406 				 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below READ_INFO are index-less; the rest require an hdev. */
3410 	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3411 	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3412 		err = cmd_status(sk, index, opcode,
3413 				 MGMT_STATUS_INVALID_INDEX);
3417 	handler = &mgmt_handlers[opcode];
/* var_len entries treat data_len as a minimum; others as an exact size. */
3419 	if ((handler->var_len && len < handler->data_len) ||
3420 	    (!handler->var_len && len != handler->data_len)) {
3421 		err = cmd_status(sk, index, opcode,
3422 				 MGMT_STATUS_INVALID_PARAMS);
3427 		mgmt_init_hdev(sk, hdev);
3429 	cp = buf + sizeof(*hdr);
3431 	err = handler->func(sk, hdev, cp, len);
/* Broadcast an Index Added event for a newly registered controller,
 * provided the hdev is one mgmt exposes (mgmt_valid_hdev()).
 */
3445 int mgmt_index_added(struct hci_dev *hdev)
3447 	if (!mgmt_valid_hdev(hdev))
3450 	return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Controller going away: fail every pending mgmt command with
 * INVALID_INDEX, then broadcast an Index Removed event.
 */
3453 int mgmt_index_removed(struct hci_dev *hdev)
3455 	u8 status = MGMT_STATUS_INVALID_INDEX;
3457 	if (!mgmt_valid_hdev(hdev))
/* opcode 0 = iterate over all pending commands for this hdev. */
3460 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3462 	return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
/* Queue HCI Write Scan Enable reflecting the CONNECTABLE/DISCOVERABLE
 * dev_flags, after first resetting fast-connectable page scan parameters.
 * NOTE(review): lossy extract -- the scan variable declaration and the
 * SCAN_PAGE assignment under the CONNECTABLE test are elided.
 */
3465 static void set_bredr_scan(struct hci_request *req)
3467 	struct hci_dev *hdev = req->hdev;
3470 	/* Ensure that fast connectable is disabled. This function will
3471 	 * not do anything if the page scan parameters are already what
3474 	write_fast_connectable(req, false);
3476 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3478 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3479 		scan |= SCAN_INQUIRY;
3482 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3485 static void powered_complete(struct hci_dev *hdev, u8 status)
3487 struct cmd_lookup match = { NULL, hdev };
3489 BT_DBG("status 0x%02x", status);
3493 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3495 new_settings(hdev, match.sk);
3497 hci_dev_unlock(hdev);
/* Build and run the HCI request that synchronizes controller state with
 * the mgmt dev_flags after power-on: SSP mode, LE host support,
 * advertising, authentication and BR/EDR scan enable.  Returns the
 * hci_req_run() result (0 when commands were queued).
 * NOTE(review): lossy extract -- ssp/link_sec declarations, cp.le
 * assignment, the memset of cp, and the EIR/class update calls near the
 * end are elided.
 */
3503 static int powered_update_hci(struct hci_dev *hdev)
3505 	struct hci_request req;
3508 	hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt wants it but the host side is
 * not yet configured for it.
 */
3510 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3511 	    !lmp_host_ssp_capable(hdev)) {
3514 		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3517 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
3518 	    lmp_bredr_capable(hdev)) {
3519 		struct hci_cp_write_le_host_supported cp;
3522 		cp.simul = lmp_le_br_capable(hdev);
3524 		/* Check first if we already have the right
3525 		 * host state (host features set)
3527 		if (cp.le != lmp_host_le_capable(hdev) ||
3528 		    cp.simul != lmp_host_le_br_capable(hdev))
3529 			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
/* Peripheral role: turn on LE advertising. */
3533 	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
3536 		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(adv), &adv);
/* Only touch auth enable when it differs from the current HCI_AUTH bit. */
3539 	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3540 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3541 		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3542 			    sizeof(link_sec), &link_sec);
3544 	if (lmp_bredr_capable(hdev)) {
3545 		set_bredr_scan(&req);
3551 	return hci_req_run(&req, powered_complete);
/* Notify mgmt of a controller power state change.  On power-on, try the
 * powered_update_hci() request chain (completion handles the responses);
 * on power-off (or if the chain queued nothing), answer pending commands
 * immediately, fail all other pending commands with NOT_POWERED, signal a
 * zeroed class of device when needed, and emit New Settings.
 * NOTE(review): lossy extract -- the powered/else branching and the final
 * sock_put/return are elided.
 */
3554 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3556 	struct cmd_lookup match = { NULL, hdev };
3557 	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3558 	u8 zero_cod[] = { 0, 0, 0 };
3561 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* 0 = HCI commands queued; responses arrive via powered_complete(). */
3565 		if (powered_update_hci(hdev) == 0)
3568 		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3573 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3574 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
/* Report class of device reset only if it was previously non-zero. */
3576 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3577 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3578 			   zero_cod, sizeof(zero_cod), NULL);
3581 	err = new_settings(hdev, match.sk);
/* A power-on attempt failed: reply to the pending SET_POWERED command
 * with RFKILLED when blocked by rfkill, FAILED otherwise.
 */
3589 int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3591 	struct pending_cmd *cmd;
3594 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3598 	if (err == -ERFKILL)
3599 		status = MGMT_STATUS_RFKILLED;
3601 		status = MGMT_STATUS_FAILED;
3603 	err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3605 	mgmt_pending_remove(cmd);
/* Sync the HCI_DISCOVERABLE dev_flag with the controller-reported state,
 * answer pending SET_DISCOVERABLE commands, and emit New Settings when
 * the flag actually changed.
 * NOTE(review): lossy extract -- the changed = true assignments and the
 * if (changed) guard are elided.
 */
3610 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3612 	struct cmd_lookup match = { NULL, hdev };
3613 	bool changed = false;
/* test_and_set/clear make this idempotent for repeated notifications. */
3617 		if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3620 		if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3624 	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3628 		err = new_settings(hdev, match.sk);
/* Sync the HCI_CONNECTABLE dev_flag with the controller-reported state
 * and emit New Settings (skipping the socket of a pending
 * SET_CONNECTABLE command, if any) when the flag changed.
 */
3636 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3638 	struct pending_cmd *cmd;
3639 	bool changed = false;
3643 		if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3646 		if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3650 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3653 		err = new_settings(hdev, cmd ? cmd->sk : NULL);
/* HCI Write Scan Enable failed: propagate the mapped error status to the
 * pending command(s) that requested the affected scan bits (page scan ->
 * SET_CONNECTABLE, inquiry scan -> SET_DISCOVERABLE).
 */
3658 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3660 	u8 mgmt_err = mgmt_status(status);
3662 	if (scan & SCAN_PAGE)
3663 		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3664 				     cmd_status_rsp, &mgmt_err);
3666 	if (scan & SCAN_INQUIRY)
3667 		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3668 				     cmd_status_rsp, &mgmt_err);
/* Emit a New Link Key event for a freshly created BR/EDR link key;
 * store_hint tells userspace whether it should persist the key.
 */
3673 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3676 	struct mgmt_ev_new_link_key ev;
3678 	memset(&ev, 0, sizeof(ev));
3680 	ev.store_hint = persistent;
3681 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3682 	ev.key.addr.type = BDADDR_BREDR;
3683 	ev.key.type = key->type;
3684 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3685 	ev.key.pin_len = key->pin_len;
3687 	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Long Term Key event for an SMP LTK; store_hint tells
 * userspace whether to persist it.
 * NOTE(review): lossy extract -- the master-flag assignment tied to the
 * HCI_SMP_LTK check is elided.
 */
3690 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3692 	struct mgmt_ev_new_long_term_key ev;
3694 	memset(&ev, 0, sizeof(ev));
3696 	ev.store_hint = persistent;
3697 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3698 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3699 	ev.key.authenticated = key->authenticated;
3700 	ev.key.enc_size = key->enc_size;
3701 	ev.key.ediv = key->ediv;
3703 	if (key->type == HCI_SMP_LTK)
3706 	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3707 	memcpy(ev.key.val, key->val, sizeof(key->val));
3709 	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* Emit a Device Connected event.  The variable-length EIR part carries
 * the remote name (EIR_NAME_COMPLETE) and, when known and non-zero, the
 * class of device (EIR_CLASS_OF_DEV).
 * NOTE(review): lossy extract -- the buf declaration, eir_len init and
 * the name_len guard around eir_append_data() are elided.
 */
3713 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3714 			  u8 addr_type, u32 flags, u8 *name, u8 name_len,
3718 	struct mgmt_ev_device_connected *ev = (void *) buf;
3721 	bacpy(&ev->addr.bdaddr, bdaddr);
3722 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
3724 	ev->flags = __cpu_to_le32(flags);
3727 		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include the CoD field when it is known and not all-zero. */
3730 	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3731 		eir_len = eir_append_data(ev->eir, eir_len,
3732 					  EIR_CLASS_OF_DEV, dev_class, 3);
3734 	ev->eir_len = cpu_to_le16(eir_len);
3736 	return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3737 			  sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT command
 * with success, remember its socket in *data for the caller, and drop the
 * pending entry.
 * NOTE(review): lossy extract -- the sock_hold()/*sk assignment between
 * the cmd_complete and mgmt_pending_remove calls is elided.
 */
3740 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3742 	struct mgmt_cp_disconnect *cp = cmd->param;
3743 	struct sock **sk = data;
3744 	struct mgmt_rp_disconnect rp;
3746 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3747 	rp.addr.type = cp->addr.type;
3749 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3755 	mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: complete a pending UNPAIR_DEVICE
 * command with success, emit the Device Unpaired event for its address,
 * and drop the pending entry.
 */
3758 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3760 	struct hci_dev *hdev = data;
3761 	struct mgmt_cp_unpair_device *cp = cmd->param;
3762 	struct mgmt_rp_unpair_device rp;
3764 	memset(&rp, 0, sizeof(rp));
3765 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3766 	rp.addr.type = cp->addr.type;
3768 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3770 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3772 	mgmt_pending_remove(cmd);
/* A connection dropped: complete any pending DISCONNECT commands (their
 * socket is excluded from the broadcast), emit Device Disconnected, then
 * complete any pending UNPAIR_DEVICE commands waiting on this link.
 * NOTE(review): lossy extract -- the reason assignment into ev, the
 * sock_put() and the return are elided.
 */
3775 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3776 			     u8 link_type, u8 addr_type, u8 reason)
3778 	struct mgmt_ev_device_disconnected ev;
3779 	struct sock *sk = NULL;
3782 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3784 	bacpy(&ev.addr.bdaddr, bdaddr);
3785 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3788 	err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3794 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Disconnect request failed at HCI level: still complete pending
 * UNPAIR_DEVICE commands, then answer the pending DISCONNECT command (if
 * any) with the mapped HCI status and the addressed device.
 */
3800 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3801 			   u8 link_type, u8 addr_type, u8 status)
3803 	struct mgmt_rp_disconnect rp;
3804 	struct pending_cmd *cmd;
3807 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3810 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3814 	bacpy(&rp.addr.bdaddr, bdaddr);
3815 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
3817 	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3818 			   mgmt_status(status), &rp, sizeof(rp));
3820 	mgmt_pending_remove(cmd);
/* Outgoing connection attempt failed: broadcast a Connect Failed event
 * carrying the target address and the mapped HCI status.
 */
3825 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3826 			u8 addr_type, u8 status)
3828 	struct mgmt_ev_connect_failed ev;
3830 	bacpy(&ev.addr.bdaddr, bdaddr);
3831 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3832 	ev.status = mgmt_status(status);
3834 	return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a controller PIN code request to userspace as a PIN Code
 * Request event.  PIN codes are BR/EDR-only, hence BDADDR_BREDR.
 * NOTE(review): lossy extract -- the ev.secure assignment is elided.
 */
3837 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3839 	struct mgmt_ev_pin_code_request ev;
3841 	bacpy(&ev.addr.bdaddr, bdaddr);
3842 	ev.addr.type = BDADDR_BREDR;
3845 	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* HCI PIN Code Reply completed: answer the pending PIN_CODE_REPLY mgmt
 * command with the mapped status and the peer address.
 */
3849 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3852 	struct pending_cmd *cmd;
3853 	struct mgmt_rp_pin_code_reply rp;
3856 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3860 	bacpy(&rp.addr.bdaddr, bdaddr);
3861 	rp.addr.type = BDADDR_BREDR;
3863 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3864 			   mgmt_status(status), &rp, sizeof(rp));
3866 	mgmt_pending_remove(cmd);
/* HCI PIN Code Negative Reply completed: answer the pending
 * PIN_CODE_NEG_REPLY mgmt command with the mapped status and peer address.
 */
3871 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3874 	struct pending_cmd *cmd;
3875 	struct mgmt_rp_pin_code_reply rp;
3878 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3882 	bacpy(&rp.addr.bdaddr, bdaddr);
3883 	rp.addr.type = BDADDR_BREDR;
3885 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3886 			   mgmt_status(status), &rp, sizeof(rp));
3888 	mgmt_pending_remove(cmd);
/* Forward a user-confirmation (numeric comparison) pairing request to
 * userspace, carrying the value to confirm and a hint about whether a
 * simple yes/no confirmation suffices.
 * NOTE(review): lossy extract -- the ev.value assignment is elided.
 */
3893 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3894 			      u8 link_type, u8 addr_type, __le32 value,
3897 	struct mgmt_ev_user_confirm_request ev;
3899 	BT_DBG("%s", hdev->name);
3901 	bacpy(&ev.addr.bdaddr, bdaddr);
3902 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3903 	ev.confirm_hint = confirm_hint;
3906 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Forward a passkey-entry pairing request to userspace as a User Passkey
 * Request event for the given peer address.
 */
3910 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3911 			      u8 link_type, u8 addr_type)
3913 	struct mgmt_ev_user_passkey_request ev;
3915 	BT_DBG("%s", hdev->name);
3917 	bacpy(&ev.addr.bdaddr, bdaddr);
3918 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3920 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey (neg) reply
 * opcodes: find the pending command for the given opcode, complete it
 * with the mapped status and peer address, and drop the pending entry.
 */
3924 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3925 				      u8 link_type, u8 addr_type, u8 status,
3928 	struct pending_cmd *cmd;
3929 	struct mgmt_rp_user_confirm_reply rp;
3932 	cmd = mgmt_pending_find(opcode, hdev);
3936 	bacpy(&rp.addr.bdaddr, bdaddr);
3937 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
3938 	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3941 	mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
3946 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3947 				     u8 link_type, u8 addr_type, u8 status)
3949 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3950 					  status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
3953 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3954 					 u8 link_type, u8 addr_type, u8 status)
3956 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3958 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
3961 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3962 				     u8 link_type, u8 addr_type, u8 status)
3964 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3965 					  status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
3968 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3969 					 u8 link_type, u8 addr_type, u8 status)
3971 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3973 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display the passkey the
 * remote side must enter; `entered` reports keypress progress.
 */
3976 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3977 			     u8 link_type, u8 addr_type, u32 passkey,
3980 	struct mgmt_ev_passkey_notify ev;
3982 	BT_DBG("%s", hdev->name);
3984 	bacpy(&ev.addr.bdaddr, bdaddr);
3985 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3986 	ev.passkey = __cpu_to_le32(passkey);
3987 	ev.entered = entered;
3989 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Authentication with the peer failed: broadcast an Authentication
 * Failed event with the mapped HCI status.
 */
3992 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3993 		     u8 addr_type, u8 status)
3995 	struct mgmt_ev_auth_failed ev;
3997 	bacpy(&ev.addr.bdaddr, bdaddr);
3998 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3999 	ev.status = mgmt_status(status);
4001 	return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* HCI Write Auth Enable completed.  On error, fail all pending
 * SET_LINK_SECURITY commands with the mapped status; on success, sync the
 * HCI_LINK_SECURITY dev_flag with the controller's HCI_AUTH flag, answer
 * pending commands, and emit New Settings if the flag changed.
 * NOTE(review): lossy extract -- the status branch structure, changed
 * assignments and the if (changed) guard are elided.
 */
4004 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4006 	struct cmd_lookup match = { NULL, hdev };
4007 	bool changed = false;
4011 		u8 mgmt_err = mgmt_status(status);
4012 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4013 				     cmd_status_rsp, &mgmt_err);
4017 	if (test_bit(HCI_AUTH, &hdev->flags)) {
4018 		if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4021 		if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4025 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4029 		err = new_settings(hdev, match.sk);
/* Queue an HCI Write EIR with all-zero data and clear the cached copy in
 * hdev->eir.  No-op on controllers without extended inquiry support.
 */
4037 static void clear_eir(struct hci_request *req)
4039 	struct hci_dev *hdev = req->hdev;
4040 	struct hci_cp_write_eir cp;
4042 	if (!lmp_ext_inq_capable(hdev))
4045 	memset(hdev->eir, 0, sizeof(hdev->eir));
4047 	memset(&cp, 0, sizeof(cp));
4049 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* HCI Write SSP Mode completed.  On error, roll back the optimistically
 * set HCI_SSP_ENABLED flag (emitting New Settings) and fail pending
 * SET_SSP commands; on success, sync the flag with the enable value,
 * answer pending commands, emit New Settings if it changed, and queue a
 * follow-up request (EIR update / clear) matching the new SSP state.
 * NOTE(review): lossy extract -- several branch lines (status check,
 * update_eir() call, sock_put) are elided.
 */
4052 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4054 	struct cmd_lookup match = { NULL, hdev };
4055 	struct hci_request req;
4056 	bool changed = false;
4060 		u8 mgmt_err = mgmt_status(status);
/* Revert the flag if it had been set in anticipation of success. */
4062 		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4064 			err = new_settings(hdev, NULL);
4066 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4073 		if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4076 		if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4080 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4083 		err = new_settings(hdev, match.sk);
4088 	hci_req_init(&req, hdev);
4090 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4095 	hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup, taking a reference the caller must drop.
 */
4100 static void sk_lookup(struct pending_cmd *cmd, void *data)
4102 	struct cmd_lookup *match = data;
4104 	if (match->sk == NULL) {
4105 		match->sk = cmd->sk;
4106 		sock_hold(match->sk);
/* Class-of-device write completed: find the socket of whichever pending
 * command triggered it (SET_DEV_CLASS, ADD_UUID or REMOVE_UUID) so that
 * socket is excluded from the Class Of Dev Changed broadcast.
 * NOTE(review): lossy extract -- the status guard around mgmt_event and
 * the sock_put()/return are elided.
 */
4110 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4113 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4116 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4117 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4118 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4121 		err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* HCI local-name write completed: build a Local Name Changed event with
 * the new name and short name, cache the name on the hdev when no mgmt
 * command is pending (i.e. the change did not come via mgmt), and skip
 * the broadcast entirely while a SET_POWERED power-up sequence is
 * running.
 * NOTE(review): lossy extract -- the status guard and the branch
 * structure around the cmd check are elided.
 */
4130 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4132 	struct mgmt_cp_set_local_name ev;
4133 	struct pending_cmd *cmd;
4138 	memset(&ev, 0, sizeof(ev));
4139 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4140 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4142 	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4144 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4146 		/* If this is a HCI command related to powering on the
4147 		 * HCI dev don't send any mgmt signals.
4149 		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4153 	return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4154 			  cmd ? cmd->sk : NULL);
/* HCI Read Local OOB Data completed: answer the pending
 * READ_LOCAL_OOB_DATA command -- cmd_status on failure, otherwise a
 * cmd_complete carrying the hash and randomizer values.
 */
4157 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4158 					    u8 *randomizer, u8 status)
4160 	struct pending_cmd *cmd;
4163 	BT_DBG("%s status %u", hdev->name, status);
4165 	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4170 		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4171 				 mgmt_status(status));
4173 		struct mgmt_rp_read_local_oob_data rp;
4175 		memcpy(rp.hash, hash, sizeof(rp.hash));
4176 		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4178 		err = cmd_complete(cmd->sk, hdev->id,
4179 				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
4183 	mgmt_pending_remove(cmd);
/* Report a discovered device to userspace as a Device Found event.
 * Skipped entirely when no discovery is active.  The EIR payload is
 * copied through, with a class-of-device field appended only when the
 * caller supplied one and the EIR does not already contain it; the
 * 5-byte headroom check reserves space for that potential CoD field.
 * NOTE(review): lossy extract -- the buf declaration, rssi assignment,
 * the cfm_name/ssp flag conditions and the eir_len guard are elided.
 */
4188 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4189 		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4190 		      ssp, u8 *eir, u16 eir_len)
4193 	struct mgmt_ev_device_found *ev = (void *) buf;
4196 	if (!hci_discovery_active(hdev))
4199 	/* Leave 5 bytes for a potential CoD field */
4200 	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4203 	memset(buf, 0, sizeof(buf));
4205 	bacpy(&ev->addr.bdaddr, bdaddr);
4206 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
4209 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4211 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4214 		memcpy(ev->eir, eir, eir_len);
4216 	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4217 		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4220 	ev->eir_len = cpu_to_le16(eir_len);
4221 	ev_size = sizeof(*ev) + eir_len;
4223 	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name during discovery as a Device Found event
 * whose EIR contains only an EIR_NAME_COMPLETE field.
 * NOTE(review): lossy extract -- the rssi assignment and eir_len
 * declaration are elided.
 */
4226 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4227 		     u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4229 	struct mgmt_ev_device_found *ev;
/* +2 = EIR field header (length byte + type byte) for the name. */
4230 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4233 	ev = (struct mgmt_ev_device_found *) buf;
4235 	memset(buf, 0, sizeof(buf));
4237 	bacpy(&ev->addr.bdaddr, bdaddr);
4238 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
4241 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4244 	ev->eir_len = cpu_to_le16(eir_len);
4246 	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4247 			  sizeof(*ev) + eir_len, NULL);
/* Discovery state changed: complete the pending START_DISCOVERY or
 * STOP_DISCOVERY command (returning the discovery type), then broadcast
 * a Discovering event with the current type and new state.
 * NOTE(review): lossy extract -- the branch selecting which pending op
 * to look up based on `discovering` is elided.
 */
4250 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4252 	struct mgmt_ev_discovering ev;
4253 	struct pending_cmd *cmd;
4255 	BT_DBG("%s discovering %u", hdev->name, discovering);
4258 		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4260 		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4263 		u8 type = hdev->discovery.type;
4265 		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4267 		mgmt_pending_remove(cmd);
4270 	memset(&ev, 0, sizeof(ev));
4271 	ev.type = hdev->discovery.type;
4272 	ev.discovering = discovering;
4274 	return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a Device Blocked event; the socket of a pending BLOCK_DEVICE
 * command (the requester, which gets a direct reply) is excluded.
 */
4277 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4279 	struct pending_cmd *cmd;
4280 	struct mgmt_ev_device_blocked ev;
4282 	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4284 	bacpy(&ev.addr.bdaddr, bdaddr);
4285 	ev.addr.type = type;
4287 	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4288 			  cmd ? cmd->sk : NULL);
/* Broadcast a Device Unblocked event; the socket of a pending
 * UNBLOCK_DEVICE command is excluded, mirroring mgmt_device_blocked().
 */
4291 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4293 	struct pending_cmd *cmd;
4294 	struct mgmt_ev_device_unblocked ev;
4296 	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4298 	bacpy(&ev.addr.bdaddr, bdaddr);
4299 	ev.addr.type = type;
4301 	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4302 			  cmd ? cmd->sk : NULL);
/* Module parameter: allow enabling High Speed (AMP) support at load time
 * or via sysfs (mode 0644).
 */
4305 module_param(enable_hs, bool, 0644);
4306 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");