2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
/* mgmt protocol version advertised via MGMT_OP_READ_VERSION. */
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* Opcodes reported by MGMT_OP_READ_COMMANDS as supported by this kernel.
 * NOTE(review): the embedded line numbering has gaps (e.g. 42-43, 47,
 * 49-51, 54-55), so several entries (and the closing "};") were dropped
 * by the extraction — restore them from the upstream file.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Events reported by MGMT_OP_READ_COMMANDS as emittable by this kernel.
 * NOTE(review): numbering gaps (83, 85, 88, 96-98, closing brace) indicate
 * dropped entries — restore from the upstream file.
 */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
/* How long the service cache stays valid after it is populated. */
105 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* "Powered" from mgmt's point of view: HCI_UP and not in the auto-off
 * grace period.
 */
107 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
108 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
/* NOTE(review): this stray field is the interior of struct pending_cmd;
 * the surrounding struct declaration was dropped by the extraction.
 */
111 struct list_head list;
119 /* HCI to MGMT error code conversion table */
/* Indexed by HCI core status code; see mgmt_status() below.
 * NOTE(review): original line 121 (the 0x00 "Success" entry) and the
 * closing "};" are missing from this listing.
 */
120 static u8 mgmt_status_table[] = {
122 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
123 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
124 MGMT_STATUS_FAILED, /* Hardware Failure */
125 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
126 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
127 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
128 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
129 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
130 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
131 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
132 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
133 MGMT_STATUS_BUSY, /* Command Disallowed */
134 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
135 MGMT_STATUS_REJECTED, /* Rejected Security */
136 MGMT_STATUS_REJECTED, /* Rejected Personal */
137 MGMT_STATUS_TIMEOUT, /* Host Timeout */
138 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
139 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
140 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
141 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
142 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
143 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
144 MGMT_STATUS_BUSY, /* Repeated Attempts */
145 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
146 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
147 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
148 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
149 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
150 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
151 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
152 MGMT_STATUS_FAILED, /* Unspecified Error */
153 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
154 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
155 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
156 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
157 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
158 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
159 MGMT_STATUS_FAILED, /* Unit Link Key Used */
160 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
161 MGMT_STATUS_TIMEOUT, /* Instant Passed */
162 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
163 MGMT_STATUS_FAILED, /* Transaction Collision */
164 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
165 MGMT_STATUS_REJECTED, /* QoS Rejected */
166 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
167 MGMT_STATUS_REJECTED, /* Insufficient Security */
168 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
169 MGMT_STATUS_BUSY, /* Role Switch Pending */
170 MGMT_STATUS_FAILED, /* Slot Violation */
171 MGMT_STATUS_FAILED, /* Role Switch Failed */
172 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
173 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
174 MGMT_STATUS_BUSY, /* Host Busy Pairing */
175 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
176 MGMT_STATUS_BUSY, /* Controller Busy */
177 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
178 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
179 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
180 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
181 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
184 bool mgmt_valid_hdev(struct hci_dev *hdev)
186 return hdev->dev_type == HCI_BREDR;
189 static u8 mgmt_status(u8 hci_status)
191 if (hci_status < ARRAY_SIZE(mgmt_status_table))
192 return mgmt_status_table[hci_status];
194 return MGMT_STATUS_FAILED;
/* Queue an MGMT_EV_CMD_STATUS event on @sk for command @cmd on @index.
 * NOTE(review): extraction dropped lines here — the skb declaration, the
 * alloc_skb() NULL check, the ev->status store, the error cleanup path
 * (kfree_skb) and the final return are all missing; restore from upstream.
 */
197 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
200 struct mgmt_hdr *hdr;
201 struct mgmt_ev_cmd_status *ev;
204 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
/* Event payload: header plus the fixed-size cmd_status body. */
206 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
210 hdr = (void *) skb_put(skb, sizeof(*hdr));
212 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
213 hdr->index = cpu_to_le16(index);
214 hdr->len = cpu_to_le16(sizeof(*ev));
216 ev = (void *) skb_put(skb, sizeof(*ev));
218 ev->opcode = cpu_to_le16(cmd);
/* Deliver directly to the requesting socket's receive queue. */
220 err = sock_queue_rcv_skb(sk, skb);
/* Queue an MGMT_EV_CMD_COMPLETE event on @sk, carrying @rp_len bytes of
 * response parameters (@rp may be NULL when rp_len is 0).
 * NOTE(review): extraction dropped lines here — skb declaration, alloc
 * NULL check, ev->status store, the "if (rp)" guard around the memcpy,
 * error cleanup and final return are missing; restore from upstream.
 */
227 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
228 void *rp, size_t rp_len)
231 struct mgmt_hdr *hdr;
232 struct mgmt_ev_cmd_complete *ev;
235 BT_DBG("sock %p", sk);
237 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
241 hdr = (void *) skb_put(skb, sizeof(*hdr));
243 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
244 hdr->index = cpu_to_le16(index);
245 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
247 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
248 ev->opcode = cpu_to_le16(cmd);
252 memcpy(ev->data, rp, rp_len);
254 err = sock_queue_rcv_skb(sk, skb);
261 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
264 struct mgmt_rp_read_version rp;
266 BT_DBG("sock %p", sk);
268 rp.version = MGMT_VERSION;
269 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
271 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: return the supported command and event
 * opcode lists. Index-independent.
 * NOTE(review): extraction dropped lines — declarations of rp_size/i/
 * opcode/err, the kmalloc NULL check, the trailing kfree(rp) and the
 * final return are missing; restore from upstream.
 */
275 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
278 struct mgmt_rp_read_commands *rp;
279 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
280 const u16 num_events = ARRAY_SIZE(mgmt_events);
285 BT_DBG("sock %p", sk);
287 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
289 rp = kmalloc(rp_size, GFP_KERNEL);
293 rp->num_commands = __constant_cpu_to_le16(num_commands);
294 rp->num_events = __constant_cpu_to_le16(num_events);
/* Commands first, then events, packed little-endian back to back. */
296 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
297 put_unaligned_le16(mgmt_commands[i], opcode);
299 for (i = 0; i < num_events; i++, opcode++)
300 put_unaligned_le16(mgmt_events[i], opcode);
302 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: return the ids of all valid (BR/EDR)
 * controllers that are not in setup and not claimed by a user channel.
 * NOTE(review): extraction dropped lines — the count/rp_len/err
 * declarations, the first-pass count++ loop body, the kmalloc NULL
 * check, the continue statements after the skip-tests, the kfree(rp)
 * and final return are missing; restore from upstream.
 */
309 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
312 struct mgmt_rp_read_index_list *rp;
318 BT_DBG("sock %p", sk);
320 read_lock(&hci_dev_list_lock);
/* First pass: count eligible controllers to size the reply. */
323 list_for_each_entry(d, &hci_dev_list, list) {
324 if (!mgmt_valid_hdev(d))
/* Allocated GFP_ATOMIC because the dev list read-lock is held. */
330 rp_len = sizeof(*rp) + (2 * count);
331 rp = kmalloc(rp_len, GFP_ATOMIC);
333 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in the ids, re-checking eligibility. */
338 list_for_each_entry(d, &hci_dev_list, list) {
339 if (test_bit(HCI_SETUP, &d->dev_flags))
342 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
345 if (!mgmt_valid_hdev(d))
348 rp->index[count++] = cpu_to_le16(d->id);
349 BT_DBG("Added hci%u", d->id);
/* Recompute rp_len: count may have shrunk between the two passes. */
352 rp->num_controllers = cpu_to_le16(count);
353 rp_len = sizeof(*rp) + (2 * count);
355 read_unlock(&hci_dev_list_lock);
357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Build the MGMT_SETTING_* bitmask of everything this controller could
 * support, based on its LMP feature bits.
 * NOTE(review): extraction dropped lines — the "u32 settings = 0;"
 * declaration, the closing brace of the lmp_bredr block, the condition
 * guarding MGMT_SETTING_HS and the final "return settings;" are missing;
 * restore from upstream.
 */
365 static u32 get_supported_settings(struct hci_dev *hdev)
369 settings |= MGMT_SETTING_POWERED;
370 settings |= MGMT_SETTING_PAIRABLE;
372 if (lmp_ssp_capable(hdev))
373 settings |= MGMT_SETTING_SSP;
375 if (lmp_bredr_capable(hdev)) {
376 settings |= MGMT_SETTING_CONNECTABLE;
/* Fast connectable needs interlaced page scan, a 1.2 feature. */
377 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
378 settings |= MGMT_SETTING_FAST_CONNECTABLE;
379 settings |= MGMT_SETTING_DISCOVERABLE;
380 settings |= MGMT_SETTING_BREDR;
381 settings |= MGMT_SETTING_LINK_SECURITY;
385 settings |= MGMT_SETTING_HS;
387 if (lmp_le_capable(hdev))
388 settings |= MGMT_SETTING_LE;
/* Build the MGMT_SETTING_* bitmask reflecting the controller's current
 * state, from its flag bits.
 * NOTE(review): extraction dropped the "u32 settings = 0;" declaration
 * and the final "return settings;" — restore from upstream.
 */
393 static u32 get_current_settings(struct hci_dev *hdev)
397 if (hdev_is_powered(hdev))
398 settings |= MGMT_SETTING_POWERED;
400 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
401 settings |= MGMT_SETTING_CONNECTABLE;
403 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
404 settings |= MGMT_SETTING_FAST_CONNECTABLE;
406 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
407 settings |= MGMT_SETTING_DISCOVERABLE;
409 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_PAIRABLE;
412 if (lmp_bredr_capable(hdev))
413 settings |= MGMT_SETTING_BREDR;
415 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
416 settings |= MGMT_SETTING_LE;
418 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
419 settings |= MGMT_SETTING_LINK_SECURITY;
421 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_SSP;
424 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
425 settings |= MGMT_SETTING_HS;
/* Device ID / PnP Information service class, excluded from EIR UUIDs. */
430 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR "16-bit Service UUIDs" field for all registered 16-bit
 * UUIDs, writing at most @len bytes starting at @data.
 * NOTE(review): extraction dropped lines — the u16 uuid16 declaration,
 * the lazy field-header initialisation (length byte + type at
 * uuids_start), the continue/break statements and the final
 * "return ptr;" are missing; restore from upstream.
 */
432 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
434 u8 *ptr = data, *uuids_start = NULL;
435 struct bt_uuid *uuid;
440 list_for_each_entry(uuid, &hdev->uuids, list) {
443 if (uuid->size != 16)
/* 16-bit alias lives in bytes 12-13 of the 128-bit form. */
446 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
450 if (uuid16 == PNP_INFO_SVCLASS_ID)
456 uuids_start[1] = EIR_UUID16_ALL;
460 /* Stop if not enough space to put next UUID */
461 if ((ptr - data) + sizeof(u16) > len) {
/* Downgrade to "incomplete list" marker when truncating. */
462 uuids_start[1] = EIR_UUID16_SOME;
466 *ptr++ = (uuid16 & 0x00ff);
467 *ptr++ = (uuid16 & 0xff00) >> 8;
468 uuids_start[0] += sizeof(uuid16);
/* Append an EIR "32-bit Service UUIDs" field for all registered 32-bit
 * UUIDs; same structure as create_uuid16_list().
 * NOTE(review): extraction dropped lines — the continue, the lazy field
 * header init, the break and "return ptr;" are missing; restore from
 * upstream.
 */
474 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
476 u8 *ptr = data, *uuids_start = NULL;
477 struct bt_uuid *uuid;
482 list_for_each_entry(uuid, &hdev->uuids, list) {
483 if (uuid->size != 32)
489 uuids_start[1] = EIR_UUID32_ALL;
493 /* Stop if not enough space to put next UUID */
494 if ((ptr - data) + sizeof(u32) > len) {
495 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit alias occupies bytes 12-15 of the 128-bit form. */
499 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
501 uuids_start[0] += sizeof(u32);
/* Append an EIR "128-bit Service UUIDs" field for all registered 128-bit
 * UUIDs; same structure as the 16/32-bit variants.
 * NOTE(review): extraction dropped lines — the continue, the lazy field
 * header init, the break and "return ptr;" are missing; restore from
 * upstream.
 */
507 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
509 u8 *ptr = data, *uuids_start = NULL;
510 struct bt_uuid *uuid;
515 list_for_each_entry(uuid, &hdev->uuids, list) {
516 if (uuid->size != 128)
522 uuids_start[1] = EIR_UUID128_ALL;
526 /* Stop if not enough space to put next UUID */
527 if ((ptr - data) + 16 > len) {
528 uuids_start[1] = EIR_UUID128_SOME;
532 memcpy(ptr, uuid->uuid, 16);
534 uuids_start[0] += 16;
/* Fill @data (HCI_MAX_EIR_LENGTH bytes) with the controller's Extended
 * Inquiry Response: local name, TX power, device ID, then UUID lists.
 * NOTE(review): extraction dropped lines — the ptr declaration/init,
 * the name-truncation check that selects SHORT vs COMPLETE, parts of
 * the TX-power and device-ID blocks (length/advance lines) are missing;
 * restore from upstream.
 */
540 static void create_eir(struct hci_dev *hdev, u8 *data)
545 name_len = strlen(hdev->dev_name);
551 ptr[1] = EIR_NAME_SHORT;
553 ptr[1] = EIR_NAME_COMPLETE;
555 /* EIR Data length */
556 ptr[0] = name_len + 1;
558 memcpy(ptr + 2, hdev->dev_name, name_len);
560 ptr += (name_len + 2);
563 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
565 ptr[1] = EIR_TX_POWER;
566 ptr[2] = (u8) hdev->inq_tx_power;
/* Device ID field is only emitted when a source has been configured. */
571 if (hdev->devid_source > 0) {
573 ptr[1] = EIR_DEVICE_ID;
575 put_unaligned_le16(hdev->devid_source, ptr + 2);
576 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
577 put_unaligned_le16(hdev->devid_product, ptr + 6);
578 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* UUID lists consume whatever space remains. */
583 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
584 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
585 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command on @req if the computed EIR differs
 * from what the controller currently has. No-op unless powered, EIR
 * capable, SSP enabled and the service cache is off.
 * NOTE(review): extraction dropped the "return;" after each guard —
 * restore from upstream, otherwise the guards fall through.
 */
588 static void update_eir(struct hci_request *req)
590 struct hci_dev *hdev = req->hdev;
591 struct hci_cp_write_eir cp;
593 if (!hdev_is_powered(hdev))
596 if (!lmp_ext_inq_capable(hdev))
599 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
602 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
605 memset(&cp, 0, sizeof(cp));
607 create_eir(hdev, cp.data);
/* Skip the HCI round-trip when nothing changed. */
609 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
612 memcpy(hdev->eir, cp.data, sizeof(cp.data));
614 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
617 static u8 get_service_classes(struct hci_dev *hdev)
619 struct bt_uuid *uuid;
622 list_for_each_entry(uuid, &hdev->uuids, list)
623 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command on @req if the computed
 * class differs from the controller's current one.
 * NOTE(review): extraction dropped lines — the "u8 cod[3];" declaration
 * and the "return;" after each guard are missing; restore from upstream.
 */
628 static void update_class(struct hci_request *req)
630 struct hci_dev *hdev = req->hdev;
633 BT_DBG("%s", hdev->name);
635 if (!hdev_is_powered(hdev))
638 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
/* CoD layout: minor, major, then service-class byte. */
641 cod[0] = hdev->minor_class;
642 cod[1] = hdev->major_class;
643 cod[2] = get_service_classes(hdev);
645 if (memcmp(cod, hdev->dev_class, 3) == 0)
648 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed work: expire the service cache and push the now-current EIR
 * and Class of Device to the controller.
 * NOTE(review): extraction dropped lines — the container_of second
 * argument continuation (service_cache.work), hci_dev_lock() and the
 * update_eir()/update_class() calls are missing; restore from upstream.
 */
651 static void service_cache_off(struct work_struct *work)
653 struct hci_dev *hdev = container_of(work, struct hci_dev,
655 struct hci_request req;
/* Nothing to do if the cache flag was already cleared. */
657 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
660 hci_req_init(&req, hdev);
667 hci_dev_unlock(hdev);
/* Runs outside the dev lock; no completion callback needed. */
669 hci_req_run(&req, NULL);
/* One-time per-hdev mgmt initialisation, performed on the first mgmt
 * command that touches the device (idempotent via HCI_MGMT bit).
 * NOTE(review): the "return;" after the test_and_set_bit guard was
 * dropped by the extraction; restore from upstream.
 */
672 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
674 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
677 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off)_
679 /* Non-mgmt controlled devices get this bit set
680 * implicitly so that pairing works for them, however
681 * for mgmt we require user-space to explicitly enable
684 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: return the controller's address, version,
 * class, names and supported/current settings.
 * NOTE(review): extraction dropped lines — hci_dev_lock(hdev) before the
 * snapshot and the "sizeof(rp));" continuation of the return are missing;
 * restore from upstream.
 */
687 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
688 void *data, u16 data_len)
690 struct mgmt_rp_read_info rp;
692 BT_DBG("sock %p %s", sk, hdev->name);
696 memset(&rp, 0, sizeof(rp));
698 bacpy(&rp.bdaddr, &hdev->bdaddr);
700 rp.version = hdev->hci_ver;
701 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
703 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
704 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
706 memcpy(rp.dev_class, hdev->dev_class, 3);
708 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
709 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
711 hci_dev_unlock(hdev);
713 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Release a pending command: drops the socket reference and frees the
 * parameter copy and the command itself.
 * NOTE(review): the entire body (original lines 718-722: sock_put,
 * kfree(cmd->param), kfree(cmd)) was dropped by the extraction; restore
 * from upstream.
 */
717 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate a pending command for @opcode on @hdev, copying @len bytes of
 * request parameters, hold a reference on @sk and link it into
 * hdev->mgmt_pending.
 * NOTE(review): extraction dropped lines — both kmalloc NULL checks (the
 * second must kfree(cmd)), the cmd->sk assignment with sock_hold(sk) and
 * the final "return cmd;" are missing; restore from upstream.
 */
724 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
725 struct hci_dev *hdev, void *data,
728 struct pending_cmd *cmd;
730 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
734 cmd->opcode = opcode;
735 cmd->index = hdev->id;
737 cmd->param = kmalloc(len, GFP_KERNEL);
744 memcpy(cmd->param, data, len);
749 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command whose opcode matches @opcode
 * (opcode 0 matches all). Safe against @cb removing entries.
 * NOTE(review): extraction dropped the callback-parameter continuation
 * line, the "continue;" and the "cb(cmd, data);" call; restore from
 * upstream.
 */
754 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
755 void (*cb)(struct pending_cmd *cmd,
759 struct pending_cmd *cmd, *tmp;
761 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
762 if (opcode > 0 && cmd->opcode != opcode)
/* Find the first pending command with @opcode on @hdev, or NULL.
 * NOTE(review): extraction dropped the "return cmd;" inside the match
 * branch and the trailing "return NULL;"; restore from upstream.
 */
769 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
771 struct pending_cmd *cmd;
773 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
774 if (cmd->opcode == opcode)
781 static void mgmt_pending_remove(struct pending_cmd *cmd)
783 list_del(&cmd->list);
784 mgmt_pending_free(cmd);
787 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
789 __le32 settings = cpu_to_le32(get_current_settings(hdev));
791 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler: power the controller up or down via the
 * power_on/power_off work items; replies when the transition completes.
 * NOTE(review): extraction dropped many lines — the err declaration,
 * hci_dev_lock(), MGMT_STATUS_BUSY argument and goto for the pending
 * case, the auto-off fast path's completion, the pending_cmd NULL check,
 * the if/else around the queue_work calls, "err = 0;", the failed label
 * and the final return; restore from upstream.
 */
795 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
798 struct mgmt_mode *cp = data;
799 struct pending_cmd *cmd;
802 BT_DBG("request for %s", hdev->name);
804 if (cp->val != 0x00 && cp->val != 0x01)
805 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
806 MGMT_STATUS_INVALID_PARAMS);
/* Only one power transition may be in flight at a time. */
810 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
811 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Fast path: device was in the auto-off grace period. */
816 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
817 cancel_delayed_work(&hdev->power_off);
820 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
822 err = mgmt_powered(hdev, 1);
/* Already in the requested state: just echo the settings. */
827 if (!!cp->val == hdev_is_powered(hdev)) {
828 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
832 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
839 queue_work(hdev->req_workqueue, &hdev->power_on);
841 queue_work(hdev->req_workqueue, &hdev->power_off.work);
846 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control sockets except @skip_sk.
 * @hdev may be NULL for index-independent events.
 * NOTE(review): extraction dropped lines — the skb declaration, alloc
 * NULL check, the "if (hdev)/else" around the index assignment and the
 * final "return 0;" are missing; restore from upstream.
 */
850 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
851 struct sock *skip_sk)
854 struct mgmt_hdr *hdr;
856 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
860 hdr = (void *) skb_put(skb, sizeof(*hdr));
861 hdr->opcode = cpu_to_le16(event);
863 hdr->index = cpu_to_le16(hdev->id);
865 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
866 hdr->len = cpu_to_le16(data_len);
869 memcpy(skb_put(skb, data_len), data, data_len);
871 /* Time stamp */
872 __net_timestamp(skb);
874 hci_send_to_control(skb, skip_sk);
880 static int new_settings(struct hci_dev *hdev, struct sock *skip)
884 ev = cpu_to_le32(get_current_settings(hdev));
886 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* NOTE(review): this stray field is the interior of struct cmd_lookup
 * ({ struct sock *sk; struct hci_dev *hdev; u8 mgmt_status; }); the
 * surrounding declaration was dropped by the extraction.
 */
891 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer each matching pending command
 * with the current settings and remember the first socket so the caller
 * can skip it when broadcasting new_settings.
 * NOTE(review): the "match->sk = cmd->sk;" assignment inside the NULL
 * check was dropped by the extraction; restore from upstream.
 */
895 static void settings_rsp(struct pending_cmd *cmd, void *data)
897 struct cmd_lookup *match = data;
899 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
901 list_del(&cmd->list);
903 if (match->sk == NULL) {
905 sock_hold(match->sk);
908 mgmt_pending_free(cmd);
911 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
915 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
916 mgmt_pending_remove(cmd);
/* MGMT_OP_SET_DISCOVERABLE handler: toggle inquiry scan, with an optional
 * timeout after which discoverability is dropped again.
 * NOTE(review): extraction dropped many lines — declarations (timeout,
 * scan, err), hci_dev_lock(), MGMT_STATUS_BUSY argument, the goto/
 * failed paths, the "scan = SCAN_PAGE;" base value, the pending_cmd NULL
 * check, the hci_send_cmd error check and the final return; restore from
 * upstream.
 */
919 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
922 struct mgmt_cp_set_discoverable *cp = data;
923 struct pending_cmd *cmd;
928 BT_DBG("request for %s", hdev->name);
930 if (!lmp_bredr_capable(hdev))
931 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
932 MGMT_STATUS_NOT_SUPPORTED);
934 if (cp->val != 0x00 && cp->val != 0x01)
935 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
936 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense when enabling discoverability. */
938 timeout = __le16_to_cpu(cp->timeout);
939 if (!cp->val && timeout > 0)
940 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
941 MGMT_STATUS_INVALID_PARAMS);
945 if (!hdev_is_powered(hdev) && timeout > 0) {
946 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
947 MGMT_STATUS_NOT_POWERED);
951 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
952 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
953 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable. */
958 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
959 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
960 MGMT_STATUS_REJECTED);
/* Powered off: just flip the flag and report. */
964 if (!hdev_is_powered(hdev)) {
965 bool changed = false;
967 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
968 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
972 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
977 err = new_settings(hdev, sk);
/* Already in the requested state: only (re)arm the timeout. */
982 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
983 if (hdev->discov_timeout > 0) {
984 cancel_delayed_work(&hdev->discov_off);
985 hdev->discov_timeout = 0;
988 if (cp->val && timeout > 0) {
989 hdev->discov_timeout = timeout;
990 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
991 msecs_to_jiffies(hdev->discov_timeout * 1000));
994 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
998 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1007 scan |= SCAN_INQUIRY;
1009 cancel_delayed_work(&hdev->discov_off);
1011 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1013 mgmt_pending_remove(cmd);
/* Timeout is armed by mgmt_discoverable() once the command completes. */
1016 hdev->discov_timeout = timeout;
1019 hci_dev_unlock(hdev);
/* Queue page-scan activity/type HCI commands on @req to enable or
 * disable fast connectable (interlaced scan with a short interval).
 * NOTE(review): extraction dropped lines — the acp/type declarations'
 * companions, the "return;" for pre-1.2 controllers, the if/else
 * structure braces and the sizeof(acp)/&acp continuation of the first
 * hci_req_add are missing; restore from upstream.
 */
1023 static void write_fast_connectable(struct hci_request *req, bool enable)
1025 struct hci_dev *hdev = req->hdev;
1026 struct hci_cp_write_page_scan_activity acp;
/* Interlaced page scan needs Bluetooth 1.2 or later. */
1029 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1033 type = PAGE_SCAN_TYPE_INTERLACED;
1035 /* 160 msec page scan interval */
1036 acp.interval = __constant_cpu_to_le16(0x0100);
1038 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1040 /* default 1.28 sec page scan */
1041 acp.interval = __constant_cpu_to_le16(0x0800);
1044 acp.window = __constant_cpu_to_le16(0x0012);
/* Only queue commands for values that actually change. */
1046 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1047 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1048 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1051 if (hdev->page_scan_type != type)
1052 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion callback for MGMT_OP_SET_CONNECTABLE: reply to
 * the pending command with the resulting settings.
 * NOTE(review): extraction dropped hci_dev_lock(), the "if (!cmd) goto
 * unlock;" check and the unlock label; restore from upstream.
 */
1055 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1057 struct pending_cmd *cmd;
1059 BT_DBG("status 0x%02x", status);
1063 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1067 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1069 mgmt_pending_remove(cmd);
1072 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler: toggle page scan; disabling also
 * drops discoverability and fast connectable.
 * NOTE(review): extraction dropped many lines — declarations (scan,
 * err), hci_dev_lock(), the changed=true assignments, the if/else around
 * the set/clear_bit pair, MGMT_STATUS_BUSY argument, the pending_cmd
 * NULL check, the "scan = SCAN_PAGE;"/"scan = 0;" if/else, the failed
 * label and the final return; restore from upstream.
 */
1075 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1078 struct mgmt_mode *cp = data;
1079 struct pending_cmd *cmd;
1080 struct hci_request req;
1084 BT_DBG("request for %s", hdev->name);
1086 if (!lmp_bredr_capable(hdev))
1087 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1088 MGMT_STATUS_NOT_SUPPORTED);
1090 if (cp->val != 0x00 && cp->val != 0x01)
1091 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1092 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: just flip the flags and report. */
1096 if (!hdev_is_powered(hdev)) {
1097 bool changed = false;
1099 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1103 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1105 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Not connectable implies not discoverable. */
1106 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1109 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1114 err = new_settings(hdev, sk);
1119 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1120 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1121 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
/* Page scan already matches the request: nothing to do. */
1126 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1127 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1131 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1142 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1143 hdev->discov_timeout > 0)
1144 cancel_delayed_work(&hdev->discov_off);
1147 hci_req_init(&req, hdev);
1149 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1151 /* If we're going from non-connectable to connectable or
1152 * vice-versa when fast connectable is enabled ensure that fast
1153 * connectable gets disabled. write_fast_connectable won't do
1154 * anything if the page scan parameters are already what they
1157 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1158 write_fast_connectable(&req, false);
1160 err = hci_req_run(&req, set_connectable_complete);
1162 mgmt_pending_remove(cmd);
1165 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: pure host-side flag, no HCI traffic.
 * NOTE(review): extraction dropped lines — the err declaration,
 * hci_dev_lock(), the "if (cp->val)/else" around the set/clear pair,
 * the "if (err < 0) goto" check, the unlock label and final return are
 * missing; restore from upstream.
 */
1169 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1172 struct mgmt_mode *cp = data;
1175 BT_DBG("request for %s", hdev->name);
1177 if (cp->val != 0x00 && cp->val != 0x01)
1178 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1179 MGMT_STATUS_INVALID_PARAMS);
1184 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1186 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1188 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
/* Broadcast to other sockets; requester already got its reply. */
1192 err = new_settings(hdev, sk);
1195 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: toggle HCI authentication enable.
 * NOTE(review): extraction dropped many lines — declarations (val, err),
 * hci_dev_lock(), the changed=true assignment, the "if (changed)" guard
 * before new_settings, MGMT_STATUS_BUSY argument, "val = !!cp->val;",
 * the pending_cmd NULL check, the failed label and final return; restore
 * from upstream.
 */
1199 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1202 struct mgmt_mode *cp = data;
1203 struct pending_cmd *cmd;
1207 BT_DBG("request for %s", hdev->name);
1209 if (!lmp_bredr_capable(hdev))
1210 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1211 MGMT_STATUS_NOT_SUPPORTED);
1213 if (cp->val != 0x00 && cp->val != 0x01)
1214 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1215 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: just flip the flag and report. */
1219 if (!hdev_is_powered(hdev)) {
1220 bool changed = false;
1222 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1223 &hdev->dev_flags)) {
1224 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1228 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1233 err = new_settings(hdev, sk);
1238 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1239 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state. */
1246 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1247 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1251 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1257 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1259 mgmt_pending_remove(cmd);
1264 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: toggle Simple Pairing mode on the controller.
 * NOTE(review): extraction dropped lines — declarations (val, err),
 * hci_dev_lock(), "val = !!cp->val;", the changed=true assignment, the
 * "if (changed)" guard, MGMT_STATUS_BUSY argument, the pending_cmd NULL
 * check, the failed label and final return; restore from upstream.
 */
1268 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1270 struct mgmt_mode *cp = data;
1271 struct pending_cmd *cmd;
1275 BT_DBG("request for %s", hdev->name);
1277 if (!lmp_ssp_capable(hdev))
1278 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1279 MGMT_STATUS_NOT_SUPPORTED);
1281 if (cp->val != 0x00 && cp->val != 0x01)
1282 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1283 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: just flip the flag and report. */
1289 if (!hdev_is_powered(hdev)) {
1290 bool changed = false;
1292 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1293 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1297 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1302 err = new_settings(hdev, sk);
1307 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1308 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1313 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1314 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1318 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1324 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1326 mgmt_pending_remove(cmd);
1331 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggle the host-side High Speed flag; no HCI
 * traffic is needed.
 * NOTE(review): extraction dropped lines — the capability condition on
 * original line 1341 (guarding NOT_SUPPORTED) and the "if (cp->val)/
 * else" around the set/clear pair are missing; restore from upstream.
 */
1335 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1337 struct mgmt_mode *cp = data;
1339 BT_DBG("request for %s", hdev->name);
1342 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1343 MGMT_STATUS_NOT_SUPPORTED);
1345 if (cp->val != 0x00 && cp->val != 0x01)
1346 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1347 MGMT_STATUS_INVALID_PARAMS);
1350 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1352 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1354 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* HCI request completion callback for MGMT_OP_SET_LE: on failure fail
 * all pending SET_LE commands; on success answer them and broadcast the
 * new settings.
 * NOTE(review): extraction dropped lines — "BT_DBG(...)", the
 * "if (status)" branch structure with its return, the &mgmt_err
 * continuation, and the trailing sock_put(match.sk) guard are missing;
 * restore from upstream.
 */
1357 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1359 struct cmd_lookup match = { NULL, hdev };
1362 u8 mgmt_err = mgmt_status(status);
1364 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1369 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1371 new_settings(hdev, match.sk);
/* MGMT_OP_SET_LE handler: toggle LE host support via the Write LE Host
 * Supported HCI command.
 * NOTE(review): extraction dropped lines — declarations (val, enabled,
 * err), hci_dev_lock(), "val = !!cp->val;", changed=true assignments,
 * the "if (changed)" guard, MGMT_STATUS_BUSY argument, the pending_cmd
 * NULL check, the "if (val) { hci_cp.le = val; ... }" block, the
 * unlock label and final return; restore from upstream.
 */
1377 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1379 struct mgmt_mode *cp = data;
1380 struct hci_cp_write_le_host_supported hci_cp;
1381 struct pending_cmd *cmd;
1382 struct hci_request req;
1386 BT_DBG("request for %s", hdev->name);
1388 if (!lmp_le_capable(hdev))
1389 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1390 MGMT_STATUS_NOT_SUPPORTED);
1392 if (cp->val != 0x00 && cp->val != 0x01)
1393 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1394 MGMT_STATUS_INVALID_PARAMS);
1396 /* LE-only devices do not allow toggling LE on/off */
1397 if (!lmp_bredr_capable(hdev))
1398 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1399 MGMT_STATUS_REJECTED);
1404 enabled = lmp_host_le_capable(hdev);
/* Powered off, or already in the requested state: flag-only path. */
1406 if (!hdev_is_powered(hdev) || val == enabled) {
1407 bool changed = false;
1409 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1410 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1414 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1419 err = new_settings(hdev, sk);
1424 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1425 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1430 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1436 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE + BR/EDR, if the controller can do it. */
1440 hci_cp.simul = lmp_le_br_capable(hdev);
1443 hci_req_init(&req, hdev);
1445 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1448 err = hci_req_run(&req, le_enable_complete);
1450 mgmt_pending_remove(cmd);
1453 hci_dev_unlock(hdev);
1457 /* This is a helper function to test for pending mgmt commands that can
1458 * cause CoD or EIR HCI commands. We can only allow one such pending
1459 * mgmt command at a time since otherwise we cannot easily track what
1460 * the current values are, will be, and based on that calculate if a new
1461 * HCI command needs to be sent and if yes with what value.
/* NOTE(review): extraction dropped lines — the "return true;" under the
 * matched cases and the trailing "return false;" are missing; restore
 * from upstream.
 */
1463 static bool pending_eir_or_class(struct hci_dev *hdev)
1465 struct pending_cmd *cmd;
1467 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1468 switch (cmd->opcode) {
1469 case MGMT_OP_ADD_UUID:
1470 case MGMT_OP_REMOVE_UUID:
1471 case MGMT_OP_SET_DEV_CLASS:
1472 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs share its last 12 bytes.
 * NOTE(review): the closing "};" was dropped by the extraction.
 */
1480 static const u8 bluetooth_base_uuid[] = {
1481 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1482 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as 128, 32 or 16 bits wide depending on
 * whether it is derived from the Bluetooth Base UUID.
 * NOTE(review): extraction dropped lines — "return 128;" for non-base
 * UUIDs and the final "return (val > 0xffff) ? 32 : 16;" style returns
 * are missing; restore from upstream.
 */
1485 static u8 get_uuid_size(const u8 *uuid)
/* Not base-UUID derived: it is a full 128-bit UUID. */
1489 if (memcmp(uuid, bluetooth_base_uuid, 12))
1492 val = get_unaligned_le32(&uuid[12]);
/* Common completion path for the CoD/EIR-affecting commands: answer the
 * pending @mgmt_op command with the (possibly new) device class.
 * NOTE(review): extraction dropped hci_dev_lock(), the "if (!cmd) goto
 * unlock;" check and the unlock label; restore from upstream.
 */
1499 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1501 struct pending_cmd *cmd;
1505 cmd = mgmt_pending_find(mgmt_op, hdev);
1509 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1510 hdev->dev_class, 3);
1512 mgmt_pending_remove(cmd);
1515 hci_dev_unlock(hdev);
1518 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1520 BT_DBG("status 0x%02x", status);
1522 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: register a UUID and push updated CoD and
 * EIR to the controller.
 * NOTE(review): extraction dropped lines — the err declaration,
 * hci_dev_lock(), MGMT_STATUS_BUSY argument with its goto, the kmalloc
 * NULL check (-ENOMEM path), the update_class()/update_eir() calls on
 * the request, the pending_cmd NULL check, "err = 0;", the failed label
 * and final return; restore from upstream.
 */
1525 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1527 struct mgmt_cp_add_uuid *cp = data;
1528 struct pending_cmd *cmd;
1529 struct hci_request req;
1530 struct bt_uuid *uuid;
1533 BT_DBG("request for %s", hdev->name);
/* Only one CoD/EIR-affecting command may be pending at a time. */
1537 if (pending_eir_or_class(hdev)) {
1538 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1543 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1549 memcpy(uuid->uuid, cp->uuid, 16);
1550 uuid->svc_hint = cp->svc_hint;
1551 uuid->size = get_uuid_size(cp->uuid);
1553 list_add_tail(&uuid->list, &hdev->uuids);
1555 hci_req_init(&req, hdev);
1560 err = hci_req_run(&req, add_uuid_complete);
/* -ENODATA: no HCI commands were needed; complete immediately. */
1562 if (err != -ENODATA)
1565 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1566 hdev->dev_class, 3);
1570 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1579 hci_dev_unlock(hdev);
/* Arm the service-cache: if the controller is powered and the cache
 * flag was not already set, schedule the delayed service_cache work.
 * NOTE(review): return values and the delay argument are in lines
 * omitted from this extract.
 */
1583 static bool enable_service_cache(struct hci_dev *hdev)
1585 if (!hdev_is_powered(hdev))
1588 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1589 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for remove_uuid(): forward status to the
 * generic class-complete handler for MGMT_OP_REMOVE_UUID.
 */
1597 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1599 BT_DBG("status 0x%02x", status);
1601 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all, when the
 * all-zero wildcard UUID is given) and refresh CoD/EIR via an HCI
 * request.  NOTE(review): interior lines (lock, goto labels, found
 * counter) are omitted from this extract.
 */
1604 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1607 struct mgmt_cp_remove_uuid *cp = data;
1608 struct pending_cmd *cmd;
1609 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as a wildcard meaning "remove everything". */
1610 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1611 struct hci_request req;
1614 BT_DBG("request for %s", hdev->name);
/* Only one CoD/EIR-affecting command may run at a time. */
1618 if (pending_eir_or_class(hdev)) {
1619 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1624 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1625 err = hci_uuids_clear(hdev);
/* If the service cache could be re-armed, answer right away. */
1627 if (enable_service_cache(hdev)) {
1628 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1629 0, hdev->dev_class, 3);
/* Otherwise drop only the entries matching the requested UUID. */
1638 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1639 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1642 list_del(&match->list);
/* No matching UUID found -> invalid parameters. */
1648 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1649 MGMT_STATUS_INVALID_PARAMS);
1654 hci_req_init(&req, hdev);
1659 err = hci_req_run(&req, remove_uuid_complete);
/* -ENODATA: nothing to send to the controller; complete directly. */
1661 if (err != -ENODATA)
1664 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1665 hdev->dev_class, 3);
1669 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1678 hci_dev_unlock(hdev);
/* HCI request callback for set_dev_class(): forward status to the
 * generic class-complete handler for MGMT_OP_SET_DEV_CLASS.
 */
1682 static void set_class_complete(struct hci_dev *hdev, u8 status)
1684 BT_DBG("status 0x%02x", status);
1686 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: validate and store the new
 * major/minor device class, then push it to the controller with an
 * HCI request (BR/EDR only).  NOTE(review): interior lines (lock,
 * goto labels, hci_dev_lock after cancel) are omitted from this
 * extract.
 */
1689 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1692 struct mgmt_cp_set_dev_class *cp = data;
1693 struct pending_cmd *cmd;
1694 struct hci_request req;
1697 BT_DBG("request for %s", hdev->name);
/* Class of Device only exists on BR/EDR capable controllers. */
1699 if (!lmp_bredr_capable(hdev))
1700 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1701 MGMT_STATUS_NOT_SUPPORTED);
1705 if (pending_eir_or_class(hdev)) {
1706 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Minor low 2 bits and major high 3 bits are reserved and must be 0. */
1711 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1712 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1713 MGMT_STATUS_INVALID_PARAMS);
1717 hdev->major_class = cp->major;
1718 hdev->minor_class = cp->minor;
/* Powered off: just remember the values; reply with current class. */
1720 if (!hdev_is_powered(hdev)) {
1721 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1722 hdev->dev_class, 3);
1726 hci_req_init(&req, hdev);
/* Flush a pending service-cache timeout synchronously; the lock is
 * dropped around the cancel to avoid deadlocking with the work item.
 */
1728 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1729 hci_dev_unlock(hdev);
1730 cancel_delayed_work_sync(&hdev->service_cache);
1737 err = hci_req_run(&req, set_class_complete);
/* -ENODATA: controller already up to date; complete immediately. */
1739 if (err != -ENODATA)
1742 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1743 hdev->dev_class, 3);
1747 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1756 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the controller's stored
 * BR/EDR link keys with the user-supplied list after validating the
 * payload length, the debug_keys flag and every address type.
 * NOTE(review): interior lines (variable declarations, lock, the
 * debug_keys conditional) are omitted from this extract.
 */
1760 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1763 struct mgmt_cp_load_link_keys *cp = data;
1764 u16 key_count, expected_len;
1767 key_count = __le16_to_cpu(cp->key_count);
/* The payload must be exactly header + key_count entries. */
1769 expected_len = sizeof(*cp) + key_count *
1770 sizeof(struct mgmt_link_key_info);
1771 if (expected_len != len) {
1772 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1774 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1775 MGMT_STATUS_INVALID_PARAMS);
/* debug_keys is a strict boolean. */
1778 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1779 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1780 MGMT_STATUS_INVALID_PARAMS);
1782 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate all entries before touching stored keys. */
1785 for (i = 0; i < key_count; i++) {
1786 struct mgmt_link_key_info *key = &cp->keys[i];
/* Link keys only make sense for BR/EDR addresses. */
1788 if (key->addr.type != BDADDR_BREDR)
1789 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1790 MGMT_STATUS_INVALID_PARAMS);
1795 hci_link_keys_clear(hdev);
1798 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1800 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
/* Second pass: store the now-validated keys. */
1802 for (i = 0; i < key_count; i++) {
1803 struct mgmt_link_key_info *key = &cp->keys[i];
1805 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1806 key->type, key->pin_len);
1809 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1811 hci_dev_unlock(hdev);
/* Emit an MGMT_EV_DEVICE_UNPAIRED event for the given address to all
 * mgmt listeners except skip_sk (the socket that requested it).
 */
1816 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1817 u8 addr_type, struct sock *skip_sk)
1819 struct mgmt_ev_device_unpaired ev;
1821 bacpy(&ev.addr.bdaddr, bdaddr);
1822 ev.addr.type = addr_type;
1824 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored link key (BR/EDR)
 * or LTK (LE) for a device and, when requested, also disconnect it
 * with reason "Remote User Terminated Connection".
 * NOTE(review): interior lines (lock, goto labels, the no-connection
 * short path) are omitted from this extract.
 */
1828 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1831 struct mgmt_cp_unpair_device *cp = data;
1832 struct mgmt_rp_unpair_device rp;
1833 struct hci_cp_disconnect dc;
1834 struct pending_cmd *cmd;
1835 struct hci_conn *conn;
/* The reply always echoes the target address back to userspace. */
1838 memset(&rp, 0, sizeof(rp));
1839 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1840 rp.addr.type = cp->addr.type;
1842 if (!bdaddr_type_is_valid(cp->addr.type))
1843 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1844 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a strict boolean. */
1847 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1848 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1849 MGMT_STATUS_INVALID_PARAMS,
1854 if (!hdev_is_powered(hdev)) {
1855 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1856 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Key removal differs by transport: link key vs. long term key. */
1860 if (cp->addr.type == BDADDR_BREDR)
1861 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1863 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1866 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1867 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
/* Optionally tear down the active connection too. */
1871 if (cp->disconnect) {
1872 if (cp->addr.type == BDADDR_BREDR)
1873 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1876 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1883 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1885 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* A live connection exists: queue the command and issue the HCI
 * disconnect; the reply comes from the disconnect completion.
 */
1889 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1896 dc.handle = cpu_to_le16(conn->handle);
1897 dc.reason = 0x13; /* Remote User Terminated Connection */
1898 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1900 mgmt_pending_remove(cmd);
1903 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: validate the address, find the active
 * ACL/LE connection and send HCI_OP_DISCONNECT with "remote user
 * terminated".  Only one disconnect command may be pending at a time.
 * NOTE(review): interior lines (lock, goto labels) are omitted from
 * this extract.
 */
1907 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1910 struct mgmt_cp_disconnect *cp = data;
1911 struct mgmt_rp_disconnect rp;
1912 struct hci_cp_disconnect dc;
1913 struct pending_cmd *cmd;
1914 struct hci_conn *conn;
/* The reply always echoes the target address back to userspace. */
1919 memset(&rp, 0, sizeof(rp));
1920 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1921 rp.addr.type = cp->addr.type;
1923 if (!bdaddr_type_is_valid(cp->addr.type))
1924 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1925 MGMT_STATUS_INVALID_PARAMS,
1930 if (!test_bit(HCI_UP, &hdev->flags)) {
1931 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1932 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Refuse overlapping disconnect requests. */
1936 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1937 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1938 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1942 if (cp->addr.type == BDADDR_BREDR)
1943 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1946 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* Not connected (or connection not yet/no longer established). */
1948 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1949 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1950 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1954 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1960 dc.handle = cpu_to_le16(conn->handle);
1961 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1963 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1965 mgmt_pending_remove(cmd);
1968 hci_dev_unlock(hdev);
/* Map an HCI (link_type, addr_type) pair to the mgmt BDADDR_* address
 * type constants.  NOTE(review): the outer switch's case labels
 * (LE_LINK vs. default) are in lines omitted from this extract.
 */
1972 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1974 switch (link_type) {
1976 switch (addr_type) {
1977 case ADDR_LE_DEV_PUBLIC:
1978 return BDADDR_LE_PUBLIC;
1981 /* Fallback to LE Random address type */
1982 return BDADDR_LE_RANDOM;
1986 /* Fallback to BR/EDR type */
1987 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: build and return the list of
 * mgmt-visible connections (SCO/eSCO links are filtered out).
 * NOTE(review): interior lines (declarations of c/i/err, lock,
 * kmalloc failure check, kfree, goto labels) are omitted from this
 * extract.
 */
1991 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1994 struct mgmt_rp_get_connections *rp;
2004 if (!hdev_is_powered(hdev)) {
2005 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2006 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the reply buffer. */
2011 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2012 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2016 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2017 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
2024 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2025 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2027 bacpy(&rp->addr[i].bdaddr, &c->dst);
2028 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2029 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2034 rp->conn_count = cpu_to_le16(i);
2036 /* Recalculate length in case of filtered SCO connections, etc */
2037 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2039 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2045 hci_dev_unlock(hdev);
/* Queue a PIN-code negative reply: register the pending mgmt command
 * and send HCI_OP_PIN_CODE_NEG_REPLY; on send failure the pending
 * entry is removed again.
 */
2049 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2050 struct mgmt_cp_pin_code_neg_reply *cp)
2052 struct pending_cmd *cmd;
2055 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
/* The HCI command carries only the peer bdaddr. */
2060 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2061 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2063 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-entered PIN to the
 * controller.  A high-security pairing demands a full 16-byte PIN;
 * anything shorter is converted into a negative reply.
 * NOTE(review): interior lines (lock, goto labels) are omitted from
 * this extract.
 */
2068 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2071 struct hci_conn *conn;
2072 struct mgmt_cp_pin_code_reply *cp = data;
2073 struct hci_cp_pin_code_reply reply;
2074 struct pending_cmd *cmd;
2081 if (!hdev_is_powered(hdev)) {
2082 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2083 MGMT_STATUS_NOT_POWERED);
2087 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2089 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2090 MGMT_STATUS_NOT_CONNECTED);
/* Security-high pairing requires a 16-digit PIN; reject shorter ones
 * by sending a negative reply on the user's behalf.
 */
2094 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2095 struct mgmt_cp_pin_code_neg_reply ncp;
2097 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2099 BT_ERR("PIN code is not 16 bytes long");
2101 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2103 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2104 MGMT_STATUS_INVALID_PARAMS);
2109 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2115 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2116 reply.pin_len = cp->pin_len;
2117 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2119 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2121 mgmt_pending_remove(cmd);
2124 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairing attempts.  Purely local state; completes
 * immediately.
 */
2128 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2131 struct mgmt_cp_set_io_capability *cp = data;
2137 hdev->io_capability = cp->io_capability;
2139 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2140 hdev->io_capability);
2142 hci_dev_unlock(hdev);
2144 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at this connection, or NULL.  NOTE(review): the return statements
 * are in lines omitted from this extract.
 */
2148 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2150 struct hci_dev *hdev = conn->hdev;
2151 struct pending_cmd *cmd;
2153 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2154 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2157 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: reply to userspace with the peer
 * address and status, detach all pairing callbacks from the
 * connection, drop our connection reference and free the pending
 * command.
 */
2166 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2168 struct mgmt_rp_pair_device rp;
2169 struct hci_conn *conn = cmd->user_data;
2171 bacpy(&rp.addr.bdaddr, &conn->dst);
2172 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2174 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2177 /* So we don't get further callbacks for this connection */
2178 conn->connect_cfm_cb = NULL;
2179 conn->security_cfm_cb = NULL;
2180 conn->disconn_cfm_cb = NULL;
/* Release the reference taken when pairing started. */
2182 hci_conn_drop(conn);
2184 mgmt_pending_remove(cmd);
/* Connection/security callback: locate the pending pairing for this
 * connection and complete it with the translated HCI status.
 */
2187 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2189 struct pending_cmd *cmd;
2191 BT_DBG("status %u", status);
2193 cmd = find_pairing(conn);
2195 BT_DBG("Unable to find a pending command");
2197 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback: like pairing_complete_cb, but for LE
 * where connecting alone does not prove pairing finished.
 * NOTE(review): the early-return condition between lines 2204 and
 * 2209 is omitted from this extract.
 */
2200 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2202 struct pending_cmd *cmd;
2204 BT_DBG("status %u", status);
2209 cmd = find_pairing(conn);
2211 BT_DBG("Unable to find a pending command");
2213 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiate an ACL or LE connection to
 * the target and hook the pairing callbacks so that completion is
 * reported once authentication finishes.  NOTE(review): interior
 * lines (lock, IS_ERR check, goto labels) are omitted from this
 * extract.
 */
2216 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2219 struct mgmt_cp_pair_device *cp = data;
2220 struct mgmt_rp_pair_device rp;
2221 struct pending_cmd *cmd;
2222 u8 sec_level, auth_type;
2223 struct hci_conn *conn;
/* The reply always echoes the target address back to userspace. */
2228 memset(&rp, 0, sizeof(rp));
2229 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2230 rp.addr.type = cp->addr.type;
2232 if (!bdaddr_type_is_valid(cp->addr.type))
2233 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2234 MGMT_STATUS_INVALID_PARAMS,
2239 if (!hdev_is_powered(hdev)) {
2240 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2241 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 (NoInputNoOutput) cannot do MITM protection. */
2245 sec_level = BT_SECURITY_MEDIUM;
2246 if (cp->io_cap == 0x03)
2247 auth_type = HCI_AT_DEDICATED_BONDING;
2249 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2251 if (cp->addr.type == BDADDR_BREDR)
2252 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2253 cp->addr.type, sec_level, auth_type);
2255 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2256 cp->addr.type, sec_level, auth_type);
2261 if (PTR_ERR(conn) == -EBUSY)
2262 status = MGMT_STATUS_BUSY;
2264 status = MGMT_STATUS_CONNECT_FAILED;
2266 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already installed means another pairing is in flight. */
2272 if (conn->connect_cfm_cb) {
2273 hci_conn_drop(conn);
2274 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2275 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2279 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2282 hci_conn_drop(conn);
2286 /* For LE, just connecting isn't a proof that the pairing finished */
2287 if (cp->addr.type == BDADDR_BREDR)
2288 conn->connect_cfm_cb = pairing_complete_cb;
2290 conn->connect_cfm_cb = le_connect_complete_cb;
2292 conn->security_cfm_cb = pairing_complete_cb;
2293 conn->disconn_cfm_cb = pairing_complete_cb;
2294 conn->io_capability = cp->io_cap;
2295 cmd->user_data = conn;
/* Already connected and secure enough: report success right away. */
2297 if (conn->state == BT_CONNECTED &&
2298 hci_conn_security(conn, sec_level, auth_type))
2299 pairing_complete(cmd, 0);
2304 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the in-flight
 * PAIR_DEVICE command for the given address with status CANCELLED.
 * NOTE(review): interior lines (lock, goto labels) are omitted from
 * this extract.
 */
2308 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2311 struct mgmt_addr_info *addr = data;
2312 struct pending_cmd *cmd;
2313 struct hci_conn *conn;
2320 if (!hdev_is_powered(hdev)) {
2321 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2322 MGMT_STATUS_NOT_POWERED);
/* There must be a pairing in progress to cancel. */
2326 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2328 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2329 MGMT_STATUS_INVALID_PARAMS);
2333 conn = cmd->user_data;
/* The supplied address must match the pairing's peer. */
2335 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2336 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2337 MGMT_STATUS_INVALID_PARAMS);
2341 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2343 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2344 addr, sizeof(*addr));
2346 hci_dev_unlock(hdev);
/* Shared backend for all user confirm/passkey (neg-)replies: route LE
 * responses to SMP and BR/EDR responses to the corresponding HCI
 * command, tracking the mgmt command as pending until HCI completes.
 * NOTE(review): interior lines (lock, goto labels, err declaration)
 * are omitted from this extract.
 */
2350 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2351 struct mgmt_addr_info *addr, u16 mgmt_op,
2352 u16 hci_op, __le32 passkey)
2354 struct pending_cmd *cmd;
2355 struct hci_conn *conn;
2360 if (!hdev_is_powered(hdev)) {
2361 err = cmd_complete(sk, hdev->id, mgmt_op,
2362 MGMT_STATUS_NOT_POWERED, addr,
2367 if (addr->type == BDADDR_BREDR)
2368 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2370 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2373 err = cmd_complete(sk, hdev->id, mgmt_op,
2374 MGMT_STATUS_NOT_CONNECTED, addr,
2379 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2380 /* Continue with pairing via SMP */
2381 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2384 err = cmd_complete(sk, hdev->id, mgmt_op,
2385 MGMT_STATUS_SUCCESS, addr,
2388 err = cmd_complete(sk, hdev->id, mgmt_op,
2389 MGMT_STATUS_FAILED, addr,
2395 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2401 /* Continue with pairing via HCI */
/* Passkey replies carry bdaddr + passkey; all others carry only the
 * bdaddr.
 */
2402 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2403 struct hci_cp_user_passkey_reply cp;
2405 bacpy(&cp.bdaddr, &addr->bdaddr);
2406 cp.passkey = passkey;
2407 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2409 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2413 mgmt_pending_remove(cmd);
2416 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
2420 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2421 void *data, u16 len)
2423 struct mgmt_cp_pin_code_neg_reply *cp = data;
2427 return user_pairing_resp(sk, hdev, &cp->addr,
2428 MGMT_OP_PIN_CODE_NEG_REPLY,
2429 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: length-checked wrapper over
 * user_pairing_resp() (no passkey for confirm replies).
 */
2432 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2435 struct mgmt_cp_user_confirm_reply *cp = data;
/* Fixed-size command; any other length is malformed. */
2439 if (len != sizeof(*cp))
2440 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2441 MGMT_STATUS_INVALID_PARAMS);
2443 return user_pairing_resp(sk, hdev, &cp->addr,
2444 MGMT_OP_USER_CONFIRM_REPLY,
2445 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the negative-confirm HCI opcode.
 */
2448 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2449 void *data, u16 len)
2451 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2455 return user_pairing_resp(sk, hdev, &cp->addr,
2456 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2457 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: wrapper over
 * user_pairing_resp() forwarding the user-entered passkey.
 */
2460 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2463 struct mgmt_cp_user_passkey_reply *cp = data;
2467 return user_pairing_resp(sk, hdev, &cp->addr,
2468 MGMT_OP_USER_PASSKEY_REPLY,
2469 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the negative-passkey HCI opcode.
 */
2472 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2473 void *data, u16 len)
2475 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2479 return user_pairing_resp(sk, hdev, &cp->addr,
2480 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2481 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying the current
 * hdev->dev_name into the given request.
 */
2484 static void update_name(struct hci_request *req)
2486 struct hci_dev *hdev = req->hdev;
2487 struct hci_cp_write_local_name cp;
2489 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2491 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request callback for set_local_name(): reply to the pending
 * mgmt command with either an error status or a success completion.
 * NOTE(review): the lines assigning cp (cmd->param) and the success
 * reply payload are omitted from this extract.
 */
2494 static void set_name_complete(struct hci_dev *hdev, u8 status)
2496 struct mgmt_cp_set_local_name *cp;
2497 struct pending_cmd *cmd;
2499 BT_DBG("status 0x%02x", status);
2503 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2510 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2511 mgmt_status(status));
2513 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2516 mgmt_pending_remove(cmd);
2519 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: store the new local/short name; if
 * powered, push it to the controller (and refresh EIR/AD), otherwise
 * just emit the LOCAL_NAME_CHANGED event.  NOTE(review): interior
 * lines (lock, goto labels, the update_name/update_eir calls) are
 * omitted from this extract.
 */
2522 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2525 struct mgmt_cp_set_local_name *cp = data;
2526 struct pending_cmd *cmd;
2527 struct hci_request req;
2534 /* If the old values are the same as the new ones just return a
2535 * direct command complete event.
2537 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2538 !memcmp(hdev->short_name, cp->short_name,
2539 sizeof(hdev->short_name))) {
2540 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2545 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: accept the name locally and notify listeners only. */
2547 if (!hdev_is_powered(hdev)) {
2548 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2550 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2555 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2561 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2567 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2569 hci_req_init(&req, hdev);
/* BR/EDR gets the HCI name write; LE gets fresh advertising data. */
2571 if (lmp_bredr_capable(hdev)) {
2576 if (lmp_le_capable(hdev))
2577 hci_update_ad(&req);
2579 err = hci_req_run(&req, set_name_complete);
2581 mgmt_pending_remove(cmd);
2584 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask an SSP-capable, powered
 * controller for its local OOB hash/randomizer; the reply arrives via
 * the HCI command-complete path.  NOTE(review): interior lines (lock,
 * goto labels) are omitted from this extract.
 */
2588 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2589 void *data, u16 data_len)
2591 struct pending_cmd *cmd;
2594 BT_DBG("%s", hdev->name);
2598 if (!hdev_is_powered(hdev)) {
2599 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2600 MGMT_STATUS_NOT_POWERED);
/* OOB data requires Secure Simple Pairing support. */
2604 if (!lmp_ssp_capable(hdev)) {
2605 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2606 MGMT_STATUS_NOT_SUPPORTED);
/* Only one outstanding OOB read at a time. */
2610 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2611 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2616 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2622 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2624 mgmt_pending_remove(cmd);
2627 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store a remote device's OOB
 * hash/randomizer and report success or failure.  NOTE(review): the
 * err/status declarations, lock, and the err<0 check are in lines
 * omitted from this extract.
 */
2631 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2632 void *data, u16 len)
2634 struct mgmt_cp_add_remote_oob_data *cp = data;
2638 BT_DBG("%s ", hdev->name);
2642 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2645 status = MGMT_STATUS_FAILED;
2647 status = MGMT_STATUS_SUCCESS;
2649 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2650 &cp->addr, sizeof(cp->addr));
2652 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored remote OOB
 * data; a lookup failure maps to INVALID_PARAMS.  NOTE(review): the
 * err/status declarations, lock, and the err<0 check are in lines
 * omitted from this extract.
 */
2656 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2657 void *data, u16 len)
2659 struct mgmt_cp_remove_remote_oob_data *cp = data;
2663 BT_DBG("%s", hdev->name);
2667 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2669 status = MGMT_STATUS_INVALID_PARAMS;
2671 status = MGMT_STATUS_SUCCESS;
2673 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2674 status, &cp->addr, sizeof(cp->addr));
2676 hci_dev_unlock(hdev);
/* Abort a failed discovery start: reset discovery state to STOPPED
 * and answer the pending START_DISCOVERY command with the discovery
 * type and translated status.
 */
2680 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
2682 struct pending_cmd *cmd;
2686 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2688 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2692 type = hdev->discovery.type;
2694 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2695 &type, sizeof(type));
2696 mgmt_pending_remove(cmd);
/* HCI request callback for start_discovery(): on success move to
 * DISCOVERY_FINDING and, for LE-involved scans, arm the delayed scan
 * disable; on failure unwind via mgmt_start_discovery_failed().
 * NOTE(review): the status check, lock, and the LE timeout argument
 * at 2718 are in lines omitted from this extract.
 */
2701 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
2703 BT_DBG("status %d", status);
2707 mgmt_start_discovery_failed(hdev, status);
2708 hci_dev_unlock(hdev);
2713 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2714 hci_dev_unlock(hdev);
2716 switch (hdev->discovery.type) {
/* LE and interleaved scans are bounded by a disable timer. */
2717 case DISCOV_TYPE_LE:
2718 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2722 case DISCOV_TYPE_INTERLEAVED:
2723 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2724 DISCOV_INTERLEAVED_TIMEOUT);
/* BR/EDR inquiry ends on its own; nothing to schedule. */
2727 case DISCOV_TYPE_BREDR:
2731 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
/* MGMT_OP_START_DISCOVERY handler: validate controller state, then
 * build and run an HCI request for the requested discovery type —
 * a BR/EDR inquiry, an LE active scan, or both (interleaved).
 * NOTE(review): this extract omits interior lines (hci_dev_lock,
 * goto labels, several closing braces); only the visible lines are
 * reproduced here.  The one code change is the repair of a
 * mis-encoded "&param_cp" (was corrupted to a pilcrow, i.e. the
 * HTML entity "&para;" swallowed the address-of operator).
 */
2735 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2736 void *data, u16 len)
2738 struct mgmt_cp_start_discovery *cp = data;
2739 struct pending_cmd *cmd;
2740 struct hci_cp_le_set_scan_param param_cp;
2741 struct hci_cp_le_set_scan_enable enable_cp;
2742 struct hci_cp_inquiry inq_cp;
2743 struct hci_request req;
2744 /* General inquiry access code (GIAC) */
2745 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2748 BT_DBG("%s", hdev->name);
2752 if (!hdev_is_powered(hdev)) {
2753 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2754 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and discovery are mutually exclusive. */
2758 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2759 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
/* Only one discovery session at a time. */
2764 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2765 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2770 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2776 hdev->discovery.type = cp->type;
2778 hci_req_init(&req, hdev);
2780 switch (hdev->discovery.type) {
2781 case DISCOV_TYPE_BREDR:
2782 if (!lmp_bredr_capable(hdev)) {
2783 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2784 MGMT_STATUS_NOT_SUPPORTED);
2785 mgmt_pending_remove(cmd);
/* An inquiry is already running at the HCI level. */
2789 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2790 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2792 mgmt_pending_remove(cmd);
2796 hci_inquiry_cache_flush(hdev);
2798 memset(&inq_cp, 0, sizeof(inq_cp));
2799 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
2800 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
2801 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
2804 case DISCOV_TYPE_LE:
2805 case DISCOV_TYPE_INTERLEAVED:
2806 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2807 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2808 MGMT_STATUS_NOT_SUPPORTED);
2809 mgmt_pending_remove(cmd);
/* Interleaved discovery needs BR/EDR in addition to LE. */
2813 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
2814 !lmp_bredr_capable(hdev)) {
2815 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2816 MGMT_STATUS_NOT_SUPPORTED);
2817 mgmt_pending_remove(cmd);
/* No active scanning while acting as an LE peripheral. */
2821 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
2822 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2823 MGMT_STATUS_REJECTED);
2824 mgmt_pending_remove(cmd);
/* An LE scan is already running. */
2828 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
2829 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2831 mgmt_pending_remove(cmd);
/* Queue scan parameters followed by scan enable. */
2835 memset(&param_cp, 0, sizeof(param_cp));
2836 param_cp.type = LE_SCAN_ACTIVE;
2837 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
2838 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2839 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2842 memset(&enable_cp, 0, sizeof(enable_cp));
2843 enable_cp.enable = LE_SCAN_ENABLE;
2844 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2845 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2850 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2851 MGMT_STATUS_INVALID_PARAMS);
2852 mgmt_pending_remove(cmd);
2856 err = hci_req_run(&req, start_discovery_complete);
2858 mgmt_pending_remove(cmd);
2860 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2863 hci_dev_unlock(hdev);
/* Answer a failed discovery stop: complete the pending
 * STOP_DISCOVERY command with the current discovery type and the
 * translated status, then drop it.
 */
2867 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2869 struct pending_cmd *cmd;
2872 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
2876 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2877 &hdev->discovery.type, sizeof(hdev->discovery.type));
2878 mgmt_pending_remove(cmd);
/* HCI request callback for stop_discovery(): on failure report via
 * mgmt_stop_discovery_failed(), otherwise mark discovery STOPPED.
 * NOTE(review): the status check and lock are in lines omitted from
 * this extract.
 */
2883 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
2885 BT_DBG("status %d", status);
2890 mgmt_stop_discovery_failed(hdev, status);
2894 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2897 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: cancel whatever phase discovery is
 * in — inquiry/LE scan while FINDING, or the outstanding remote name
 * request while RESOLVING.  NOTE(review): interior lines (lock, goto
 * labels, several case/brace lines) are omitted from this extract.
 */
2900 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2903 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2904 struct pending_cmd *cmd;
2905 struct hci_cp_remote_name_req_cancel cp;
2906 struct inquiry_entry *e;
2907 struct hci_request req;
2908 struct hci_cp_le_set_scan_enable enable_cp;
2911 BT_DBG("%s", hdev->name);
2915 if (!hci_discovery_active(hdev)) {
2916 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2917 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2918 sizeof(mgmt_cp->type));
/* The requested type must match the session being stopped. */
2922 if (hdev->discovery.type != mgmt_cp->type) {
2923 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2924 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2925 sizeof(mgmt_cp->type));
2929 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2935 hci_req_init(&req, hdev);
2937 switch (hdev->discovery.state) {
2938 case DISCOVERY_FINDING:
/* Cancel the BR/EDR inquiry or disable the LE scan, whichever
 * is active.
 */
2939 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2940 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2942 cancel_delayed_work(&hdev->le_scan_disable);
2944 memset(&enable_cp, 0, sizeof(enable_cp));
2945 enable_cp.enable = LE_SCAN_DISABLE;
2946 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
2947 sizeof(enable_cp), &enable_cp);
2952 case DISCOVERY_RESOLVING:
2953 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* No resolve in flight: nothing to cancel; finish at once. */
2956 mgmt_pending_remove(cmd);
2957 err = cmd_complete(sk, hdev->id,
2958 MGMT_OP_STOP_DISCOVERY, 0,
2960 sizeof(mgmt_cp->type));
2961 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2965 bacpy(&cp.bdaddr, &e->data.bdaddr);
2966 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2972 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2974 mgmt_pending_remove(cmd);
2975 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2976 MGMT_STATUS_FAILED, &mgmt_cp->type,
2977 sizeof(mgmt_cp->type));
2981 err = hci_req_run(&req, stop_discovery_complete);
2983 mgmt_pending_remove(cmd);
2985 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2988 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether a
 * discovered device's name is already known; if not, the entry is
 * queued for name resolution.  NOTE(review): interior lines (lock,
 * goto labels) are omitted from this extract.
 */
2992 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2995 struct mgmt_cp_confirm_name *cp = data;
2996 struct inquiry_entry *e;
2999 BT_DBG("%s", hdev->name);
/* Only meaningful while a discovery session is running. */
3003 if (!hci_discovery_active(hdev)) {
3004 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3005 MGMT_STATUS_FAILED);
3009 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3011 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3012 MGMT_STATUS_INVALID_PARAMS);
3016 if (cp->name_known) {
3017 e->name_state = NAME_KNOWN;
/* Name unknown: mark it needed and reposition in resolve list. */
3020 e->name_state = NAME_NEEDED;
3021 hci_inquiry_cache_update_resolve(hdev, e);
3024 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3028 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the controller's
 * blacklist; a duplicate/failed add maps to MGMT_STATUS_FAILED.
 * NOTE(review): the err/status declarations, lock, and the err<0
 * check are in lines omitted from this extract.
 */
3032 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3035 struct mgmt_cp_block_device *cp = data;
3039 BT_DBG("%s", hdev->name);
3041 if (!bdaddr_type_is_valid(cp->addr.type))
3042 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3043 MGMT_STATUS_INVALID_PARAMS,
3044 &cp->addr, sizeof(cp->addr));
3048 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3050 status = MGMT_STATUS_FAILED;
3052 status = MGMT_STATUS_SUCCESS;
3054 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3055 &cp->addr, sizeof(cp->addr));
3057 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the
 * blacklist; a missing entry maps to MGMT_STATUS_INVALID_PARAMS.
 * NOTE(review): the err/status declarations, lock, and the err<0
 * check are in lines omitted from this extract.
 */
3062 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3065 struct mgmt_cp_unblock_device *cp = data;
3069 BT_DBG("%s", hdev->name);
3071 if (!bdaddr_type_is_valid(cp->addr.type))
3072 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3073 MGMT_STATUS_INVALID_PARAMS,
3074 &cp->addr, sizeof(cp->addr));
3078 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3080 status = MGMT_STATUS_INVALID_PARAMS;
3082 status = MGMT_STATUS_SUCCESS;
3084 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3085 &cp->addr, sizeof(cp->addr));
3087 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record
 * (source/vendor/product/version) and run an HCI request to refresh
 * the EIR data.  NOTE(review): interior lines (lock, the update_eir
 * call) are omitted from this extract.
 */
3092 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3095 struct mgmt_cp_set_device_id *cp = data;
3096 struct hci_request req;
3100 BT_DBG("%s", hdev->name);
3102 source = __le16_to_cpu(cp->source);
/* Valid sources: 0 (disabled), 1 (Bluetooth SIG), 2 (USB IF). */
3104 if (source > 0x0002)
3105 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3106 MGMT_STATUS_INVALID_PARAMS);
3110 hdev->devid_source = source;
3111 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3112 hdev->devid_product = __le16_to_cpu(cp->product);
3113 hdev->devid_version = __le16_to_cpu(cp->version);
3115 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3117 hci_req_init(&req, hdev);
/* Fire-and-forget: no completion callback needed for EIR refresh. */
3119 hci_req_run(&req, NULL);
3121 hci_dev_unlock(hdev);
/* HCI request callback for set_fast_connectable(): on success update
 * HCI_FAST_CONNECTABLE per the requested mode and notify settings
 * listeners; on failure relay the status.  NOTE(review): the status
 * check and cp->val conditional are in lines omitted from this
 * extract.
 */
3126 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3128 struct pending_cmd *cmd;
3130 BT_DBG("status 0x%02x", status);
3134 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3139 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3140 mgmt_status(status));
3142 struct mgmt_mode *cp = cmd->param;
3145 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3147 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3149 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3150 new_settings(hdev, cmd->sk);
3153 mgmt_pending_remove(cmd);
3156 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle the fast-connectable
 * page-scan parameters.  Requires a powered, connectable BR/EDR
 * controller of at least Bluetooth 1.2.  NOTE(review): interior lines
 * (lock, goto labels, cmd NULL check) are omitted from this extract.
 */
3159 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3160 void *data, u16 len)
3162 struct mgmt_mode *cp = data;
3163 struct pending_cmd *cmd;
3164 struct hci_request req;
3167 BT_DBG("%s", hdev->name);
/* Interlaced page scan needs BR/EDR and Bluetooth >= 1.2. */
3169 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
3170 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3171 MGMT_STATUS_NOT_SUPPORTED);
/* val is a strict boolean. */
3173 if (cp->val != 0x00 && cp->val != 0x01)
3174 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3175 MGMT_STATUS_INVALID_PARAMS);
3177 if (!hdev_is_powered(hdev))
3178 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3179 MGMT_STATUS_NOT_POWERED);
3181 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3182 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3183 MGMT_STATUS_REJECTED);
/* Only one fast-connectable change at a time. */
3187 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3188 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Already in the requested state: reply without touching HCI. */
3193 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3194 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3199 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3206 hci_req_init(&req, hdev);
3208 write_fast_connectable(&req, cp->val);
3210 err = hci_req_run(&req, fast_connectable_complete);
3212 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3213 MGMT_STATUS_FAILED);
3214 mgmt_pending_remove(cmd);
3218 hci_dev_unlock(hdev);
/* Validate one LTK entry from userspace: "authenticated" and "master"
 * must be strict booleans and the address type must be an LE type.
 * NOTE(review): the "return false" / "return true" lines are elided in
 * this extract.
 */
3223 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3225 	if (key->authenticated != 0x00 && key->authenticated != 0x01)
3227 	if (key->master != 0x00 && key->master != 0x01)
3229 	if (!bdaddr_type_is_le(key->addr.type))
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replaces the controller's
 * SMP LTK store with the list supplied by userspace.  The payload is
 * variable length, so the total size is re-derived from key_count and
 * compared against the actual length before any key is touched.
 * NOTE(review): expected_len is u16 and key_count * sizeof(ltk_info)
 * can wrap for large key_count; later upstream kernels add an explicit
 * max_key_count bound here — verify whether this tree needs it.
 */
3234 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3235 			       void *cp_data, u16 len)
3237 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
3238 	u16 key_count, expected_len;
3241 	key_count = __le16_to_cpu(cp->key_count);
3243 	expected_len = sizeof(*cp) + key_count *
3244 					sizeof(struct mgmt_ltk_info);
3245 	if (expected_len != len) {
3246 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
3248 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3249 				  MGMT_STATUS_INVALID_PARAMS);
3252 	BT_DBG("%s key_count %u", hdev->name, key_count);
	/* Validate every key before clearing the old store, so a bad
	 * entry rejects the whole command without losing existing keys. */
3254 	for (i = 0; i < key_count; i++) {
3255 		struct mgmt_ltk_info *key = &cp->keys[i];
3257 		if (!ltk_is_valid(key))
3258 			return cmd_status(sk, hdev->id,
3259 					  MGMT_OP_LOAD_LONG_TERM_KEYS,
3260 					  MGMT_STATUS_INVALID_PARAMS);
3265 	hci_smp_ltks_clear(hdev);
3267 	for (i = 0; i < key_count; i++) {
3268 		struct mgmt_ltk_info *key = &cp->keys[i];
		/* The master/slave distinction selects the LTK type; the
		 * master branch is elided in this extract. */
3274 			type = HCI_SMP_LTK_SLAVE;
3276 		hci_add_ltk(hdev, &key->addr.bdaddr,
3277 			    bdaddr_to_le(key->addr.type),
3278 			    type, 0, key->authenticated, key->val,
3279 			    key->enc_size, key->ediv, key->rand);
3282 	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3285 	hci_dev_unlock(hdev);
/* Command dispatch table: entry index == MGMT_OP_* opcode value, so the
 * order here must exactly track the opcode numbering in mgmt.h.  Each
 * entry holds the handler, a variable-length flag (true allows payloads
 * larger than data_len, e.g. the key-load commands), and the expected
 * fixed/minimum parameter size.  NOTE(review): the var_len/data_len
 * field declarations are elided in this extract.
 */
3290 static const struct mgmt_handler {
3291 	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3295 } mgmt_handlers[] = {
3296 	{ NULL }, /* 0x0000 (no command) */
3297 	{ read_version,           false, MGMT_READ_VERSION_SIZE },
3298 	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
3299 	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
3300 	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
3301 	{ set_powered,            false, MGMT_SETTING_SIZE },
3302 	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
3303 	{ set_connectable,        false, MGMT_SETTING_SIZE },
3304 	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
3305 	{ set_pairable,           false, MGMT_SETTING_SIZE },
3306 	{ set_link_security,      false, MGMT_SETTING_SIZE },
3307 	{ set_ssp,                false, MGMT_SETTING_SIZE },
3308 	{ set_hs,                 false, MGMT_SETTING_SIZE },
3309 	{ set_le,                 false, MGMT_SETTING_SIZE },
3310 	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
3311 	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
3312 	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
3313 	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
3314 	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
3315 	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3316 	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
3317 	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
3318 	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
3319 	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3320 	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
3321 	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
3322 	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3323 	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
3324 	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
3325 	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3326 	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
3327 	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3328 	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3329 	{ add_remote_oob_data,    false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3330 	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3331 	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
3332 	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
3333 	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
3334 	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
3335 	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
3336 	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
/* Entry point for management-channel messages from userspace.  Copies
 * the datagram, parses the mgmt_hdr (opcode/index/len, little-endian),
 * resolves the controller index, checks opcode range and parameter
 * length against mgmt_handlers[], then dispatches.  Commands below
 * MGMT_OP_READ_INFO are global and must use MGMT_INDEX_NONE; all others
 * require a valid controller.  NOTE(review): error paths, kfree/put
 * cleanup and several returns are elided in this extract.
 */
3340 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3344 	struct mgmt_hdr *hdr;
3345 	u16 opcode, index, len;
3346 	struct hci_dev *hdev = NULL;
3347 	const struct mgmt_handler *handler;
3350 	BT_DBG("got %zu bytes", msglen);
3352 	if (msglen < sizeof(*hdr))
3355 	buf = kmalloc(msglen, GFP_KERNEL);
3359 	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3365 	opcode = __le16_to_cpu(hdr->opcode);
3366 	index = __le16_to_cpu(hdr->index);
3367 	len = __le16_to_cpu(hdr->len);
	/* Header length field must match the actual payload size. */
3369 	if (len != msglen - sizeof(*hdr)) {
3374 	if (index != MGMT_INDEX_NONE) {
3375 		hdev = hci_dev_get(index);
3377 			err = cmd_status(sk, index, opcode,
3378 					 MGMT_STATUS_INVALID_INDEX);
	/* Devices claimed by the HCI user channel are hidden from mgmt. */
3382 		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3383 			err = cmd_status(sk, index, opcode,
3384 					 MGMT_STATUS_INVALID_INDEX);
3389 	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3390 	    mgmt_handlers[opcode].func == NULL) {
3391 		BT_DBG("Unknown op %u", opcode);
3392 		err = cmd_status(sk, index, opcode,
3393 				 MGMT_STATUS_UNKNOWN_COMMAND);
	/* Global commands must not carry an index; per-device ones must. */
3397 	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3398 	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3399 		err = cmd_status(sk, index, opcode,
3400 				 MGMT_STATUS_INVALID_INDEX);
3404 	handler = &mgmt_handlers[opcode];
3406 	if ((handler->var_len && len < handler->data_len) ||
3407 	    (!handler->var_len && len != handler->data_len)) {
3408 		err = cmd_status(sk, index, opcode,
3409 				 MGMT_STATUS_INVALID_PARAMS);
3414 		mgmt_init_hdev(sk, hdev);
3416 	cp = buf + sizeof(*hdr);
3418 	err = handler->func(sk, hdev, cp, len);
/* Broadcast an Index Added event for a newly registered controller;
 * skipped for controllers mgmt does not expose (mgmt_valid_hdev).
 * NOTE(review): the early-return line is elided in this extract.
 */
3432 int mgmt_index_added(struct hci_dev *hdev)
3434 	if (!mgmt_valid_hdev(hdev))
3437 	return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Broadcast an Index Removed event for an unregistering controller and
 * fail every still-pending command (opcode 0 == match all) with
 * Invalid Index so no socket is left waiting.
 */
3440 int mgmt_index_removed(struct hci_dev *hdev)
3442 	u8 status = MGMT_STATUS_INVALID_INDEX;
3444 	if (!mgmt_valid_hdev(hdev))
3447 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3449 	return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
/* Queue HCI commands that restore normal BR/EDR scan state: first
 * disable fast-connectable page-scan parameters, then enable page
 * and/or inquiry scan according to the CONNECTABLE/DISCOVERABLE flags.
 */
3452 static void set_bredr_scan(struct hci_request *req)
3454 	struct hci_dev *hdev = req->hdev;
3457 	/* Ensure that fast connectable is disabled. This function will
3458 	 * not do anything if the page scan parameters are already what
3461 	write_fast_connectable(req, false);
3463 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3465 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3466 		scan |= SCAN_INQUIRY;
3469 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Completion callback for the power-on HCI request batch: answer every
 * pending Set Powered command and emit one New Settings event to the
 * remaining sockets (match.sk collected by settings_rsp).
 */
3472 static void powered_complete(struct hci_dev *hdev, u8 status)
3474 	struct cmd_lookup match = { NULL, hdev };
3476 	BT_DBG("status 0x%02x", status);
3480 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3482 	new_settings(hdev, match.sk);
3484 	hci_dev_unlock(hdev);
/* Build and run the HCI request that brings controller state in line
 * with the mgmt settings after power-on: SSP mode, LE host support,
 * authentication (link security) and BR/EDR scan enable.  Each command
 * is only queued if the controller's current state differs from the
 * desired flag, so an already-synced device produces an empty request.
 * Returns the hci_req_run() result (completion via powered_complete).
 */
3490 static int powered_update_hci(struct hci_dev *hdev)
3492 	struct hci_request req;
3495 	hci_req_init(&req, hdev);
3497 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3498 	    !lmp_host_ssp_capable(hdev)) {
3501 		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3504 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
3505 	    lmp_bredr_capable(hdev)) {
3506 		struct hci_cp_write_le_host_supported cp;
3509 		cp.simul = lmp_le_br_capable(hdev);
3511 		/* Check first if we already have the right
3512 		 * host state (host features set)
3514 		if (cp.le != lmp_host_le_capable(hdev) ||
3515 		    cp.simul != lmp_host_le_br_capable(hdev))
3516 			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3520 	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3521 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3522 		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3523 			    sizeof(link_sec), &link_sec);
3525 	if (lmp_bredr_capable(hdev)) {
3526 		set_bredr_scan(&req);
3532 	return hci_req_run(&req, powered_complete);
/* Notify mgmt of a controller power state change.  On power-up, kick
 * powered_update_hci(); if it queued work (returns 0) the responses are
 * deferred to its completion handler.  On power-down, answer Set
 * Powered commands, fail all other pending commands with Not Powered,
 * and announce a zeroed class-of-device if one was set.  Finishes with
 * a New Settings event.  NOTE(review): the powered/!powered branch
 * structure is partially elided in this extract.
 */
3535 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3537 	struct cmd_lookup match = { NULL, hdev };
3538 	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3539 	u8 zero_cod[] = { 0, 0, 0 };
3542 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3546 		if (powered_update_hci(hdev) == 0)
3549 		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3554 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3555 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3557 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3558 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3559 			   zero_cod, sizeof(zero_cod), NULL);
3562 	err = new_settings(hdev, match.sk);
/* Report a failed power-on attempt to the pending Set Powered command.
 * -ERFKILL is mapped to the dedicated RFKILLED status; everything else
 * becomes a generic FAILED.
 */
3570 int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3572 	struct pending_cmd *cmd;
3575 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3579 	if (err == -ERFKILL)
3580 		status = MGMT_STATUS_RFKILLED;
3582 		status = MGMT_STATUS_FAILED;
3584 	err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3586 	mgmt_pending_remove(cmd);
/* Sync the HCI_DISCOVERABLE flag with the controller-reported state.
 * test_and_{set,clear}_bit makes the update idempotent: "changed" only
 * becomes true on a real transition, which gates the New Settings event
 * (the changed-check line is elided in this extract).
 */
3591 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3593 	struct cmd_lookup match = { NULL, hdev };
3594 	bool changed = false;
3598 		if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3601 		if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3605 	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3609 		err = new_settings(hdev, match.sk);
/* Sync the HCI_CONNECTABLE flag with the controller-reported state and,
 * on a real transition, emit New Settings addressed away from the
 * socket that requested it (if a Set Connectable command is pending).
 */
3617 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3619 	struct pending_cmd *cmd;
3620 	bool changed = false;
3624 		if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3627 		if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3631 		cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3634 		err = new_settings(hdev, cmd ? cmd->sk : NULL);
/* A Write Scan Enable HCI command failed: translate the HCI status and
 * fail whichever pending mgmt commands correspond to the scan bits that
 * were being changed (page scan -> Set Connectable, inquiry scan ->
 * Set Discoverable).
 */
3639 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3641 	u8 mgmt_err = mgmt_status(status);
3643 	if (scan & SCAN_PAGE)
3644 		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3645 				     cmd_status_rsp, &mgmt_err);
3647 	if (scan & SCAN_INQUIRY)
3648 		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3649 				     cmd_status_rsp, &mgmt_err);
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * store_hint tells userspace whether the key should be persisted.
 */
3654 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3657 	struct mgmt_ev_new_link_key ev;
3659 	memset(&ev, 0, sizeof(ev));
3661 	ev.store_hint = persistent;
3662 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3663 	ev.key.addr.type = BDADDR_BREDR;
3664 	ev.key.type = key->type;
3665 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3666 	ev.key.pin_len = key->pin_len;
3668 	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Long Term Key event for a freshly distributed SMP LTK.
 * The address type is translated from the LE link type, and the
 * HCI_SMP_LTK check sets the master flag (its assignment line is
 * elided in this extract).
 */
3671 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3673 	struct mgmt_ev_new_long_term_key ev;
3675 	memset(&ev, 0, sizeof(ev));
3677 	ev.store_hint = persistent;
3678 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3679 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3680 	ev.key.authenticated = key->authenticated;
3681 	ev.key.enc_size = key->enc_size;
3682 	ev.key.ediv = key->ediv;
3684 	if (key->type == HCI_SMP_LTK)
3687 	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3688 	memcpy(ev.key.val, key->val, sizeof(key->val));
3690 	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
/* Emit a Device Connected event.  The event carries a variable-length
 * EIR blob built in a stack buffer: the remote name (if known) and the
 * class of device (if non-zero) are appended as EIR fields.
 * NOTE(review): the buf declaration and the name_len check are elided
 * in this extract.
 */
3694 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3695 			  u8 addr_type, u32 flags, u8 *name, u8 name_len,
3699 	struct mgmt_ev_device_connected *ev = (void *) buf;
3702 	bacpy(&ev->addr.bdaddr, bdaddr);
3703 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
3705 	ev->flags = __cpu_to_le32(flags);
3708 		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
	/* Only include CoD when it is actually set. */
3711 	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3712 		eir_len = eir_append_data(ev->eir, eir_len,
3713 					  EIR_CLASS_OF_DEV, dev_class, 3);
3715 	ev->eir_len = cpu_to_le16(eir_len);
3717 	return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3718 			  sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending Disconnect command
 * with success and remember its socket in *sk (with a sock_hold, elided
 * here) so the caller can address the Device Disconnected event.
 */
3721 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3723 	struct mgmt_cp_disconnect *cp = cmd->param;
3724 	struct sock **sk = data;
3725 	struct mgmt_rp_disconnect rp;
3727 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3728 	rp.addr.type = cp->addr.type;
3730 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3736 	mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: a disconnect triggered by Unpair
 * Device has finished — notify userspace the device is unpaired and
 * complete the pending command with success.
 */
3739 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3741 	struct hci_dev *hdev = data;
3742 	struct mgmt_cp_unpair_device *cp = cmd->param;
3743 	struct mgmt_rp_unpair_device rp;
3745 	memset(&rp, 0, sizeof(rp));
3746 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3747 	rp.addr.type = cp->addr.type;
3749 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3751 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3753 	mgmt_pending_remove(cmd);
/* Emit a Device Disconnected event.  Pending Disconnect commands are
 * completed first (disconnect_rsp also captures their socket so the
 * event skips the requester), and pending Unpair Device commands are
 * finished afterwards since the unpair flow waits for the disconnect.
 */
3756 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3757 			     u8 link_type, u8 addr_type, u8 reason)
3759 	struct mgmt_ev_device_disconnected ev;
3760 	struct sock *sk = NULL;
3763 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3765 	bacpy(&ev.addr.bdaddr, bdaddr);
3766 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3769 	err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3775 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnect failed at the HCI level: finish any pending
 * Unpair Device commands anyway, then complete the pending Disconnect
 * command with the translated HCI status.
 */
3781 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3782 			   u8 link_type, u8 addr_type, u8 status)
3784 	struct mgmt_rp_disconnect rp;
3785 	struct pending_cmd *cmd;
3788 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3791 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3795 	bacpy(&rp.addr.bdaddr, bdaddr);
3796 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
3798 	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3799 			   mgmt_status(status), &rp, sizeof(rp));
3801 	mgmt_pending_remove(cmd);
/* Emit a Connect Failed event with the translated HCI error status. */
3806 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3807 			u8 addr_type, u8 status)
3809 	struct mgmt_ev_connect_failed ev;
3811 	bacpy(&ev.addr.bdaddr, bdaddr);
3812 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3813 	ev.status = mgmt_status(status);
3815 	return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event for a BR/EDR peer; the "secure" flag
 * (assignment line elided here) tells userspace a 16-digit PIN is
 * required.
 */
3818 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3820 	struct mgmt_ev_pin_code_request ev;
3822 	bacpy(&ev.addr.bdaddr, bdaddr);
3823 	ev.addr.type = BDADDR_BREDR;
3826 	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
/* HCI confirmed (or rejected) our PIN Code Reply: complete the pending
 * mgmt command with the translated status and the peer address.
 */
3830 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3833 	struct pending_cmd *cmd;
3834 	struct mgmt_rp_pin_code_reply rp;
3837 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3841 	bacpy(&rp.addr.bdaddr, bdaddr);
3842 	rp.addr.type = BDADDR_BREDR;
3844 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3845 			   mgmt_status(status), &rp, sizeof(rp));
3847 	mgmt_pending_remove(cmd);
/* Mirror of mgmt_pin_code_reply_complete for the negative reply:
 * completes the pending PIN Code Neg Reply command with the translated
 * HCI status.
 */
3852 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3855 	struct pending_cmd *cmd;
3856 	struct mgmt_rp_pin_code_reply rp;
3859 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3863 	bacpy(&rp.addr.bdaddr, bdaddr);
3864 	rp.addr.type = BDADDR_BREDR;
3866 	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3867 			   mgmt_status(status), &rp, sizeof(rp));
3869 	mgmt_pending_remove(cmd);
/* Emit a User Confirm Request event asking userspace to confirm a
 * numeric-comparison value; confirm_hint distinguishes "show and
 * confirm" from "just-works" style confirmation.
 */
3874 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3875 			      u8 link_type, u8 addr_type, __le32 value,
3878 	struct mgmt_ev_user_confirm_request ev;
3880 	BT_DBG("%s", hdev->name);
3882 	bacpy(&ev.addr.bdaddr, bdaddr);
3883 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3884 	ev.confirm_hint = confirm_hint;
3887 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event asking userspace to collect and
 * reply with a passkey for the given peer.
 */
3891 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3892 			      u8 link_type, u8 addr_type)
3894 	struct mgmt_ev_user_passkey_request ev;
3896 	BT_DBG("%s", hdev->name);
3898 	bacpy(&ev.addr.bdaddr, bdaddr);
3899 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3901 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * variants: finds the pending command for @opcode and completes it with
 * the translated HCI status and the peer address.
 */
3905 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3906 				      u8 link_type, u8 addr_type, u8 status,
3909 	struct pending_cmd *cmd;
3910 	struct mgmt_rp_user_confirm_reply rp;
3913 	cmd = mgmt_pending_find(opcode, hdev);
3917 	bacpy(&rp.addr.bdaddr, bdaddr);
3918 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
3919 	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3922 	mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending User Confirm Reply. */
3927 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3928 				     u8 link_type, u8 addr_type, u8 status)
3930 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3931 					  status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending User Confirm Neg Reply. */
3934 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3935 					 u8 link_type, u8 addr_type, u8 status)
3937 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3939 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending User Passkey Reply. */
3942 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3943 				     u8 link_type, u8 addr_type, u8 status)
3945 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3946 					  status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending User Passkey Neg Reply. */
3949 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3950 					 u8 link_type, u8 addr_type, u8 status)
3952 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3954 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display the passkey;
 * "entered" reports how many digits the remote has typed so far.
 */
3957 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3958 			     u8 link_type, u8 addr_type, u32 passkey,
3961 	struct mgmt_ev_passkey_notify ev;
3963 	BT_DBG("%s", hdev->name);
3965 	bacpy(&ev.addr.bdaddr, bdaddr);
3966 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3967 	ev.passkey = __cpu_to_le32(passkey);
3968 	ev.entered = entered;
3970 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Authentication Failed event with the translated HCI status. */
3973 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3974 		     u8 addr_type, u8 status)
3976 	struct mgmt_ev_auth_failed ev;
3978 	bacpy(&ev.addr.bdaddr, bdaddr);
3979 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
3980 	ev.status = mgmt_status(status);
3982 	return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Write Auth Enable completed: on HCI error, fail all pending Set Link
 * Security commands; on success, sync HCI_LINK_SECURITY with the
 * controller's HCI_AUTH flag, answer pending commands, and emit New
 * Settings if the state actually changed (branch structure partially
 * elided in this extract).
 */
3985 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3987 	struct cmd_lookup match = { NULL, hdev };
3988 	bool changed = false;
3992 		u8 mgmt_err = mgmt_status(status);
3993 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3994 				     cmd_status_rsp, &mgmt_err);
3998 	if (test_bit(HCI_AUTH, &hdev->flags)) {
3999 		if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4002 		if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4006 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4010 		err = new_settings(hdev, match.sk);
/* Queue a Write EIR command with all-zero data to clear the extended
 * inquiry response, and clear the cached copy in hdev->eir.  No-op on
 * controllers without EIR support.
 */
4018 static void clear_eir(struct hci_request *req)
4020 	struct hci_dev *hdev = req->hdev;
4021 	struct hci_cp_write_eir cp;
4023 	if (!lmp_ext_inq_capable(hdev))
4026 	memset(hdev->eir, 0, sizeof(hdev->eir));
4028 	memset(&cp, 0, sizeof(cp));
4030 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Write SSP Mode completed: on HCI error, roll back the optimistically
 * set HCI_SSP_ENABLED flag and fail pending Set SSP commands; on
 * success, sync the flag with the requested value, answer pending
 * commands, emit New Settings on a real change, and finally update or
 * clear the EIR to match (the update_eir/clear_eir selection lines are
 * elided in this extract).
 */
4033 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4035 	struct cmd_lookup match = { NULL, hdev };
4036 	struct hci_request req;
4037 	bool changed = false;
4041 		u8 mgmt_err = mgmt_status(status);
4043 		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4045 			err = new_settings(hdev, NULL);
4047 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4054 		if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4057 		if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4061 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4064 		err = new_settings(hdev, match.sk);
4069 	hci_req_init(&req, hdev);
4071 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4076 	hci_req_run(&req, NULL);
/* mgmt_pending_foreach callback: capture the first pending command's
 * socket (with a reference) into the cmd_lookup so later events can be
 * addressed away from it.
 */
4081 static void sk_lookup(struct pending_cmd *cmd, void *data)
4083 	struct cmd_lookup *match = data;
4085 	if (match->sk == NULL) {
4086 		match->sk = cmd->sk;
4087 		sock_hold(match->sk);
/* Class of Device update finished: find which socket triggered it
 * (Set Dev Class, Add UUID or Remove UUID) and broadcast the Class Of
 * Device Changed event to everyone else.  The sock_put for the captured
 * socket is elided in this extract.
 */
4091 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4094 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4097 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4098 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4099 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4102 		err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
/* Local name write completed: cache the name on hdev when no Set Local
 * Name command is pending (i.e. the change came from HCI directly),
 * suppress the event during power-on (a Set Powered is pending), and
 * otherwise broadcast Local Name Changed away from the requester.
 */
4111 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4113 	struct mgmt_cp_set_local_name ev;
4114 	struct pending_cmd *cmd;
4119 	memset(&ev, 0, sizeof(ev));
4120 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4121 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4123 	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4125 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4127 		/* If this is a HCI command related to powering on the
4128 		 * HCI dev don't send any mgmt signals.
4130 		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4134 	return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4135 			  cmd ? cmd->sk : NULL);
/* Read Local OOB Data completed: on HCI error return a status-only
 * reply, otherwise return the hash/randomizer pair to the pending
 * command's socket.
 */
4138 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4139 					    u8 *randomizer, u8 status)
4141 	struct pending_cmd *cmd;
4144 	BT_DBG("%s status %u", hdev->name, status);
4146 	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4151 		err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4152 				 mgmt_status(status));
4154 		struct mgmt_rp_read_local_oob_data rp;
4156 		memcpy(rp.hash, hash, sizeof(rp.hash));
4157 		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4159 		err = cmd_complete(cmd->sk, hdev->id,
4160 				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
4164 	mgmt_pending_remove(cmd);
/* Emit a Device Found event during discovery.  The incoming EIR data
 * is copied into a fixed stack buffer (size check leaves 5 spare bytes
 * so a Class of Device field can be appended if the EIR lacks one);
 * cfm_name and legacy-SSP state are reported via the flags word.
 * Dropped silently when no discovery is active or the EIR would not
 * fit.  NOTE(review): buf declaration and the cfm_name/ssp conditionals
 * are elided in this extract.
 */
4169 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4170 		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4171 		      ssp, u8 *eir, u16 eir_len)
4174 	struct mgmt_ev_device_found *ev = (void *) buf;
4177 	if (!hci_discovery_active(hdev))
4180 	/* Leave 5 bytes for a potential CoD field */
4181 	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4184 	memset(buf, 0, sizeof(buf));
4186 	bacpy(&ev->addr.bdaddr, bdaddr);
4187 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
4190 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4192 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4195 		memcpy(ev->eir, eir, eir_len);
4197 	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4198 		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4201 	ev->eir_len = cpu_to_le16(eir_len);
4202 	ev_size = sizeof(*ev) + eir_len;
4204 	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a Device Found event carrying only the freshly resolved remote
 * name (as an EIR Complete Name field) — used after a remote name
 * request rather than an inquiry result.
 */
4207 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4208 		     u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4210 	struct mgmt_ev_device_found *ev;
4211 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4214 	ev = (struct mgmt_ev_device_found *) buf;
4216 	memset(buf, 0, sizeof(buf));
4218 	bacpy(&ev->addr.bdaddr, bdaddr);
4219 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
4222 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4225 	ev->eir_len = cpu_to_le16(eir_len);
4227 	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4228 			  sizeof(*ev) + eir_len, NULL);
/* Discovery state changed: complete the pending Start or Stop Discovery
 * command (replying with the discovery type) and broadcast a
 * Discovering event with the new state.
 */
4231 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4233 	struct mgmt_ev_discovering ev;
4234 	struct pending_cmd *cmd;
4236 	BT_DBG("%s discovering %u", hdev->name, discovering);
	/* Starting maps to a pending Start Discovery; stopping to Stop. */
4239 		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4241 		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4244 		u8 type = hdev->discovery.type;
4246 		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4248 		mgmt_pending_remove(cmd);
4251 	memset(&ev, 0, sizeof(ev));
4252 	ev.type = hdev->discovery.type;
4253 	ev.discovering = discovering;
4255 	return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a Device Blocked event, skipping the socket whose Block
 * Device command caused it (if one is pending).
 */
4258 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4260 	struct pending_cmd *cmd;
4261 	struct mgmt_ev_device_blocked ev;
4263 	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4265 	bacpy(&ev.addr.bdaddr, bdaddr);
4266 	ev.addr.type = type;
4268 	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4269 			  cmd ? cmd->sk : NULL);
/* Broadcast a Device Unblocked event, skipping the socket whose Unblock
 * Device command caused it (if one is pending).
 */
4272 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4274 	struct pending_cmd *cmd;
4275 	struct mgmt_ev_device_unblocked ev;
4277 	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4279 	bacpy(&ev.addr.bdaddr, bdaddr);
4280 	ev.addr.type = type;
4282 	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4283 			  cmd ? cmd->sk : NULL);
/* Boot/runtime knob for High Speed (AMP) support; the enable_hs
 * variable itself is defined elsewhere in this file.
 */
4286 module_param(enable_hs, bool, 0644);
4287 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");