/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2
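/* A synchronous request moves through a small state machine tracked in
 * hdev->req_status: it is set to HCI_REQ_PEND before the request is
 * submitted, and the completion handler moves it to HCI_REQ_DONE (with
 * the HCI status in hdev->req_result) or HCI_REQ_CANCELED (with a
 * negative errno in hdev->req_result) before waking up req_wait_q.
 */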
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
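/* Typical usage of the asynchronous request API, as seen in callers
 * further down (e.g. hci_abort_conn). Illustrative sketch only;
 * HCI_OP_SOME_CMD is a placeholder opcode:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_SOME_CMD, sizeof(cp), &cp);
 *	err = hci_req_run(&req, some_complete_callback);
 *
 * All queued commands are spliced onto hdev->cmd_q in one go, and the
 * completion callback is attached to the last command of the request.
 */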
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
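/* Note that __hci_cmd_sync() and __hci_cmd_sync_ev() share the
 * hdev->req_status/req_result/req_skb fields with __hci_req_sync(), so
 * concurrent synchronous requests on the same controller must be
 * serialized by the caller (hci_req_sync() below does this via
 * hci_req_sync_lock()).
 */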
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
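/* Page scan interval and window are expressed in 0.625 ms slots, so the
 * values above work out as: interval 0x0100 = 256 * 0.625 ms = 160 ms,
 * interval 0x0800 = 2048 * 0.625 ms = 1.28 s, and window 0x0012 =
 * 18 * 0.625 ms = 11.25 ms.
 */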
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}
void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
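/* The helpers below emit EIR data as length-prefixed fields: byte 0 holds
 * the field length (type byte plus payload), byte 1 the field type (e.g.
 * EIR_UUID16_ALL) and the payload follows. The length byte is patched as
 * UUIDs are appended, and the type is downgraded to the ..._SOME variant
 * when the buffer runs out of space.
 */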
#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}
static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}
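/* The value returned by update_white_list() feeds directly into the scan
 * filter_policy: 0x00 means process all advertisements, 0x01 means only
 * those from devices on the white list. Any RPA-using or overflowing
 * entry therefore forces a fall back to 0x00.
 */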
static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use extended scanning if the controller supports the ext scan
	 * param and ext scan enable commands.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter polices 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}
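/* Like the page scan parameters earlier in this file, hdev->le_scan_interval
 * and hdev->le_scan_window are in units of 0.625 ms.
 */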
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}
void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the moment */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}
static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = 0;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = 0;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}
static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	bool secondary_adv;
	/* In the ext adv set param command the interval is 3 octets */
	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = 127;
	cp.handle = 0;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if random address need to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = 0;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}
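/* The 3-octet adv_interval above is little-endian, so { 0x00, 0x08, 0x00 }
 * is 0x000800 = 2048 * 0.625 ms = 1.28 s for both min and max interval.
 */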
void __hci_req_enable_ext_advertising(struct hci_request *req)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];

	cp = (void *) data;
	adv_set = (void *) cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = 0;

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
		    data);
}
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req);

	return 0;
}
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}
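/* Example of the timeout logic above: an instance with timeout = 10 (s)
 * and duration = 2 (s) is advertised in 2 s slices, with remaining_time
 * dropping by 2 s on each pass until the instance expires; timeout == 0
 * leaves remaining_time untouched and simply re-arms the duration timer.
 */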
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}
void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}
static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
		else
			__hci_req_enable_advertising(req);
	}

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}
static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}
void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}
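/* The LAPs above are the standard inquiry access codes, stored
 * little-endian: GIAC = 0x9e8b33 and LIAC = 0x9e8b00.
 */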
static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
			   err);
		return err;
	}

	return 0;
}
static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}
static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
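/* cp.length is the inquiry duration in units of 1.28 s, taken from the
 * DISCOV_*_INQUIRY_LEN constants passed in through opt.
 */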
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		__hci_req_disable_advertising(req);
	}

	/* If the controller is scanning, the background scan is running.
	 * Temporarily stop it in order to set the discovery scanning
	 * parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
			   own_addr_type);
	return 0;
}
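
/* Request builder used with HCI_QUIRK_SIMULTANEOUS_DISCOVERY: queues the
 * LE active scan and the BR/EDR inquiry in a single request so that the
 * controller schedules both concurrently.
 */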
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}
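
/* Kick off discovery according to hdev->discovery.type and compute the
 * timeout after which LE scanning must be disabled again.
 */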
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * Interleaved discovery, in comparison, runs BR/EDR inquiry
		 * and LE scanning sequentially with separate timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval to leave the controller some time
			 * for BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
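
/* Illustrative sketch (not part of the original file): how a request
 * builder such as active_scan() is driven synchronously. The function
 * name is hypothetical; the real callers are the discovery work items
 * in this file.
 */
#if 0
static void example_run_active_scan(struct hci_dev *hdev)
{
	u8 status;

	/* Builds the request via active_scan(), sends it, and sleeps
	 * until completion or HCI_CMD_TIMEOUT.
	 */
	hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
		     HCI_CMD_TIMEOUT, &status);
	if (status)
		bt_dev_err(hdev, "scan failed: status 0x%02x", status);
}
#endif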
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}
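
/* Request-builder wrapper so that hci_req_stop_discovery() can be driven
 * through hci_req_sync() with the device lock held.
 */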
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}
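
/* Work callback for hdev->discov_update: drives the discovery state
 * machine out of DISCOVERY_STARTING and DISCOVERY_STOPPING and reports
 * the outcome to the management interface.
 */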
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}
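
/* Delayed work callback for hdev->discov_off: the discoverable timeout
 * expired, so clear the discoverable flags and push the new settings to
 * the controller and to management clients.
 */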
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure the
	 * limited discoverable flag is cleared. Even for a timeout
	 * triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}
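
/* Request builder run right after the controller is powered on: brings
 * SSP, Secure Connections, LE host support, advertising, scanning and
 * the EIR/class/name state in line with the current host flags.
 */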
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}
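
/* Wire up all (delayed) work items used by the request infrastructure.
 * Called once when the hci_dev is set up; hci_request_cancel_all() below
 * is the matching teardown.
 */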
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}
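
/* Illustrative sketch (not part of the original file): the expected
 * pairing of hci_request_setup() and hci_request_cancel_all() across a
 * controller's lifetime. The surrounding function is hypothetical.
 */
#if 0
static void example_hdev_lifecycle(struct hci_dev *hdev)
{
	hci_request_setup(hdev);	/* during hci_dev setup */

	/* ... controller powered, discovery and advertising run ... */

	hci_request_cancel_all(hdev);	/* during close/unregister */
}
#endif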