/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24 #include <linux/sched/signal.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
34 #define HCI_REQ_DONE 0
35 #define HCI_REQ_PEND 1
36 #define HCI_REQ_CANCELED 2
38 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
40 skb_queue_head_init(&req->cmd_q);
45 void hci_req_purge(struct hci_request *req)
47 skb_queue_purge(&req->cmd_q);
50 bool hci_req_status_pend(struct hci_dev *hdev)
52 return hdev->req_status == HCI_REQ_PEND;
55 static int req_run(struct hci_request *req, hci_req_complete_t complete,
56 hci_req_complete_skb_t complete_skb)
58 struct hci_dev *hdev = req->hdev;
62 bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
64 /* If an error occurred during request building, remove all HCI
65 * commands queued on the HCI request queue.
68 skb_queue_purge(&req->cmd_q);
72 /* Do not allow empty requests */
73 if (skb_queue_empty(&req->cmd_q))
76 skb = skb_peek_tail(&req->cmd_q);
78 bt_cb(skb)->hci.req_complete = complete;
79 } else if (complete_skb) {
80 bt_cb(skb)->hci.req_complete_skb = complete_skb;
81 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
84 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
85 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
86 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
88 queue_work(hdev->workqueue, &hdev->cmd_work);
93 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
95 return req_run(req, complete, NULL);
98 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
100 return req_run(req, NULL, complete);
103 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
106 bt_dev_dbg(hdev, "result 0x%2.2x", result);
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
112 hdev->req_skb = skb_get(skb);
113 wake_up_interruptible(&hdev->req_wait_q);
117 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
119 bt_dev_dbg(hdev, "err 0x%2.2x", err);
121 if (hdev->req_status == HCI_REQ_PEND) {
122 hdev->req_result = err;
123 hdev->req_status = HCI_REQ_CANCELED;
124 wake_up_interruptible(&hdev->req_wait_q);
128 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
129 const void *param, u8 event, u32 timeout)
131 struct hci_request req;
135 bt_dev_dbg(hdev, "");
137 hci_req_init(&req, hdev);
139 hci_req_add_ev(&req, opcode, plen, param, event);
141 hdev->req_status = HCI_REQ_PEND;
143 err = hci_req_run_skb(&req, hci_req_sync_complete);
147 err = wait_event_interruptible_timeout(hdev->req_wait_q,
148 hdev->req_status != HCI_REQ_PEND, timeout);
150 if (err == -ERESTARTSYS)
151 return ERR_PTR(-EINTR);
153 switch (hdev->req_status) {
155 err = -bt_to_errno(hdev->req_result);
158 case HCI_REQ_CANCELED:
159 err = -hdev->req_result;
167 hdev->req_status = hdev->req_result = 0;
169 hdev->req_skb = NULL;
171 bt_dev_dbg(hdev, "end: err %d", err);
179 return ERR_PTR(-ENODATA);
183 EXPORT_SYMBOL(__hci_cmd_sync_ev);
185 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
186 const void *param, u32 timeout)
188 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
190 EXPORT_SYMBOL(__hci_cmd_sync);
192 /* Execute request and wait for completion. */
193 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
195 unsigned long opt, u32 timeout, u8 *hci_status)
197 struct hci_request req;
200 bt_dev_dbg(hdev, "start");
202 hci_req_init(&req, hdev);
204 hdev->req_status = HCI_REQ_PEND;
206 err = func(&req, opt);
209 *hci_status = HCI_ERROR_UNSPECIFIED;
213 err = hci_req_run_skb(&req, hci_req_sync_complete);
215 hdev->req_status = 0;
217 /* ENODATA means the HCI request command queue is empty.
218 * This can happen when a request with conditionals doesn't
219 * trigger any commands to be sent. This is normal behavior
220 * and should not trigger an error return.
222 if (err == -ENODATA) {
229 *hci_status = HCI_ERROR_UNSPECIFIED;
234 err = wait_event_interruptible_timeout(hdev->req_wait_q,
235 hdev->req_status != HCI_REQ_PEND, timeout);
237 if (err == -ERESTARTSYS)
240 switch (hdev->req_status) {
242 err = -bt_to_errno(hdev->req_result);
244 *hci_status = hdev->req_result;
247 case HCI_REQ_CANCELED:
248 err = -hdev->req_result;
250 *hci_status = HCI_ERROR_UNSPECIFIED;
256 *hci_status = HCI_ERROR_UNSPECIFIED;
260 kfree_skb(hdev->req_skb);
261 hdev->req_skb = NULL;
262 hdev->req_status = hdev->req_result = 0;
264 bt_dev_dbg(hdev, "end: err %d", err);
269 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
271 unsigned long opt, u32 timeout, u8 *hci_status)
275 /* Serialize all requests */
276 hci_req_sync_lock(hdev);
277 /* check the state after obtaing the lock to protect the HCI_UP
278 * against any races from hci_dev_do_close when the controller
281 if (test_bit(HCI_UP, &hdev->flags))
282 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
285 hci_req_sync_unlock(hdev);
290 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
293 int len = HCI_COMMAND_HDR_SIZE + plen;
294 struct hci_command_hdr *hdr;
297 skb = bt_skb_alloc(len, GFP_ATOMIC);
301 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
302 hdr->opcode = cpu_to_le16(opcode);
306 skb_put_data(skb, param, plen);
308 bt_dev_dbg(hdev, "skb len %d", skb->len);
310 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
311 hci_skb_opcode(skb) = opcode;
316 /* Queue a command to an asynchronous HCI request */
317 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
318 const void *param, u8 event)
320 struct hci_dev *hdev = req->hdev;
323 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
325 /* If an error occurred during request building, there is no point in
326 * queueing the HCI command. We can simply return.
331 skb = hci_prepare_cmd(hdev, opcode, plen, param);
333 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
339 if (skb_queue_empty(&req->cmd_q))
340 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
342 bt_cb(skb)->hci.req_event = event;
344 skb_queue_tail(&req->cmd_q, skb);
347 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
350 hci_req_add_ev(req, opcode, plen, param, 0);
353 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
355 struct hci_dev *hdev = req->hdev;
356 struct hci_cp_write_page_scan_activity acp;
359 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
362 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
366 type = PAGE_SCAN_TYPE_INTERLACED;
368 /* 160 msec page scan interval */
369 acp.interval = cpu_to_le16(0x0100);
371 type = hdev->def_page_scan_type;
372 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
375 acp.window = cpu_to_le16(hdev->def_page_scan_window);
377 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
378 __cpu_to_le16(hdev->page_scan_window) != acp.window)
379 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
382 if (hdev->page_scan_type != type)
383 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
386 static void start_interleave_scan(struct hci_dev *hdev)
388 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
389 queue_delayed_work(hdev->req_workqueue,
390 &hdev->interleave_scan, 0);
393 static bool is_interleave_scanning(struct hci_dev *hdev)
395 return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
398 static void cancel_interleave_scan(struct hci_dev *hdev)
400 bt_dev_dbg(hdev, "cancelling interleave scan");
402 cancel_delayed_work_sync(&hdev->interleave_scan);
404 hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
407 /* Return true if interleave_scan wasn't started until exiting this function,
408 * otherwise, return false
410 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
412 /* Do interleaved scan only if all of the following are true:
413 * - There is at least one ADV monitor
414 * - At least one pending LE connection or one device to be scanned for
415 * - Monitor offloading is not supported
416 * If so, we should alternate between allowlist scan and one without
417 * any filters to save power.
419 bool use_interleaving = hci_is_adv_monitoring(hdev) &&
420 !(list_empty(&hdev->pend_le_conns) &&
421 list_empty(&hdev->pend_le_reports)) &&
422 hci_get_adv_monitor_offload_ext(hdev) ==
423 HCI_ADV_MONITOR_EXT_NONE;
424 bool is_interleaving = is_interleave_scanning(hdev);
426 if (use_interleaving && !is_interleaving) {
427 start_interleave_scan(hdev);
428 bt_dev_dbg(hdev, "starting interleave scan");
432 if (!use_interleaving && is_interleaving)
433 cancel_interleave_scan(hdev);
438 /* This function controls the background scanning based on hdev->pend_le_conns
439 * list. If there are pending LE connection we start the background scanning,
440 * otherwise we stop it.
442 * This function requires the caller holds hdev->lock.
444 static void __hci_update_background_scan(struct hci_request *req)
446 struct hci_dev *hdev = req->hdev;
448 if (!test_bit(HCI_UP, &hdev->flags) ||
449 test_bit(HCI_INIT, &hdev->flags) ||
450 hci_dev_test_flag(hdev, HCI_SETUP) ||
451 hci_dev_test_flag(hdev, HCI_CONFIG) ||
452 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
453 hci_dev_test_flag(hdev, HCI_UNREGISTER))
456 /* No point in doing scanning if LE support hasn't been enabled */
457 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
460 /* If discovery is active don't interfere with it */
461 if (hdev->discovery.state != DISCOVERY_STOPPED)
464 /* Reset RSSI and UUID filters when starting background scanning
465 * since these filters are meant for service discovery only.
467 * The Start Discovery and Start Service Discovery operations
468 * ensure to set proper values for RSSI threshold and UUID
469 * filter list. So it is safe to just reset them here.
471 hci_discovery_filter_clear(hdev);
473 bt_dev_dbg(hdev, "ADV monitoring is %s",
474 hci_is_adv_monitoring(hdev) ? "on" : "off");
476 if (list_empty(&hdev->pend_le_conns) &&
477 list_empty(&hdev->pend_le_reports) &&
478 !hci_is_adv_monitoring(hdev)) {
479 /* If there is no pending LE connections or devices
480 * to be scanned for or no ADV monitors, we should stop the
481 * background scanning.
484 /* If controller is not scanning we are done. */
485 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
488 hci_req_add_le_scan_disable(req, false);
490 bt_dev_dbg(hdev, "stopping background scanning");
492 /* If there is at least one pending LE connection, we should
493 * keep the background scan running.
496 /* If controller is connecting, we should not start scanning
497 * since some controllers are not able to scan and connect at
500 if (hci_lookup_le_connect(hdev))
503 /* If controller is currently scanning, we stop it to ensure we
504 * don't miss any advertising (due to duplicates filter).
506 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
507 hci_req_add_le_scan_disable(req, false);
509 hci_req_add_le_passive_scan(req);
510 bt_dev_dbg(hdev, "starting background scanning");
514 void __hci_req_update_name(struct hci_request *req)
516 struct hci_dev *hdev = req->hdev;
517 struct hci_cp_write_local_name cp;
519 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
521 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
524 #define PNP_INFO_SVCLASS_ID 0x1200
526 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
528 u8 *ptr = data, *uuids_start = NULL;
529 struct bt_uuid *uuid;
534 list_for_each_entry(uuid, &hdev->uuids, list) {
537 if (uuid->size != 16)
540 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
544 if (uuid16 == PNP_INFO_SVCLASS_ID)
550 uuids_start[1] = EIR_UUID16_ALL;
554 /* Stop if not enough space to put next UUID */
555 if ((ptr - data) + sizeof(u16) > len) {
556 uuids_start[1] = EIR_UUID16_SOME;
560 *ptr++ = (uuid16 & 0x00ff);
561 *ptr++ = (uuid16 & 0xff00) >> 8;
562 uuids_start[0] += sizeof(uuid16);
568 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
570 u8 *ptr = data, *uuids_start = NULL;
571 struct bt_uuid *uuid;
576 list_for_each_entry(uuid, &hdev->uuids, list) {
577 if (uuid->size != 32)
583 uuids_start[1] = EIR_UUID32_ALL;
587 /* Stop if not enough space to put next UUID */
588 if ((ptr - data) + sizeof(u32) > len) {
589 uuids_start[1] = EIR_UUID32_SOME;
593 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
595 uuids_start[0] += sizeof(u32);
601 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
603 u8 *ptr = data, *uuids_start = NULL;
604 struct bt_uuid *uuid;
609 list_for_each_entry(uuid, &hdev->uuids, list) {
610 if (uuid->size != 128)
616 uuids_start[1] = EIR_UUID128_ALL;
620 /* Stop if not enough space to put next UUID */
621 if ((ptr - data) + 16 > len) {
622 uuids_start[1] = EIR_UUID128_SOME;
626 memcpy(ptr, uuid->uuid, 16);
628 uuids_start[0] += 16;
634 static void create_eir(struct hci_dev *hdev, u8 *data)
639 name_len = strlen(hdev->dev_name);
645 ptr[1] = EIR_NAME_SHORT;
647 ptr[1] = EIR_NAME_COMPLETE;
649 /* EIR Data length */
650 ptr[0] = name_len + 1;
652 memcpy(ptr + 2, hdev->dev_name, name_len);
654 ptr += (name_len + 2);
657 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
659 ptr[1] = EIR_TX_POWER;
660 ptr[2] = (u8) hdev->inq_tx_power;
665 if (hdev->devid_source > 0) {
667 ptr[1] = EIR_DEVICE_ID;
669 put_unaligned_le16(hdev->devid_source, ptr + 2);
670 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
671 put_unaligned_le16(hdev->devid_product, ptr + 6);
672 put_unaligned_le16(hdev->devid_version, ptr + 8);
677 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
678 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
679 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
682 void __hci_req_update_eir(struct hci_request *req)
684 struct hci_dev *hdev = req->hdev;
685 struct hci_cp_write_eir cp;
687 if (!hdev_is_powered(hdev))
690 if (!lmp_ext_inq_capable(hdev))
693 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
696 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
699 memset(&cp, 0, sizeof(cp));
701 create_eir(hdev, cp.data);
703 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
706 memcpy(hdev->eir, cp.data, sizeof(cp.data));
708 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
711 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
713 struct hci_dev *hdev = req->hdev;
715 if (hdev->scanning_paused) {
716 bt_dev_dbg(hdev, "Scanning is paused for suspend");
721 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
723 if (use_ext_scan(hdev)) {
724 struct hci_cp_le_set_ext_scan_enable cp;
726 memset(&cp, 0, sizeof(cp));
727 cp.enable = LE_SCAN_DISABLE;
728 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
731 struct hci_cp_le_set_scan_enable cp;
733 memset(&cp, 0, sizeof(cp));
734 cp.enable = LE_SCAN_DISABLE;
735 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
738 /* Disable address resolution */
739 if (use_ll_privacy(hdev) &&
740 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
741 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
744 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
748 static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
751 struct hci_cp_le_del_from_accept_list cp;
753 cp.bdaddr_type = bdaddr_type;
754 bacpy(&cp.bdaddr, bdaddr);
756 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
758 hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
760 if (use_ll_privacy(req->hdev) &&
761 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
764 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
766 struct hci_cp_le_del_from_resolv_list cp;
768 cp.bdaddr_type = bdaddr_type;
769 bacpy(&cp.bdaddr, bdaddr);
771 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
777 /* Adds connection to accept list if needed. On error, returns -1. */
778 static int add_to_accept_list(struct hci_request *req,
779 struct hci_conn_params *params, u8 *num_entries,
782 struct hci_cp_le_add_to_accept_list cp;
783 struct hci_dev *hdev = req->hdev;
785 /* Already in accept list */
786 if (hci_bdaddr_list_lookup(&hdev->le_accept_list, ¶ms->addr,
790 /* Select filter policy to accept all advertising */
791 if (*num_entries >= hdev->le_accept_list_size)
794 /* Accept list can not be used with RPAs */
796 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
797 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) {
801 /* During suspend, only wakeable devices can be in accept list */
802 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
803 params->current_flags))
807 cp.bdaddr_type = params->addr_type;
808 bacpy(&cp.bdaddr, ¶ms->addr);
810 bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
812 hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
814 if (use_ll_privacy(hdev) &&
815 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
818 irk = hci_find_irk_by_addr(hdev, ¶ms->addr,
821 struct hci_cp_le_add_to_resolv_list cp;
823 cp.bdaddr_type = params->addr_type;
824 bacpy(&cp.bdaddr, ¶ms->addr);
825 memcpy(cp.peer_irk, irk->val, 16);
827 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
828 memcpy(cp.local_irk, hdev->irk, 16);
830 memset(cp.local_irk, 0, 16);
832 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
/* Synchronize the controller accept list with the pending-connection and
 * pending-report lists and return the scan filter policy to use (accept
 * list on/off).
 *
 * NOTE(review): this block appears corrupted by extraction — line-number
 * prefixes are fused into the text and interior lines (braces, return
 * statements, the num_entries declaration, loop bodies) look dropped.
 * Restore from the upstream revision before relying on it — TODO confirm
 * against version control.
 */
840 static u8 update_accept_list(struct hci_request *req)
842 struct hci_dev *hdev = req->hdev;
843 struct hci_conn_params *params;
844 struct bdaddr_list *b;
846 bool pend_conn, pend_report;
847 /* We allow usage of accept list even with RPAs in suspend. In the worst
848 * case, we won't be able to wake from devices that use the privacy1.2
849 * features. Additionally, once we support privacy1.2 and IRK
850 * offloading, we can update this to also check for those conditions.
852 bool allow_rpa = hdev->suspended;
854 if (use_ll_privacy(hdev) &&
855 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
858 /* Go through the current accept list programmed into the
859 * controller one by one and check if that address is still
860 * in the list of pending connections or list of devices to
861 * report. If not present in either list, then queue the
862 * command to remove it from the controller.
864 list_for_each_entry(b, &hdev->le_accept_list, list) {
865 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
868 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
872 /* If the device is not likely to connect or report,
873 * remove it from the accept list.
875 if (!pend_conn && !pend_report) {
876 del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
880 /* Accept list can not be used with RPAs */
882 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
883 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
890 /* Since all no longer valid accept list entries have been
891 * removed, walk through the list of pending connections
892 * and ensure that any new device gets programmed into
895 * If the list of the devices is larger than the list of
896 * available accept list entries in the controller, then
897 * just abort and return filer policy value to not use the
900 list_for_each_entry(params, &hdev->pend_le_conns, action) {
901 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
905 /* After adding all new pending connections, walk through
906 * the list of pending reports and also add these to the
907 * accept list if there is still space. Abort if space runs out.
909 list_for_each_entry(params, &hdev->pend_le_reports, action) {
910 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
914 /* Use the allowlist unless the following conditions are all true:
915 * - We are not currently suspending
916 * - There are 1 or more ADV monitors registered and it's not offloaded
917 * - Interleaved scanning is not currently using the allowlist
919 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
920 hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
921 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
924 /* Select filter policy to use accept list */
928 static bool scan_use_rpa(struct hci_dev *hdev)
930 return hci_dev_test_flag(hdev, HCI_PRIVACY);
/* Queue the commands that configure and enable LE scanning, using the
 * extended (per-PHY) commands when the controller supports them and the
 * legacy commands otherwise.
 *
 * NOTE(review): extraction damage — fused line numbers, dropped interior
 * lines (early return, the addr_resolv/ll-privacy condition tail, plen
 * declaration, closing braces) and mojibake ("¶m_cp" was presumably
 * "&param_cp"). Restore from the upstream revision — TODO confirm.
 */
933 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
934 u16 window, u8 own_addr_type, u8 filter_policy,
935 bool filter_dup, bool addr_resolv)
937 struct hci_dev *hdev = req->hdev;
939 if (hdev->scanning_paused) {
940 bt_dev_dbg(hdev, "Scanning is paused for suspend");
944 if (use_ll_privacy(hdev) &&
945 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
949 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
952 /* Use ext scanning if set ext scan param and ext scan enable is
955 if (use_ext_scan(hdev)) {
956 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
957 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
958 struct hci_cp_le_scan_phy_params *phy_params;
959 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
962 ext_param_cp = (void *)data;
963 phy_params = (void *)ext_param_cp->data;
965 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
966 ext_param_cp->own_addr_type = own_addr_type;
967 ext_param_cp->filter_policy = filter_policy;
969 plen = sizeof(*ext_param_cp);
971 if (scan_1m(hdev) || scan_2m(hdev)) {
972 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
974 memset(phy_params, 0, sizeof(*phy_params));
975 phy_params->type = type;
976 phy_params->interval = cpu_to_le16(interval);
977 phy_params->window = cpu_to_le16(window);
979 plen += sizeof(*phy_params);
983 if (scan_coded(hdev)) {
984 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
986 memset(phy_params, 0, sizeof(*phy_params));
987 phy_params->type = type;
988 phy_params->interval = cpu_to_le16(interval);
989 phy_params->window = cpu_to_le16(window);
991 plen += sizeof(*phy_params);
995 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
998 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
999 ext_enable_cp.enable = LE_SCAN_ENABLE;
1000 ext_enable_cp.filter_dup = filter_dup;
1002 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
1003 sizeof(ext_enable_cp), &ext_enable_cp);
1005 struct hci_cp_le_set_scan_param param_cp;
1006 struct hci_cp_le_set_scan_enable enable_cp;
1008 memset(¶m_cp, 0, sizeof(param_cp));
1009 param_cp.type = type;
1010 param_cp.interval = cpu_to_le16(interval);
1011 param_cp.window = cpu_to_le16(window);
1012 param_cp.own_address_type = own_addr_type;
1013 param_cp.filter_policy = filter_policy;
1014 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1017 memset(&enable_cp, 0, sizeof(enable_cp));
1018 enable_cp.enable = LE_SCAN_ENABLE;
1019 enable_cp.filter_dup = filter_dup;
1020 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1025 /* Returns true if an le connection is in the scanning state */
1026 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1028 struct hci_conn_hash *h = &hdev->conn_hash;
1033 list_for_each_entry_rcu(c, &h->list, list) {
1034 if (c->type == LE_LINK && c->state == BT_CONNECT &&
1035 test_bit(HCI_CONN_SCANNING, &c->flags)) {
/* Queue the commands for LE passive (background) scanning: pick the own
 * address type, program the accept list, choose filter policy and scan
 * window/interval based on the current mode (suspend / connect / ADV
 * monitoring / default), then start the scan.
 *
 * NOTE(review): extraction damage — fused line numbers and dropped interior
 * lines (own_addr_type/filter_policy declarations, early returns, else
 * branches, closing braces). Restore from the upstream revision before
 * relying on this text — TODO confirm.
 */
1046 /* Ensure to call hci_req_add_le_scan_disable() first to disable the
1047 * controller based address resolution to be able to reconfigure
1050 void hci_req_add_le_passive_scan(struct hci_request *req)
1052 struct hci_dev *hdev = req->hdev;
1055 u16 window, interval;
1056 /* Default is to enable duplicates filter */
1057 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1058 /* Background scanning should run with address resolution */
1059 bool addr_resolv = true;
1061 if (hdev->scanning_paused) {
1062 bt_dev_dbg(hdev, "Scanning is paused for suspend");
1066 /* Set require_privacy to false since no SCAN_REQ are send
1067 * during passive scanning. Not using an non-resolvable address
1068 * here is important so that peer devices using direct
1069 * advertising with our address will be correctly reported
1070 * by the controller.
1072 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1076 if (hdev->enable_advmon_interleave_scan &&
1077 __hci_update_interleaved_scan(hdev))
1080 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1081 /* Adding or removing entries from the accept list must
1082 * happen before enabling scanning. The controller does
1083 * not allow accept list modification while scanning.
1085 filter_policy = update_accept_list(req);
1087 /* When the controller is using random resolvable addresses and
1088 * with that having LE privacy enabled, then controllers with
1089 * Extended Scanner Filter Policies support can now enable support
1090 * for handling directed advertising.
1092 * So instead of using filter polices 0x00 (no accept list)
1093 * and 0x01 (accept list enabled) use the new filter policies
1094 * 0x02 (no accept list) and 0x03 (accept list enabled).
1096 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1097 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1098 filter_policy |= 0x02;
1100 if (hdev->suspended) {
1101 window = hdev->le_scan_window_suspend;
1102 interval = hdev->le_scan_int_suspend;
1104 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1105 } else if (hci_is_le_conn_scanning(hdev)) {
1106 window = hdev->le_scan_window_connect;
1107 interval = hdev->le_scan_int_connect;
1108 } else if (hci_is_adv_monitoring(hdev)) {
1109 window = hdev->le_scan_window_adv_monitor;
1110 interval = hdev->le_scan_int_adv_monitor;
1112 /* Disable duplicates filter when scanning for advertisement
1113 * monitor for the following reasons.
1115 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
1116 * controllers ignore RSSI_Sampling_Period when the duplicates
1117 * filter is enabled.
1119 * For SW pattern filtering, when we're not doing interleaved
1120 * scanning, it is necessary to disable duplicates filter,
1121 * otherwise hosts can only receive one advertisement and it's
1122 * impossible to know if a peer is still in range.
1124 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
1126 window = hdev->le_scan_window;
1127 interval = hdev->le_scan_interval;
1130 bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
1132 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1133 own_addr_type, filter_policy, filter_dup,
1137 static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1139 struct adv_info *adv_instance;
1141 /* Instance 0x00 always set local name */
1142 if (instance == 0x00)
1145 adv_instance = hci_find_adv_instance(hdev, instance);
1149 if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1150 adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1153 return adv_instance->scan_rsp_len ? true : false;
1156 static void hci_req_clear_event_filter(struct hci_request *req)
1158 struct hci_cp_set_event_filter f;
1160 if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
1163 if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
1164 memset(&f, 0, sizeof(f));
1165 f.flt_type = HCI_FLT_CLEAR_ALL;
1166 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1170 static void hci_req_set_event_filter(struct hci_request *req)
1172 struct bdaddr_list_with_flags *b;
1173 struct hci_cp_set_event_filter f;
1174 struct hci_dev *hdev = req->hdev;
1175 u8 scan = SCAN_DISABLED;
1176 bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
1178 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1181 /* Always clear event filter when starting */
1182 hci_req_clear_event_filter(req);
1184 list_for_each_entry(b, &hdev->accept_list, list) {
1185 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1189 memset(&f, 0, sizeof(f));
1190 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1191 f.flt_type = HCI_FLT_CONN_SETUP;
1192 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1193 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1195 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1196 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1200 if (scan && !scanning) {
1201 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1202 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1203 } else if (!scan && scanning) {
1204 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1205 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1209 static void cancel_adv_timeout(struct hci_dev *hdev)
1211 if (hdev->adv_instance_timeout) {
1212 hdev->adv_instance_timeout = 0;
1213 cancel_delayed_work(&hdev->adv_instance_expire);
1217 /* This function requires the caller holds hdev->lock */
1218 void __hci_req_pause_adv_instances(struct hci_request *req)
1220 bt_dev_dbg(req->hdev, "Pausing advertising instances");
1222 /* Call to disable any advertisements active on the controller.
1223 * This will succeed even if no advertisements are configured.
1225 __hci_req_disable_advertising(req);
1227 /* If we are using software rotation, pause the loop */
1228 if (!ext_adv_capable(req->hdev))
1229 cancel_adv_timeout(req->hdev);
1232 /* This function requires the caller holds hdev->lock */
1233 static void __hci_req_resume_adv_instances(struct hci_request *req)
1235 struct adv_info *adv;
1237 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1239 if (ext_adv_capable(req->hdev)) {
1240 /* Call for each tracked instance to be re-enabled */
1241 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1242 __hci_req_enable_ext_advertising(req,
1247 /* Schedule for most recent instance to be restarted and begin
1248 * the software rotation loop
1250 __hci_req_schedule_adv_instance(req,
1251 req->hdev->cur_adv_instance,
1256 /* This function requires the caller holds hdev->lock */
1257 int hci_req_resume_adv_instances(struct hci_dev *hdev)
1259 struct hci_request req;
1261 hci_req_init(&req, hdev);
1262 __hci_req_resume_adv_instances(&req);
1264 return hci_req_run(&req, NULL);
1267 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1269 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1271 if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1272 test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1273 clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1274 clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1275 wake_up(&hdev->suspend_wait_q);
1278 if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1279 clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1280 wake_up(&hdev->suspend_wait_q);
1284 static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
1287 struct hci_dev *hdev = req->hdev;
1289 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1290 case HCI_ADV_MONITOR_EXT_MSFT:
1291 msft_req_add_set_filter_enable(req, enable);
1297 /* No need to block when enabling since it's on resume path */
1298 if (hdev->suspended && !enable)
1299 set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1302 /* Call with hci_dev_lock */
/* Drive the controller through a suspend-state transition.
 *
 * next == BT_SUSPEND_DISCONNECT: pause discovery and advertising,
 * disable page scan / LE passive scan / adv filters, then soft
 * disconnect all links. next == BT_SUSPEND_CONFIGURE_WAKE: program
 * event filters and low-duty passive scanning for wakeup. Any other
 * state: undo the above and restore normal operation. Long-running
 * steps are tracked via hdev->suspend_tasks bits; waiters sit on
 * hdev->suspend_wait_q and are released by suspend_req_complete().
 */
1303 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1306 struct hci_conn *conn;
1307 struct hci_request req;
1309 int disconnect_counter;
/* No-op if we are already in the requested state */
1311 if (next == hdev->suspend_state) {
1312 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1316 hdev->suspend_state = next;
1317 hci_req_init(&req, hdev);
1319 if (next == BT_SUSPEND_DISCONNECT) {
1320 /* Mark device as suspended */
1321 hdev->suspended = true;
1323 /* Pause discovery if not already stopped */
1324 old_state = hdev->discovery.state;
1325 if (old_state != DISCOVERY_STOPPED) {
1326 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1327 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1328 queue_work(hdev->req_workqueue, &hdev->discov_update);
1331 hdev->discovery_paused = true;
1332 hdev->discovery_old_state = old_state;
1334 /* Stop directed advertising */
1335 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1337 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1338 cancel_delayed_work(&hdev->discov_off);
1339 queue_delayed_work(hdev->req_workqueue,
1340 &hdev->discov_off, 0);
1343 /* Pause other advertisements */
1344 if (hdev->adv_instance_cnt)
1345 __hci_req_pause_adv_instances(&req);
1347 hdev->advertising_paused = true;
1348 hdev->advertising_old_state = old_state;
1350 /* Disable page scan if enabled */
1351 if (test_bit(HCI_PSCAN, &hdev->flags)) {
1352 page_scan = SCAN_DISABLED;
1353 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
1355 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1358 /* Disable LE passive scan if enabled */
1359 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1360 cancel_interleave_scan(hdev);
1361 hci_req_add_le_scan_disable(&req, false);
1364 /* Disable advertisement filters */
1365 hci_req_add_set_adv_filter_enable(&req, false);
1367 /* Prevent disconnects from causing scanning to be re-enabled */
1368 hdev->scanning_paused = true;
1370 /* Run commands before disconnecting */
1371 hci_req_run(&req, suspend_req_complete);
1373 disconnect_counter = 0;
1374 /* Soft disconnect everything (power off) */
1375 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1376 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1377 disconnect_counter++;
/* Only wait for disconnect completion if something was connected */
1380 if (disconnect_counter > 0) {
1382 "Had %d disconnects. Will wait on them",
1383 disconnect_counter);
1384 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1386 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1387 /* Unpause to take care of updating scanning params */
1388 hdev->scanning_paused = false;
1389 /* Enable event filter for paired devices */
1390 hci_req_set_event_filter(&req);
1391 /* Enable passive scan at lower duty cycle */
1392 __hci_update_background_scan(&req);
1393 /* Pause scan changes again. */
1394 hdev->scanning_paused = true;
1395 hci_req_run(&req, suspend_req_complete);
/* Resume path: undo everything the disconnect path set up */
1397 hdev->suspended = false;
1398 hdev->scanning_paused = false;
1400 /* Clear any event filters and restore scan state */
1401 hci_req_clear_event_filter(&req);
1402 __hci_req_update_scan(&req);
1404 /* Reset passive/background scanning to normal */
1405 __hci_update_background_scan(&req);
1406 /* Enable all of the advertisement filters */
1407 hci_req_add_set_adv_filter_enable(&req, true);
1409 /* Unpause directed advertising */
1410 hdev->advertising_paused = false;
1411 if (hdev->advertising_old_state) {
1412 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1413 hdev->suspend_tasks);
1414 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1415 queue_work(hdev->req_workqueue,
1416 &hdev->discoverable_update);
1417 hdev->advertising_old_state = 0;
1420 /* Resume other advertisements */
1421 if (hdev->adv_instance_cnt)
1422 __hci_req_resume_adv_instances(&req);
1424 /* Unpause discovery */
1425 hdev->discovery_paused = false;
1426 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1427 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1428 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1429 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1430 queue_work(hdev->req_workqueue, &hdev->discov_update);
1433 hci_req_run(&req, suspend_req_complete);
1436 hdev->suspend_state = next;
/* Notifier is always released, regardless of which path ran */
1439 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1440 wake_up(&hdev->suspend_wait_q);
1443 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1445 return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1448 void __hci_req_disable_advertising(struct hci_request *req)
1450 if (ext_adv_capable(req->hdev)) {
1451 __hci_req_disable_ext_adv_instance(req, 0x00);
1456 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Return the MGMT advertising flags for @instance.
 *
 * Instance 0x00 has no stored adv_info; its flags are synthesized from
 * the adapter-wide HCI flags. Other instances return the flags stored
 * in their adv_info (0 if the instance does not exist).
 */
1460 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1463 struct adv_info *adv_instance;
1465 if (instance == 0x00) {
1466 /* Instance 0 always manages the "Tx Power" and "Flags"
1469 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1471 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1472 * corresponds to the "connectable" instance flag.
1474 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1475 flags |= MGMT_ADV_FLAG_CONNECTABLE;
/* Limited discoverable takes precedence over general discoverable */
1477 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1478 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1479 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1480 flags |= MGMT_ADV_FLAG_DISCOV;
1485 adv_instance = hci_find_adv_instance(hdev, instance);
1487 /* Return 0 when we got an invalid instance identifier. */
1491 return adv_instance->flags;
1494 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1496 /* If privacy is not enabled don't use RPA */
1497 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1500 /* If basic privacy mode is enabled use RPA */
1501 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1504 /* If limited privacy mode is enabled don't use RPA if we're
1505 * both discoverable and bondable.
1507 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1508 hci_dev_test_flag(hdev, HCI_BONDABLE))
1511 /* We're neither bondable nor discoverable in the limited
1512 * privacy mode, therefore use RPA.
1517 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1519 /* If there is no connection we are OK to advertise. */
1520 if (hci_conn_num(hdev, LE_LINK) == 0)
1523 /* Check le_states if there is any connection in peripheral role. */
1524 if (hdev->conn_hash.le_num_peripheral > 0) {
1525 /* Peripheral connection state and non connectable mode bit 20.
1527 if (!connectable && !(hdev->le_states[2] & 0x10))
1530 /* Peripheral connection state and connectable mode bit 38
1531 * and scannable bit 21.
1533 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1534 !(hdev->le_states[2] & 0x20)))
1538 /* Check le_states if there is any connection in central role. */
1539 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1540 /* Central connection state and non connectable mode bit 18. */
1541 if (!connectable && !(hdev->le_states[2] & 0x02))
1544 /* Central connection state and connectable mode bit 35 and
1547 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1548 !(hdev->le_states[2] & 0x08)))
/* Queue legacy (non-extended) advertising parameter + enable commands
 * for the current instance: choose the advertising type, intervals and
 * own address type, then issue LE_SET_ADV_PARAM followed by
 * LE_SET_ADV_ENABLE.
 */
void __hci_req_enable_advertising(struct hci_request *req)
1557 struct hci_dev *hdev = req->hdev;
1558 struct adv_info *adv_instance;
1559 struct hci_cp_le_set_adv_param cp;
1560 u8 own_addr_type, enable = 0x01;
1562 u16 adv_min_interval, adv_max_interval;
1565 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1566 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1568 /* If the "connectable" instance flag was not set, then choose between
1569 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1571 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1572 mgmt_get_connectable(hdev);
/* Bail out if the supported-states check forbids advertising now */
1574 if (!is_advertising_allowed(hdev, connectable))
/* Restarting: turn the previous advertising off first */
1577 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1578 __hci_req_disable_advertising(req);
1580 /* Clear the HCI_LE_ADV bit temporarily so that the
1581 * hci_update_random_address knows that it's safe to go ahead
1582 * and write a new random address. The flag will be set back on
1583 * as soon as the SET_ADV_ENABLE HCI command completes.
1585 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1587 /* Set require_privacy to true only when non-connectable
1588 * advertising is used. In that case it is fine to use a
1589 * non-resolvable private address.
1591 if (hci_update_random_address(req, !connectable,
1592 adv_use_rpa(hdev, flags),
1593 &own_addr_type) < 0)
1596 memset(&cp, 0, sizeof(cp));
/* Per-instance intervals win over the adapter defaults */
1599 adv_min_interval = adv_instance->min_interval;
1600 adv_max_interval = adv_instance->max_interval;
1602 adv_min_interval = hdev->le_adv_min_interval;
1603 adv_max_interval = hdev->le_adv_max_interval;
1607 cp.type = LE_ADV_IND;
/* Non-connectable: scannable if scan rsp data exists, else nonconn */
1609 if (adv_cur_instance_is_scannable(hdev))
1610 cp.type = LE_ADV_SCAN_IND;
1612 cp.type = LE_ADV_NONCONN_IND;
/* Fast-advertise while not (generally) discoverable */
1614 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1615 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1616 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1617 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1621 cp.min_interval = cpu_to_le16(adv_min_interval);
1622 cp.max_interval = cpu_to_le16(adv_max_interval);
1625 cp.filter_policy = hdev->adv_filter_policy;
1626 cp.type = hdev->adv_type;
1629 cp.own_address_type = own_addr_type;
1630 cp.channel_map = hdev->le_adv_channel_map;
1632 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1634 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Append the device name to an AD/EIR buffer at @ptr.
 *
 * Preference order: complete name if it fits in the short-name budget,
 * then the configured short name, then a truncated copy of the full
 * name. Returns the new total AD length; unchanged if nothing fits.
 */
1637 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1640 size_t complete_len;
1642 /* no space left for name (+ NULL + type + len) */
1643 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1646 /* use complete name if present and fits */
1647 complete_len = strlen(hdev->dev_name);
1648 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1649 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1650 hdev->dev_name, complete_len + 1);
1652 /* use short name if present */
1653 short_len = strlen(hdev->short_name);
1655 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1656 hdev->short_name, short_len + 1);
1658 /* use shortened full name if present, we already know that name
1659 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1662 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1664 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1665 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1667 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1674 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1676 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1679 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1681 u8 scan_rsp_len = 0;
1683 if (hdev->appearance)
1684 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1686 return append_local_name(hdev, ptr, scan_rsp_len);
/* Build the scan response payload for a specific advertising instance:
 * optional appearance, the instance's stored scan_rsp_data, and
 * optionally the local name, as selected by the instance flags.
 * Returns the payload length.
 */
1689 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1692 struct adv_info *adv_instance;
1694 u8 scan_rsp_len = 0;
1696 adv_instance = hci_find_adv_instance(hdev, instance);
1700 instance_flags = adv_instance->flags;
1702 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
1703 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
/* Copy the raw instance scan response data verbatim */
1705 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1706 adv_instance->scan_rsp_len);
1708 scan_rsp_len += adv_instance->scan_rsp_len;
1710 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1711 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1713 return scan_rsp_len;
/* Push fresh scan response data for @instance to the controller,
 * using the extended or legacy HCI command depending on capability.
 * Skips the HCI command when the data is unchanged from the cached
 * copy in hdev->scan_rsp_data.
 */
1716 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1718 struct hci_dev *hdev = req->hdev;
1721 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1724 if (ext_adv_capable(hdev)) {
1726 struct hci_cp_le_set_ext_scan_rsp_data cp;
1727 u8 data[HCI_MAX_EXT_AD_LENGTH];
1730 memset(&pdu, 0, sizeof(pdu));
1733 len = create_instance_scan_rsp_data(hdev, instance,
1736 len = create_default_scan_rsp_data(hdev, pdu.data);
1738 /* Advertising scan response data is handled in bluez.
1739 * This value will be updated only when application request the update
1740 * using adapter_set_scan_rsp_data()
/* Unchanged data: no HCI round-trip needed */
1745 if (hdev->scan_rsp_data_len == len &&
1746 !memcmp(pdu.data, hdev->scan_rsp_data, len))
1749 memcpy(hdev->scan_rsp_data, pdu.data, len);
1750 hdev->scan_rsp_data_len = len;
1752 pdu.cp.handle = instance;
1753 pdu.cp.length = len;
1754 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1755 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1757 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1758 sizeof(pdu.cp) + len, &pdu.cp);
/* Legacy controllers use the fixed-size scan rsp data command */
1761 struct hci_cp_le_set_scan_rsp_data cp;
1763 memset(&cp, 0, sizeof(cp));
1766 len = create_instance_scan_rsp_data(hdev, instance,
1769 len = create_default_scan_rsp_data(hdev, cp.data);
1771 /* Advertising scan response data is handled in bluez.
1772 * This value will be updated only when application request the update
1773 * using adapter_set_scan_rsp_data()
1777 if (hdev->scan_rsp_data_len == len &&
1778 !memcmp(cp.data, hdev->scan_rsp_data, len))
1781 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1782 hdev->scan_rsp_data_len = len;
1786 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Assemble the advertising data payload for @instance into @ptr:
 * optional Flags field (general/limited discoverable, no-BR/EDR), the
 * instance's raw adv_data, and an optional Tx Power field. Returns the
 * payload length.
 */
1791 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1793 struct adv_info *adv_instance = NULL;
1794 u8 ad_len = 0, flags = 0;
1797 /* Return 0 when the current instance identifier is invalid. */
1799 adv_instance = hci_find_adv_instance(hdev, instance);
1804 instance_flags = get_adv_instance_flags(hdev, instance);
1806 /* If instance already has the flags set skip adding it once
1809 if (adv_instance && eir_get_data(adv_instance->adv_data,
1810 adv_instance->adv_data_len, EIR_FLAGS,
1814 /* The Add Advertising command allows userspace to set both the general
1815 * and limited discoverable flags.
1817 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1818 flags |= LE_AD_GENERAL;
1820 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1821 flags |= LE_AD_LIMITED;
1823 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1824 flags |= LE_AD_NO_BREDR;
1826 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1827 /* If a discovery flag wasn't provided, simply use the global
1831 flags |= mgmt_get_adv_discov_flags(hdev);
1833 /* If flags would still be empty, then there is no need to
1834 * include the "Flags" AD field".
/* Copy the instance's raw advertising bytes after the flags field */
1848 memcpy(ptr, adv_instance->adv_data,
1849 adv_instance->adv_data_len);
1850 ad_len += adv_instance->adv_data_len;
1851 ptr += adv_instance->adv_data_len;
1854 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
/* Extended advertising keeps a per-instance Tx power, otherwise the
 * adapter-wide value is used.
 */
1857 if (ext_adv_capable(hdev)) {
1859 adv_tx_power = adv_instance->tx_power;
1861 adv_tx_power = hdev->adv_tx_power;
1863 adv_tx_power = hdev->adv_tx_power;
1866 /* Provide Tx Power only if we can provide a valid value for it */
1867 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1869 ptr[1] = EIR_TX_POWER;
1870 ptr[2] = (u8)adv_tx_power;
/* Push fresh advertising data for @instance to the controller, via the
 * extended or legacy HCI command. The HCI command is skipped when the
 * payload matches the cached copy in hdev->adv_data.
 */
1880 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1882 struct hci_dev *hdev = req->hdev;
1885 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1888 if (ext_adv_capable(hdev)) {
1890 struct hci_cp_le_set_ext_adv_data cp;
1891 u8 data[HCI_MAX_EXT_AD_LENGTH];
1894 memset(&pdu, 0, sizeof(pdu));
1896 len = create_instance_adv_data(hdev, instance, pdu.data);
1899 /* Bluez will handle the advertising data including the flag and tx
1900 * power. This value will be updated only when application request the
1901 * update using adapter_set_advertising_data().
1905 /* There's nothing to do if the data hasn't changed */
1906 if (hdev->adv_data_len == len &&
1907 memcmp(pdu.data, hdev->adv_data, len) == 0)
1910 memcpy(hdev->adv_data, pdu.data, len);
1911 hdev->adv_data_len = len;
1913 pdu.cp.length = len;
1914 pdu.cp.handle = instance;
1915 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1916 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1918 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1919 sizeof(pdu.cp) + len, &pdu.cp);
/* Legacy controllers use the fixed-size adv data command */
1922 struct hci_cp_le_set_adv_data cp;
1924 memset(&cp, 0, sizeof(cp));
1926 len = create_instance_adv_data(hdev, instance, cp.data);
1929 /* Bluez will handle the advertising data including the flag and tx
1930 * power. This value will be updated only when application request the
1931 * update using adapter_set_advertising_data().
1935 /* There's nothing to do if the data hasn't changed */
1936 if (hdev->adv_data_len == len &&
1937 memcmp(cp.data, hdev->adv_data, len) == 0)
1940 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1941 hdev->adv_data_len = len;
1945 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1950 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1952 struct hci_request req;
1954 hci_req_init(&req, hdev);
1955 __hci_req_update_adv_data(&req, instance);
1957 return hci_req_run(&req, NULL);
1960 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1963 BT_DBG("%s status %u", hdev->name, status);
1966 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1968 struct hci_request req;
1971 if (!use_ll_privacy(hdev) &&
1972 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1975 hci_req_init(&req, hdev);
1977 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1979 hci_req_run(&req, enable_addr_resolution_complete);
1982 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1984 bt_dev_dbg(hdev, "status %u", status);
1987 void hci_req_reenable_advertising(struct hci_dev *hdev)
1989 struct hci_request req;
1991 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1992 list_empty(&hdev->adv_instances))
1995 hci_req_init(&req, hdev);
1997 if (hdev->cur_adv_instance) {
1998 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
2001 if (ext_adv_capable(hdev)) {
2002 __hci_req_start_ext_adv(&req, 0x00);
2004 __hci_req_update_adv_data(&req, 0x00);
2005 __hci_req_update_scan_rsp_data(&req, 0x00);
2006 __hci_req_enable_advertising(&req);
2010 hci_req_run(&req, adv_enable_complete);
/* Delayed-work handler fired when the current advertising instance's
 * duration expires: clears the instance (keeping it stored unless its
 * lifetime ran out), disables advertising if no instances remain, and
 * runs the resulting request.
 */
2013 static void adv_timeout_expire(struct work_struct *work)
2015 struct hci_dev *hdev = container_of(work, struct hci_dev,
2016 adv_instance_expire.work);
2018 struct hci_request req;
2021 bt_dev_dbg(hdev, "");
/* Timeout has fired, so no timer is outstanding any more */
2025 hdev->adv_instance_timeout = 0;
/* Instance 0 is never scheduled with a timeout; nothing to expire */
2027 instance = hdev->cur_adv_instance;
2028 if (instance == 0x00)
2031 hci_req_init(&req, hdev);
2033 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
2035 if (list_empty(&hdev->adv_instances))
2036 __hci_req_disable_advertising(&req);
2038 hci_req_run(&req, NULL);
2041 hci_dev_unlock(hdev);
/* Request builder for one interleave-scan cycle: restart passive
 * scanning (disabling first if active) and flip the interleave state
 * between allowlist and no-filter for the next cycle.
 */
2044 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
2047 struct hci_dev *hdev = req->hdev;
2052 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2053 hci_req_add_le_scan_disable(req, false);
2054 hci_req_add_le_passive_scan(req);
2056 switch (hdev->interleave_scan_state) {
2057 case INTERLEAVE_SCAN_ALLOWLIST:
2058 bt_dev_dbg(hdev, "next state: allowlist");
2059 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2061 case INTERLEAVE_SCAN_NO_FILTER:
2062 bt_dev_dbg(hdev, "next state: no filter");
2063 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
/* NONE means interleaving was cancelled; reaching here is a bug */
2065 case INTERLEAVE_SCAN_NONE:
2066 BT_ERR("unexpected error");
2070 hci_dev_unlock(hdev);
/* Delayed-work handler driving the interleave-scan rotation: pick the
 * duration matching the current state, run one synchronous scan-switch
 * request, and re-arm the work unless interleaving was cancelled.
 */
2075 static void interleave_scan_work(struct work_struct *work)
2077 struct hci_dev *hdev = container_of(work, struct hci_dev,
2078 interleave_scan.work);
2080 unsigned long timeout;
2082 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2083 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2084 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2085 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2087 bt_dev_err(hdev, "unexpected error");
2091 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2092 HCI_CMD_TIMEOUT, &status);
2094 /* Don't continue interleaving if it was canceled */
2095 if (is_interleave_scanning(hdev))
2096 queue_delayed_work(hdev->req_workqueue,
2097 &hdev->interleave_scan, timeout);
/* Pick the own-address type and (when applicable) the random address
 * to use for advertising, without queueing any HCI command.
 *
 * Order of preference: RPA when privacy is in use (regenerating it if
 * expired), a freshly generated non-resolvable private address when
 * privacy is required but RPAs are not used, otherwise the public
 * address. @rand_addr is BDADDR_ANY unless a random address applies.
 * Returns 0 on success or a negative error.
 */
2100 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2101 bool use_rpa, struct adv_info *adv_instance,
2102 u8 *own_addr_type, bdaddr_t *rand_addr)
2106 bacpy(rand_addr, BDADDR_ANY);
2108 /* If privacy is enabled use a resolvable private address. If
2109 * current RPA has expired then generate a new one.
2112 /* If Controller supports LL Privacy use own address type is
2115 if (use_ll_privacy(hdev) &&
2116 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2117 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2119 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Reuse the current RPA while it is still valid */
2122 if (adv_rpa_valid(adv_instance))
2125 if (rpa_valid(hdev))
2129 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2131 bt_dev_err(hdev, "failed to generate new RPA");
2135 bacpy(rand_addr, &hdev->rpa);
2140 /* In case of required privacy without resolvable private address,
2141 * use an non-resolvable private address. This is useful for
2142 * non-connectable advertising.
2144 if (require_privacy) {
2148 /* The non-resolvable private address is generated
2149 * from random six bytes with the two most significant
2152 get_random_bytes(&nrpa, 6);
2155 /* The non-resolvable private address shall not be
2156 * equal to the public address.
2158 if (bacmp(&hdev->bdaddr, &nrpa))
2162 *own_addr_type = ADDR_LE_DEV_RANDOM;
2163 bacpy(rand_addr, &nrpa);
2168 /* No privacy so use a public address. */
2169 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2174 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2176 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2179 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2181 struct hci_dev *hdev = req->hdev;
2183 /* If we're advertising or initiating an LE connection we can't
2184 * go ahead and change the random address at this time. This is
2185 * because the eventual initiator address used for the
2186 * subsequently created connection will be undefined (some
2187 * controllers use the new address and others the one we had
2188 * when the operation started).
2190 * In this kind of scenario skip the update and let the random
2191 * address be updated at the next cycle.
2193 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2194 hci_lookup_le_connect(hdev)) {
2195 bt_dev_dbg(hdev, "Deferring random address update");
2196 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2200 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Queue LE Set Extended Advertising Parameters for @instance, plus the
 * per-set random address command when one is needed. Event properties,
 * intervals, Tx power and PHYs are derived from the instance (or the
 * adapter defaults for instance 0x00). Returns 0 or a negative error.
 */
2203 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2205 struct hci_cp_le_set_ext_adv_params cp;
2206 struct hci_dev *hdev = req->hdev;
2209 bdaddr_t random_addr;
2212 struct adv_info *adv_instance;
2216 adv_instance = hci_find_adv_instance(hdev, instance);
2220 adv_instance = NULL;
2223 flags = get_adv_instance_flags(hdev, instance);
2225 /* If the "connectable" instance flag was not set, then choose between
2226 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2228 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2229 mgmt_get_connectable(hdev);
2231 if (!is_advertising_allowed(hdev, connectable))
2234 /* Set require_privacy to true only when non-connectable
2235 * advertising is used. In that case it is fine to use a
2236 * non-resolvable private address.
2238 err = hci_get_random_address(hdev, !connectable,
2239 adv_use_rpa(hdev, flags), adv_instance,
2240 &own_addr_type, &random_addr);
2244 memset(&cp, 0, sizeof(cp));
/* Per-instance intervals/Tx power win over the adapter defaults */
2247 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2248 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2249 cp.tx_power = adv_instance->tx_power;
2251 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2252 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2253 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2256 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
/* Pick extended vs legacy PDU event properties by connectability and
 * scannability.
 */
2260 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2262 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2263 } else if (adv_instance_is_scannable(hdev, instance) ||
2264 (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
2266 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2268 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2271 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2273 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2276 cp.own_addr_type = own_addr_type;
2277 cp.channel_map = hdev->le_adv_channel_map;
2278 cp.handle = instance;
2280 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2281 cp.primary_phy = HCI_ADV_PHY_1M;
2282 cp.secondary_phy = HCI_ADV_PHY_2M;
2283 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2284 cp.primary_phy = HCI_ADV_PHY_CODED;
2285 cp.secondary_phy = HCI_ADV_PHY_CODED;
2287 /* In all other cases use 1M */
2288 cp.primary_phy = HCI_ADV_PHY_1M;
2289 cp.secondary_phy = HCI_ADV_PHY_1M;
2292 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2294 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2295 bacmp(&random_addr, BDADDR_ANY)) {
2296 struct hci_cp_le_set_adv_set_rand_addr cp;
2298 /* Check if random address need to be updated */
2300 if (!bacmp(&random_addr, &adv_instance->random_addr))
2303 if (!bacmp(&random_addr, &hdev->random_addr))
2305 /* Instance 0x00 doesn't have an adv_info, instead it
2306 * uses hdev->random_addr to track its address so
2307 * whenever it needs to be updated this also set the
2308 * random address since hdev->random_addr is shared with
2309 * scan state machine.
2311 set_random_addr(req, &random_addr);
2314 memset(&cp, 0, sizeof(cp));
2316 cp.handle = instance;
2317 bacpy(&cp.bdaddr, &random_addr);
2320 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
/* Queue LE Set Extended Advertising Enable for a single set @instance.
 * The command carries one adv-set entry; its duration is taken from
 * the instance's timeout (the controller then handles expiry itself).
 */
2327 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2329 struct hci_dev *hdev = req->hdev;
2330 struct hci_cp_le_set_ext_adv_enable *cp;
2331 struct hci_cp_ext_adv_set *adv_set;
/* Buffer holds the enable header plus exactly one set entry */
2332 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2333 struct adv_info *adv_instance;
2336 adv_instance = hci_find_adv_instance(hdev, instance);
2340 adv_instance = NULL;
2344 adv_set = (void *) cp->data;
2346 memset(cp, 0, sizeof(*cp));
2349 cp->num_of_sets = 0x01;
2351 memset(adv_set, 0, sizeof(*adv_set));
2353 adv_set->handle = instance;
2355 /* Set duration per instance since controller is responsible for
2358 if (adv_instance && adv_instance->timeout) {
2359 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2361 /* Time = N * 10 ms */
2362 adv_set->duration = cpu_to_le16(duration / 10);
2365 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2366 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
/* Queue LE Set Extended Advertising Enable with enable = 0 for
 * @instance; instance 0x00 disables all sets (num_of_sets == 0).
 * Fails when a non-zero instance does not exist.
 */
2372 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2374 struct hci_dev *hdev = req->hdev;
2375 struct hci_cp_le_set_ext_adv_enable *cp;
2376 struct hci_cp_ext_adv_set *adv_set;
2377 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2380 /* If request specifies an instance that doesn't exist, fail */
2381 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
/* Zeroed buffer: enable = 0x00, i.e. disable */
2384 memset(data, 0, sizeof(data));
2387 adv_set = (void *)cp->data;
2389 /* Instance 0x00 indicates all advertising instances will be disabled */
2390 cp->num_of_sets = !!instance;
2393 adv_set->handle = instance;
/* The set entry is only included when a specific set is targeted */
2395 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2396 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2401 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2403 struct hci_dev *hdev = req->hdev;
2405 /* If request specifies an instance that doesn't exist, fail */
2406 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2409 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2414 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2416 struct hci_dev *hdev = req->hdev;
2417 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2420 /* If instance isn't pending, the chip knows about it, and it's safe to
2423 if (adv_instance && !adv_instance->pending)
2424 __hci_req_disable_ext_adv_instance(req, instance);
2426 err = __hci_req_setup_ext_adv_instance(req, instance);
2430 __hci_req_update_scan_rsp_data(req, instance);
2431 __hci_req_enable_ext_advertising(req, instance);
/* Make @instance the current advertising instance and queue the
 * commands to advertise it. For legacy controllers a delayed work is
 * armed for the instance duration (ext-adv controllers time out in
 * hardware). With @force false, rescheduling the already-active
 * instance is a no-op.
 */
2436 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2439 struct hci_dev *hdev = req->hdev;
2440 struct adv_info *adv_instance = NULL;
/* Instance scheduling only applies to software-rotated advertising */
2443 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2444 list_empty(&hdev->adv_instances))
/* An expiry timer is already pending for another instance */
2447 if (hdev->adv_instance_timeout)
2450 adv_instance = hci_find_adv_instance(hdev, instance);
2454 /* A zero timeout means unlimited advertising. As long as there is
2455 * only one instance, duration should be ignored. We still set a timeout
2456 * in case further instances are being added later on.
2458 * If the remaining lifetime of the instance is more than the duration
2459 * then the timeout corresponds to the duration, otherwise it will be
2460 * reduced to the remaining instance lifetime.
2462 if (adv_instance->timeout == 0 ||
2463 adv_instance->duration <= adv_instance->remaining_time)
2464 timeout = adv_instance->duration;
2466 timeout = adv_instance->remaining_time;
2468 /* The remaining time is being reduced unless the instance is being
2469 * advertised without time limit.
2471 if (adv_instance->timeout)
2472 adv_instance->remaining_time =
2473 adv_instance->remaining_time - timeout;
2475 /* Only use work for scheduling instances with legacy advertising */
2476 if (!ext_adv_capable(hdev)) {
2477 hdev->adv_instance_timeout = timeout;
2478 queue_delayed_work(hdev->req_workqueue,
2479 &hdev->adv_instance_expire,
2480 msecs_to_jiffies(timeout * 1000));
2483 /* If we're just re-scheduling the same instance again then do not
2484 * execute any HCI commands. This happens when a single instance is
2487 if (!force && hdev->cur_adv_instance == instance &&
2488 hci_dev_test_flag(hdev, HCI_LE_ADV))
2491 hdev->cur_adv_instance = instance;
2492 if (ext_adv_capable(hdev)) {
2493 __hci_req_start_ext_adv(req, instance);
2495 __hci_req_update_adv_data(req, instance);
2496 __hci_req_update_scan_rsp_data(req, instance);
2497 __hci_req_enable_advertising(req);
2503 /* For a single instance:
2504 * - force == true: The instance will be removed even when its remaining
2505 * lifetime is not zero.
2506 * - force == false: the instance will be deactivated but kept stored unless
2507 * the remaining lifetime is zero.
2509 * For instance == 0x00:
2510 * - force == true: All instances will be removed regardless of their timeout
2512 * - force == false: Only instances that have a timeout will be removed.
/* Removed instances are reported to userspace via
 * mgmt_advertising_removed() (with @sk identifying the requester), and
 * when a request @req is supplied the next instance is rescheduled.
 */
2514 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2515 struct hci_request *req, u8 instance,
2518 struct adv_info *adv_instance, *n, *next_instance = NULL;
2522 /* Cancel any timeout concerning the removed instance(s). */
2523 if (!instance || hdev->cur_adv_instance == instance)
2524 cancel_adv_timeout(hdev);
2526 /* Get the next instance to advertise BEFORE we remove
2527 * the current one. This can be the same instance again
2528 * if there is only one instance.
2530 if (instance && hdev->cur_adv_instance == instance)
2531 next_instance = hci_get_next_instance(hdev, instance);
2533 if (instance == 0x00) {
2534 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
/* Without force, keep instances that advertise indefinitely */
2536 if (!(force || adv_instance->timeout))
2539 rem_inst = adv_instance->instance;
2540 err = hci_remove_adv_instance(hdev, rem_inst);
2542 mgmt_advertising_removed(sk, hdev, rem_inst);
2545 adv_instance = hci_find_adv_instance(hdev, instance);
/* Single instance: remove when forced or when its lifetime ran out */
2547 if (force || (adv_instance && adv_instance->timeout &&
2548 !adv_instance->remaining_time)) {
2549 /* Don't advertise a removed instance. */
2550 if (next_instance &&
2551 next_instance->instance == instance)
2552 next_instance = NULL;
2554 err = hci_remove_adv_instance(hdev, instance);
2556 mgmt_advertising_removed(sk, hdev, instance);
/* Rescheduling needs a request, a powered adapter and software-rotated
 * (non HCI_ADVERTISING) advertising.
 */
2560 if (!req || !hdev_is_powered(hdev) ||
2561 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2564 if (next_instance && !ext_adv_capable(hdev))
2565 __hci_req_schedule_adv_instance(req, next_instance->instance,
/* Choose the own-address type for scanning/initiating/advertising and
 * queue a Set Random Address command when needed.
 *
 * Preference: RPA when privacy is on (regenerated if expired), a fresh
 * non-resolvable private address when privacy is required without RPA,
 * the static address when forced / no public address / BR/EDR-off with
 * a static address configured, else the public address. Returns 0 or
 * a negative error.
 */
2569 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2570 bool use_rpa, u8 *own_addr_type)
2572 struct hci_dev *hdev = req->hdev;
2575 /* If privacy is enabled use a resolvable private address. If
2576 * current RPA has expired or there is something else than
2577 * the current RPA in use, then generate a new one.
2580 /* If Controller supports LL Privacy use own address type is
2583 if (use_ll_privacy(hdev) &&
2584 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2585 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2587 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Keep the current RPA while it is still valid */
2589 if (rpa_valid(hdev))
2592 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2594 bt_dev_err(hdev, "failed to generate new RPA");
2598 set_random_addr(req, &hdev->rpa);
2603 /* In case of required privacy without resolvable private address,
2604 * use an non-resolvable private address. This is useful for active
2605 * scanning and non-connectable advertising.
2607 if (require_privacy) {
2611 /* The non-resolvable private address is generated
2612 * from random six bytes with the two most significant
2615 get_random_bytes(&nrpa, 6);
2618 /* The non-resolvable private address shall not be
2619 * equal to the public address.
2621 if (bacmp(&hdev->bdaddr, &nrpa))
2625 *own_addr_type = ADDR_LE_DEV_RANDOM;
2626 set_random_addr(req, &nrpa);
2630 /* If forcing static address is in use or there is no public
2631 * address use the static address as random address (but skip
2632 * the HCI command if the current random address is already the
2635 * In case BR/EDR has been disabled on a dual-mode controller
2636 * and a static address has been configured, then use that
2637 * address instead of the public BR/EDR address.
2639 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2640 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2641 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2642 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2643 *own_addr_type = ADDR_LE_DEV_RANDOM;
2644 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2645 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2646 &hdev->static_addr);
2650 /* Neither privacy nor static address is being used so use a
2653 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2658 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2660 struct bdaddr_list *b;
2662 list_for_each_entry(b, &hdev->accept_list, list) {
2663 struct hci_conn *conn;
2665 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2669 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2676 void __hci_req_update_scan(struct hci_request *req)
2678 struct hci_dev *hdev = req->hdev;
2681 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2684 if (!hdev_is_powered(hdev))
2687 if (mgmt_powering_down(hdev))
2690 if (hdev->scanning_paused)
2693 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2694 disconnected_accept_list_entries(hdev))
2697 scan = SCAN_DISABLED;
2699 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2700 scan |= SCAN_INQUIRY;
2702 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2703 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2706 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2709 static int update_scan(struct hci_request *req, unsigned long opt)
2711 hci_dev_lock(req->hdev);
2712 __hci_req_update_scan(req);
2713 hci_dev_unlock(req->hdev);
2717 static void scan_update_work(struct work_struct *work)
2719 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2721 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2724 static int connectable_update(struct hci_request *req, unsigned long opt)
2726 struct hci_dev *hdev = req->hdev;
2730 __hci_req_update_scan(req);
2732 /* If BR/EDR is not enabled and we disable advertising as a
2733 * by-product of disabling connectable, we need to update the
2734 * advertising flags.
2736 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2737 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2739 /* Update the advertising parameters if necessary */
2740 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2741 !list_empty(&hdev->adv_instances)) {
2742 if (ext_adv_capable(hdev))
2743 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2745 __hci_req_enable_advertising(req);
2748 __hci_update_background_scan(req);
2750 hci_dev_unlock(hdev);
2755 static void connectable_update_work(struct work_struct *work)
2757 struct hci_dev *hdev = container_of(work, struct hci_dev,
2758 connectable_update);
2761 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2762 mgmt_set_connectable_complete(hdev, status);
2765 static u8 get_service_classes(struct hci_dev *hdev)
2767 struct bt_uuid *uuid;
2770 list_for_each_entry(uuid, &hdev->uuids, list)
2771 val |= uuid->svc_hint;
2776 void __hci_req_update_class(struct hci_request *req)
2778 struct hci_dev *hdev = req->hdev;
2781 bt_dev_dbg(hdev, "");
2783 if (!hdev_is_powered(hdev))
2786 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2789 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2792 cod[0] = hdev->minor_class;
2793 cod[1] = hdev->major_class;
2794 cod[2] = get_service_classes(hdev);
2796 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2799 if (memcmp(cod, hdev->dev_class, 3) == 0)
2802 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2805 static void write_iac(struct hci_request *req)
2807 struct hci_dev *hdev = req->hdev;
2808 struct hci_cp_write_current_iac_lap cp;
2810 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2813 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2814 /* Limited discoverable mode */
2815 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2816 cp.iac_lap[0] = 0x00; /* LIAC */
2817 cp.iac_lap[1] = 0x8b;
2818 cp.iac_lap[2] = 0x9e;
2819 cp.iac_lap[3] = 0x33; /* GIAC */
2820 cp.iac_lap[4] = 0x8b;
2821 cp.iac_lap[5] = 0x9e;
2823 /* General discoverable mode */
2825 cp.iac_lap[0] = 0x33; /* GIAC */
2826 cp.iac_lap[1] = 0x8b;
2827 cp.iac_lap[2] = 0x9e;
2830 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2831 (cp.num_iac * 3) + 1, &cp);
2834 static int discoverable_update(struct hci_request *req, unsigned long opt)
2836 struct hci_dev *hdev = req->hdev;
2840 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2842 __hci_req_update_scan(req);
2843 __hci_req_update_class(req);
2846 /* Advertising instances don't use the global discoverable setting, so
2847 * only update AD if advertising was enabled using Set Advertising.
2849 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2850 __hci_req_update_adv_data(req, 0x00);
2852 /* Discoverable mode affects the local advertising
2853 * address in limited privacy mode.
2855 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2856 if (ext_adv_capable(hdev))
2857 __hci_req_start_ext_adv(req, 0x00);
2859 __hci_req_enable_advertising(req);
2863 hci_dev_unlock(hdev);
2868 static void discoverable_update_work(struct work_struct *work)
2870 struct hci_dev *hdev = container_of(work, struct hci_dev,
2871 discoverable_update);
2874 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2875 mgmt_set_discoverable_complete(hdev, status);
/* Queue the HCI command(s) that abort @conn, chosen by its current state:
 * established links get a Disconnect (or Disconnect Physical Link for AMP),
 * outgoing connection attempts get a Create Connection Cancel, and incoming
 * requests get a rejection.
 *
 * NOTE(review): this extraction has dropped lines (case labels, the
 * cp.reason/dc.reason assignments, break statements, and the body following
 * the first LE_LINK/bacmp check); comments below annotate only the visible
 * logic — verify against the complete upstream function.
 */
2878 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2881 	switch (conn->state) {
/* Established or configuring link: queue a disconnect command. */
2884 if (conn->type == AMP_LINK) {
2885 struct hci_cp_disconn_phy_link cp;
2887 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2889 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2892 struct hci_cp_disconnect dc;
2894 dc.handle = cpu_to_le16(conn->handle);
2896 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Mark the link as disconnecting so the event handlers finish teardown. */
2899 conn->state = BT_DISCONN;
/* Pending outgoing connection: cancel the create-connection attempt. */
2904 if (conn->type == LE_LINK && bacmp(&conn->dst, BDADDR_ANY)) {
2906 if (conn->type == LE_LINK) {
/* Still only passively scanning for the peer: nothing queued yet. */
2908 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2910 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2912 } else if (conn->type == ACL_LINK) {
/* Create Connection Cancel only exists from Bluetooth 1.2 onwards. */
2913 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2915 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection request: reject it. */
2920 if (conn->type == ACL_LINK) {
2921 struct hci_cp_reject_conn_req rej;
2923 bacpy(&rej.bdaddr, &conn->dst);
2924 rej.reason = reason;
2926 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2928 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2929 struct hci_cp_reject_sync_conn_req rej;
2931 bacpy(&rej.bdaddr, &conn->dst);
2933 /* SCO rejection has its own limited set of
2934 * allowed error values (0x0D-0x0F) which isn't
2935 * compatible with most values passed to this
2936 * function. To be safe hard-code one of the
2937 * values that's suitable for SCO.
2939 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2941 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
/* Any other state: nothing to send, just mark the connection closed. */
2946 conn->state = BT_CLOSED;
2951 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2954 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2957 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2959 struct hci_request req;
2962 hci_req_init(&req, conn->hdev);
2964 __hci_abort_conn(&req, conn, reason);
2966 err = hci_req_run(&req, abort_conn_complete);
2967 if (err && err != -ENODATA) {
2968 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2975 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2977 hci_dev_lock(req->hdev);
2978 __hci_update_background_scan(req);
2979 hci_dev_unlock(req->hdev);
2983 static void bg_scan_update(struct work_struct *work)
2985 struct hci_dev *hdev = container_of(work, struct hci_dev,
2987 struct hci_conn *conn;
2991 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2997 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2999 hci_le_conn_failed(conn, status);
3001 hci_dev_unlock(hdev);
3004 static int le_scan_disable(struct hci_request *req, unsigned long opt)
3006 hci_req_add_le_scan_disable(req, false);
3010 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
3013 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
3014 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
3015 struct hci_cp_inquiry cp;
3017 if (test_bit(HCI_INQUIRY, &req->hdev->flags))
3020 bt_dev_dbg(req->hdev, "");
3022 hci_dev_lock(req->hdev);
3023 hci_inquiry_cache_flush(req->hdev);
3024 hci_dev_unlock(req->hdev);
3026 memset(&cp, 0, sizeof(cp));
3028 if (req->hdev->discovery.limited)
3029 memcpy(&cp.lap, liac, sizeof(cp.lap));
3031 memcpy(&cp.lap, giac, sizeof(cp.lap));
3035 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3040 static void le_scan_disable_work(struct work_struct *work)
3042 struct hci_dev *hdev = container_of(work, struct hci_dev,
3043 le_scan_disable.work);
3046 bt_dev_dbg(hdev, "");
3048 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3051 cancel_delayed_work(&hdev->le_scan_restart);
3053 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
3055 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
3060 hdev->discovery.scan_start = 0;
3062 /* If we were running LE only scan, change discovery state. If
3063 * we were running both LE and BR/EDR inquiry simultaneously,
3064 * and BR/EDR inquiry is already finished, stop discovery,
3065 * otherwise BR/EDR inquiry will stop discovery when finished.
3066 * If we will resolve remote device name, do not change
3070 if (hdev->discovery.type == DISCOV_TYPE_LE)
3071 goto discov_stopped;
3073 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3076 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3077 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3078 hdev->discovery.state != DISCOVERY_RESOLVING)
3079 goto discov_stopped;
3084 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3085 HCI_CMD_TIMEOUT, &status);
3087 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3088 goto discov_stopped;
3095 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3096 hci_dev_unlock(hdev);
3099 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3101 struct hci_dev *hdev = req->hdev;
3103 /* If controller is not scanning we are done. */
3104 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3107 if (hdev->scanning_paused) {
3108 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3112 hci_req_add_le_scan_disable(req, false);
3114 if (use_ext_scan(hdev)) {
3115 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3117 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3118 ext_enable_cp.enable = LE_SCAN_ENABLE;
3119 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3121 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3122 sizeof(ext_enable_cp), &ext_enable_cp);
3124 struct hci_cp_le_set_scan_enable cp;
3126 memset(&cp, 0, sizeof(cp));
3127 cp.enable = LE_SCAN_ENABLE;
3128 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3129 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3135 static void le_scan_restart_work(struct work_struct *work)
3137 struct hci_dev *hdev = container_of(work, struct hci_dev,
3138 le_scan_restart.work);
3139 unsigned long timeout, duration, scan_start, now;
3142 bt_dev_dbg(hdev, "");
3144 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3146 bt_dev_err(hdev, "failed to restart LE scan: status %d",
3153 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3154 !hdev->discovery.scan_start)
3157 /* When the scan was started, hdev->le_scan_disable has been queued
3158 * after duration from scan_start. During scan restart this job
3159 * has been canceled, and we need to queue it again after proper
3160 * timeout, to make sure that scan does not run indefinitely.
3162 duration = hdev->discovery.scan_duration;
3163 scan_start = hdev->discovery.scan_start;
3165 if (now - scan_start <= duration) {
3168 if (now >= scan_start)
3169 elapsed = now - scan_start;
3171 elapsed = ULONG_MAX - scan_start + now;
3173 timeout = duration - elapsed;
3178 queue_delayed_work(hdev->req_workqueue,
3179 &hdev->le_scan_disable, timeout);
3182 hci_dev_unlock(hdev);
3185 static int active_scan(struct hci_request *req, unsigned long opt)
3187 uint16_t interval = opt;
3188 struct hci_dev *hdev = req->hdev;
3190 /* Accept list is not used for discovery */
3191 u8 filter_policy = 0x00;
3192 /* Default is to enable duplicates filter */
3193 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3194 /* Discovery doesn't require controller address resolution */
3195 bool addr_resolv = false;
3198 bt_dev_dbg(hdev, "");
3200 /* If controller is scanning, it means the background scanning is
3201 * running. Thus, we should temporarily stop it in order to set the
3202 * discovery scanning parameters.
3204 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3205 hci_req_add_le_scan_disable(req, false);
3206 cancel_interleave_scan(hdev);
3209 /* All active scans will be done with either a resolvable private
3210 * address (when privacy feature has been enabled) or non-resolvable
3213 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3216 own_addr_type = ADDR_LE_DEV_PUBLIC;
3219 if (hci_is_adv_monitoring(hdev)) {
3220 /* Duplicate filter should be disabled when some advertisement
3221 * monitor is activated, otherwise AdvMon can only receive one
3222 * advertisement for one peer(*) during active scanning, and
3223 * might report loss to these peers.
3225 * Note that different controllers have different meanings of
3226 * |duplicate|. Some of them consider packets with the same
3227 * address as duplicate, and others consider packets with the
3228 * same address and the same RSSI as duplicate. Although in the
3229 * latter case we don't need to disable duplicate filter, but
3230 * it is common to have active scanning for a short period of
3231 * time, the power impact should be neglectable.
3233 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
3235 hci_dev_unlock(hdev);
3237 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3238 hdev->le_scan_window_discovery, own_addr_type,
3239 filter_policy, filter_dup, addr_resolv);
3243 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3247 bt_dev_dbg(req->hdev, "");
3249 err = active_scan(req, opt);
3253 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3256 static void start_discovery(struct hci_dev *hdev, u8 *status)
3258 unsigned long timeout;
3260 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3262 switch (hdev->discovery.type) {
3263 case DISCOV_TYPE_BREDR:
3264 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3265 hci_req_sync(hdev, bredr_inquiry,
3266 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3269 case DISCOV_TYPE_INTERLEAVED:
3270 /* When running simultaneous discovery, the LE scanning time
3271 * should occupy the whole discovery time sine BR/EDR inquiry
3272 * and LE scanning are scheduled by the controller.
3274 * For interleaving discovery in comparison, BR/EDR inquiry
3275 * and LE scanning are done sequentially with separate
3278 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3280 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3281 /* During simultaneous discovery, we double LE scan
3282 * interval. We must leave some time for the controller
3283 * to do BR/EDR inquiry.
3285 hci_req_sync(hdev, interleaved_discov,
3286 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3291 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3292 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3293 HCI_CMD_TIMEOUT, status);
3295 case DISCOV_TYPE_LE:
3296 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3297 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3298 HCI_CMD_TIMEOUT, status);
3301 *status = HCI_ERROR_UNSPECIFIED;
3308 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3310 /* When service discovery is used and the controller has a
3311 * strict duplicate filter, it is important to remember the
3312 * start and duration of the scan. This is required for
3313 * restarting scanning during the discovery phase.
3315 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3316 hdev->discovery.result_filtering) {
3317 hdev->discovery.scan_start = jiffies;
3318 hdev->discovery.scan_duration = timeout;
3321 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3325 bool hci_req_stop_discovery(struct hci_request *req)
3327 struct hci_dev *hdev = req->hdev;
3328 struct discovery_state *d = &hdev->discovery;
3329 struct hci_cp_remote_name_req_cancel cp;
3330 struct inquiry_entry *e;
3333 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3335 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3336 if (test_bit(HCI_INQUIRY, &hdev->flags))
3337 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3339 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3340 cancel_delayed_work(&hdev->le_scan_disable);
3341 cancel_delayed_work(&hdev->le_scan_restart);
3342 hci_req_add_le_scan_disable(req, false);
3347 /* Passive scanning */
3348 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3349 hci_req_add_le_scan_disable(req, false);
3354 /* No further actions needed for LE-only discovery */
3355 if (d->type == DISCOV_TYPE_LE)
3358 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3359 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3364 bacpy(&cp.bdaddr, &e->data.bdaddr);
3365 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3373 static int stop_discovery(struct hci_request *req, unsigned long opt)
3375 hci_dev_lock(req->hdev);
3376 hci_req_stop_discovery(req);
3377 hci_dev_unlock(req->hdev);
3382 static void discov_update(struct work_struct *work)
3384 struct hci_dev *hdev = container_of(work, struct hci_dev,
3388 switch (hdev->discovery.state) {
3389 case DISCOVERY_STARTING:
3390 start_discovery(hdev, &status);
3391 mgmt_start_discovery_complete(hdev, status);
3393 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3395 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3397 case DISCOVERY_STOPPING:
3398 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3399 mgmt_stop_discovery_complete(hdev, status);
3401 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3403 case DISCOVERY_STOPPED:
3409 static void discov_off(struct work_struct *work)
3411 struct hci_dev *hdev = container_of(work, struct hci_dev,
3414 bt_dev_dbg(hdev, "");
3418 /* When discoverable timeout triggers, then just make sure
3419 * the limited discoverable flag is cleared. Even in the case
3420 * of a timeout triggered from general discoverable, it is
3421 * safe to unconditionally clear the flag.
3423 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3424 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3425 hdev->discov_timeout = 0;
3427 hci_dev_unlock(hdev);
3429 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3430 mgmt_new_settings(hdev);
3433 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3435 struct hci_dev *hdev = req->hdev;
3440 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3441 !lmp_host_ssp_capable(hdev)) {
3444 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3446 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3449 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3450 sizeof(support), &support);
3454 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3455 lmp_bredr_capable(hdev)) {
3456 struct hci_cp_write_le_host_supported cp;
3461 /* Check first if we already have the right
3462 * host state (host features set)
3464 if (cp.le != lmp_host_le_capable(hdev) ||
3465 cp.simul != lmp_host_le_br_capable(hdev))
3466 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3470 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3471 /* Make sure the controller has a good default for
3472 * advertising data. This also applies to the case
3473 * where BR/EDR was toggled during the AUTO_OFF phase.
3475 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3476 list_empty(&hdev->adv_instances)) {
3479 if (ext_adv_capable(hdev)) {
3480 err = __hci_req_setup_ext_adv_instance(req,
3483 __hci_req_update_scan_rsp_data(req,
3487 __hci_req_update_adv_data(req, 0x00);
3488 __hci_req_update_scan_rsp_data(req, 0x00);
3491 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3492 if (!ext_adv_capable(hdev))
3493 __hci_req_enable_advertising(req);
3495 __hci_req_enable_ext_advertising(req,
3498 } else if (!list_empty(&hdev->adv_instances)) {
3499 struct adv_info *adv_instance;
3501 adv_instance = list_first_entry(&hdev->adv_instances,
3502 struct adv_info, list);
3503 __hci_req_schedule_adv_instance(req,
3504 adv_instance->instance,
3509 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3510 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3511 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3512 sizeof(link_sec), &link_sec);
3514 if (lmp_bredr_capable(hdev)) {
3515 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3516 __hci_req_write_fast_connectable(req, true);
3518 __hci_req_write_fast_connectable(req, false);
3519 __hci_req_update_scan(req);
3520 __hci_req_update_class(req);
3521 __hci_req_update_name(req);
3522 __hci_req_update_eir(req);
3525 hci_dev_unlock(hdev);
3529 int __hci_req_hci_power_on(struct hci_dev *hdev)
3531 /* Register the available SMP channels (BR/EDR and LE) only when
3532 * successfully powering on the controller. This late
3533 * registration is required so that LE SMP can clearly decide if
3534 * the public address or static address is used.
3538 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3542 void hci_request_setup(struct hci_dev *hdev)
3544 INIT_WORK(&hdev->discov_update, discov_update);
3545 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3546 INIT_WORK(&hdev->scan_update, scan_update_work);
3547 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3548 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3549 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3550 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3551 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3552 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3553 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3556 void hci_request_cancel_all(struct hci_dev *hdev)
3558 hci_req_sync_cancel(hdev, ENODEV);
3560 cancel_work_sync(&hdev->discov_update);
3561 cancel_work_sync(&hdev->bg_scan_update);
3562 cancel_work_sync(&hdev->scan_update);
3563 cancel_work_sync(&hdev->connectable_update);
3564 cancel_work_sync(&hdev->discoverable_update);
3565 cancel_delayed_work_sync(&hdev->discov_off);
3566 cancel_delayed_work_sync(&hdev->le_scan_disable);
3567 cancel_delayed_work_sync(&hdev->le_scan_restart);
3569 if (hdev->adv_instance_timeout) {
3570 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3571 hdev->adv_instance_timeout = 0;
3574 cancel_interleave_scan(hdev);