/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
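/* Usage sketch (illustrative only, not part of the original file): a caller
 * builds a request, queues one or more commands on it and runs it
 * asynchronously; the completion callback fires once the last command of the
 * request completes. The names example_complete/example_run are hypothetical.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "opcode 0x%4.4x status 0x%2.2x", opcode, status);
 *	}
 *
 *	static void example_run(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *		hci_req_run(&req, example_complete);
 *	}
 */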
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "");

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
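/* Usage sketch (illustrative only, not part of the original file): sending a
 * command synchronously and consuming the returned event skb. The choice of
 * HCI_OP_READ_BD_ADDR and the surrounding error handling are hypothetical.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	skb->data now holds the Command Complete return parameters and the
 *	caller is responsible for releasing the skb:
 *
 *	kfree_skb(skb);
 */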
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect the HCI_UP
	 * flag against any races from hci_dev_do_close when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}
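/* Usage sketch (illustrative only, not part of the original file):
 * hci_req_sync() takes a request-builder callback that queues commands on the
 * request and returns 0 on success. The names build_read_version and
 * hci_status below are hypothetical.
 *
 *	static int build_read_version(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *		return 0;
 *	}
 *
 *	u8 hci_status;
 *	int err = hci_req_sync(hdev, build_read_version, 0, HCI_CMD_TIMEOUT,
 *			       &hci_status);
 */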
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
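/* Layout sketch (illustrative only): the skb built above carries the HCI
 * command header followed by the parameters. For example, for opcode 0x0c14
 * (Read Local Name, plen 0) the payload handed to the driver is:
 *
 *	bytes 0-1: opcode, little endian (0x14 0x0c)
 *	byte  2:   parameter total length (0x00)
 *	byte  3..: parameters, present only when plen > 0
 */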
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable)
		type = PAGE_SCAN_TYPE_INTERLACED;
	else
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}
/* Return true if an interleave scan was newly started by this function,
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		bt_dev_dbg(hdev, "stopping background scanning");
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);
		bt_dev_dbg(hdev, "starting background scanning");
	}
}
void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else {
			ptr[1] = EIR_NAME_COMPLETE;
		}

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
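/* Layout sketch (illustrative only): create_eir() emits standard EIR
 * "length, type, data" triplets. For a hypothetical device named "abc" with
 * inq_tx_power of 4 dBm the first bytes of the buffer would be:
 *
 *	0x04 0x09 'a' 'b' 'c'	EIR_NAME_COMPLETE, length covers type + data
 *	0x02 0x0a 0x04		EIR_TX_POWER
 *
 * followed by the 16/32/128-bit service class UUID lists built above.
 */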
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (hdev->suspended)
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}
static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}
/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}
static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of accept list even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}
static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if the ext scan param and ext scan enable
	 * commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
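/* Sizing sketch (illustrative only): the extended scan parameter command is
 * variable length; plen grows by one hci_cp_le_scan_phy_params block for each
 * PHY selected in scanning_phys. With both the 1M and Coded PHYs enabled:
 *
 *	plen = sizeof(*ext_param_cp)	header, incl. scanning_phys bitmask
 *	     + sizeof(*phy_params)	LE_SCAN_PHY_1M entry
 *	     + sizeof(*phy_params);	LE_SCAN_PHY_CODED entry
 *
 * which is exactly the worst case the on-stack data[] buffer above is
 * sized for.
 */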
/* Returns true if an le connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}
/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;

		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}
static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return true;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return false;

	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv_instance->scan_rsp_len ? true : false;
}
static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
		memset(&f, 0, sizeof(f));
		f.flt_type = HCI_FLT_CLEAR_ALL;
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
	}
}
static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->accept_list, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	if (scan && !scanning) {
		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	} else if (!scan && scanning) {
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}
}
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}
/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}
	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}
/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}
static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);

	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}

	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}
}
static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
					      bool enable)
{
	struct hci_dev *hdev = req->hdev;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_req_add_set_filter_enable(req, enable);
		break;
	default:
		return;
	}

	/* No need to block when enabling since it's on resume path */
	if (hdev->suspended && !enable)
		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}
/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_pause_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;

		/* Disable page scan if enabled */
		if (test_bit(HCI_PSCAN, &hdev->flags)) {
			page_scan = SCAN_DISABLED;
			hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
				    &page_scan);
			set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		}

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_interleave_scan(hdev);
			hci_req_add_le_scan_disable(&req, false);
		}

		/* Disable advertisement filters */
		hci_req_add_set_adv_filter_enable(&req, false);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		__hci_update_background_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		/* Clear any event filters and restore scan state */
		hci_req_clear_event_filter(&req);
		__hci_req_update_scan(&req);

		/* Reset passive/background scanning to normal */
		__hci_update_background_scan(&req);
		/* Enable all of the advertisement filters */
		hci_req_add_set_adv_filter_enable(&req, true);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}
static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}
void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
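/* Mapping sketch (illustrative only): the "bit N" comments above refer to bit
 * positions in the 8-byte LE supported states mask, so a given bit is tested
 * as le_states[N / 8] & BIT(N % 8). For example, bit 20:
 *
 *	20 / 8 = 2 and 20 % 8 = 4  =>  le_states[2] & 0x10
 *
 * which matches the peripheral non-connectable check above.
 */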
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (adv_instance) {
		adv_min_interval = adv_instance->min_interval;
		adv_max_interval = adv_instance->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (adv_cur_instance_is_scannable(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);

	cp.filter_policy = hdev->adv_filter_policy;
	cp.type = hdev->adv_type;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}
1685 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1688 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1690 u8 scan_rsp_len = 0;
1692 if (hdev->appearance)
1693 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1695 return append_local_name(hdev, ptr, scan_rsp_len);
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_scan_rsp_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    pdu.data);
		else
			len = create_default_scan_rsp_data(hdev, pdu.data);

		/* Advertising scan response data is handled in bluez.
		 * This value will be updated only when the application
		 * requests the update using adapter_set_scan_rsp_data().
		 */

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, pdu.data, len);
		hdev->scan_rsp_data_len = len;

		pdu.cp.handle = instance;
		pdu.cp.length = len;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		/* Advertising scan response data is handled in bluez.
		 * This value will be updated only when the application
		 * requests the update using adapter_set_scan_rsp_data().
		 */

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* If instance already has the flags set skip adding it once
	 * again
	 */
	if (adv_instance && eir_get_data(adv_instance->adv_data,
					 adv_instance->adv_data_len, EIR_FLAGS,
					 NULL))
		goto skip_flags;

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

skip_flags:
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_adv_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = create_instance_adv_data(hdev, instance, pdu.data);

		/* Bluez will handle the advertising data including the flag
		 * and tx power. This value will be updated only when the
		 * application requests the update using
		 * adapter_set_advertising_data().
		 */

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(pdu.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, pdu.data, len);
		hdev->adv_data_len = len;

		pdu.cp.length = len;
		pdu.cp.handle = instance;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* Bluez will handle the advertising data including the flag
		 * and tx power. This value will be updated only when the
		 * application requests the update using
		 * adapter_set_advertising_data().
		 */

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}
static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 enable = 0x00;

	if (!use_ll_privacy(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

	hci_req_run(&req, enable_addr_resolution_complete);
}
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}
static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}
static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
	default:
		BT_ERR("unexpected error");
		ret = -1;
	}

	hci_dev_unlock(hdev);

	return ret;
}
static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	u8 status;
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
		     HCI_CMD_TIMEOUT, &status);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}
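/* Timing sketch (illustrative only): the work item re-queues itself with the
 * duration that matches the scan window it just programmed, so with
 * hypothetical values of advmon_allowlist_duration = 300 ms and
 * advmon_no_filter_duration = 500 ms the controller keeps alternating between
 * an allowlist-filtered scan window and an unfiltered scan window of those
 * lengths until cancel_interleave_scan() resets the state to
 * INTERLEAVE_SCAN_NONE.
 */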
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03.
		 */
		if (use_ll_privacy(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
2212 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2214 struct hci_cp_le_set_ext_adv_params cp;
2215 struct hci_dev *hdev = req->hdev;
2218 bdaddr_t random_addr;
2221 struct adv_info *adv_instance;
2225 adv_instance = hci_find_adv_instance(hdev, instance);
2229 adv_instance = NULL;
2232 flags = get_adv_instance_flags(hdev, instance);
2234 /* If the "connectable" instance flag was not set, then choose between
2235 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2237 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2238 mgmt_get_connectable(hdev);
2240 if (!is_advertising_allowed(hdev, connectable))
2243 /* Set require_privacy to true only when non-connectable
2244 * advertising is used. In that case it is fine to use a
2245 * non-resolvable private address.
2247 err = hci_get_random_address(hdev, !connectable,
2248 adv_use_rpa(hdev, flags), adv_instance,
2249 &own_addr_type, &random_addr);
2253 memset(&cp, 0, sizeof(cp));
2256 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2257 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2258 cp.tx_power = adv_instance->tx_power;
2260 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2261 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2262 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2265 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2269 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2271 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2272 } else if (adv_instance_is_scannable(hdev, instance) ||
2273 (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
2275 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2277 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2280 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2282 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2285 cp.own_addr_type = own_addr_type;
2286 cp.channel_map = hdev->le_adv_channel_map;
2287 cp.handle = instance;
2289 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2290 cp.primary_phy = HCI_ADV_PHY_1M;
2291 cp.secondary_phy = HCI_ADV_PHY_2M;
2292 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2293 cp.primary_phy = HCI_ADV_PHY_CODED;
2294 cp.secondary_phy = HCI_ADV_PHY_CODED;
2296 /* In all other cases use 1M */
2297 cp.primary_phy = HCI_ADV_PHY_1M;
2298 cp.secondary_phy = HCI_ADV_PHY_1M;
2301 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2303 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2304 bacmp(&random_addr, BDADDR_ANY)) {
2305 struct hci_cp_le_set_adv_set_rand_addr cp;
2307 /* Check if random address need to be updated */
2309 if (!bacmp(&random_addr, &adv_instance->random_addr))
2312 if (!bacmp(&random_addr, &hdev->random_addr))
2314 /* Instance 0x00 doesn't have an adv_info; instead it
2315 * uses hdev->random_addr to track its address, so
2316 * whenever that address needs to be updated this also sets
2317 * the random address, since hdev->random_addr is shared
2318 * with the scan state machine.
2320 set_random_addr(req, &random_addr);
2323 memset(&cp, 0, sizeof(cp));
2325 cp.handle = instance;
2326 bacpy(&cp.bdaddr, &random_addr);
2329 hci_req_add(req, HCI_OP_LE_SET_ADV_SET_RAND_ADDR, sizeof(cp), &cp);
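/* Enable the extended advertising set for @instance. The per-set
 * duration is derived from the instance timeout (in 10 ms units), so
 * the controller itself stops the set when it expires.
 */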
2336 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2338 struct hci_dev *hdev = req->hdev;
2339 struct hci_cp_le_set_ext_adv_enable *cp;
2340 struct hci_cp_ext_adv_set *adv_set;
2341 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2342 struct adv_info *adv_instance;
2345 adv_instance = hci_find_adv_instance(hdev, instance);
2349 adv_instance = NULL;
2353 adv_set = (void *) cp->data;
2355 memset(cp, 0, sizeof(*cp));
2358 cp->num_of_sets = 0x01;
2360 memset(adv_set, 0, sizeof(*adv_set));
2362 adv_set->handle = instance;
2364 /* Set duration per instance since the controller is responsible for scheduling it. */
2367 if (adv_instance && adv_instance->timeout) {
2368 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2370 /* Time = N * 10 ms */
2371 adv_set->duration = cpu_to_le16(duration / 10);
2374 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2375 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets, data);
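/* Disable extended advertising for @instance. Instance 0x00 disables
 * all advertising sets (num_of_sets == 0).
 */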
2381 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2383 struct hci_dev *hdev = req->hdev;
2384 struct hci_cp_le_set_ext_adv_enable *cp;
2385 struct hci_cp_ext_adv_set *adv_set;
2386 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2389 /* If request specifies an instance that doesn't exist, fail */
2390 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2393 memset(data, 0, sizeof(data));
2396 adv_set = (void *)cp->data;
2398 /* Instance 0x00 indicates all advertising instances will be disabled */
2399 cp->num_of_sets = !!instance;
2402 adv_set->handle = instance;
2404 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2405 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
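/* Remove the advertising set for @instance from the controller, after
 * checking that the instance actually exists.
 */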
2410 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2412 struct hci_dev *hdev = req->hdev;
2414 /* If request specifies an instance that doesn't exist, fail */
2415 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2418 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
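/* (Re)start extended advertising for @instance. The flow below is,
 * roughly:
 *
 *	__hci_req_disable_ext_adv_instance()  (if the set already exists)
 *	__hci_req_setup_ext_adv_instance()
 *	__hci_req_update_scan_rsp_data()
 *	__hci_req_enable_ext_advertising()
 */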
2423 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2425 struct hci_dev *hdev = req->hdev;
2426 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2429 /* If the instance isn't pending, the chip knows about it, and it's safe to disable it before reconfiguring. */
2432 if (adv_instance && !adv_instance->pending)
2433 __hci_req_disable_ext_adv_instance(req, instance);
2435 err = __hci_req_setup_ext_adv_instance(req, instance);
2439 __hci_req_update_scan_rsp_data(req, instance);
2440 __hci_req_enable_ext_advertising(req, instance);
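/* Make @instance the current advertising instance and start (or keep)
 * advertising it. For legacy advertising the expiry timeout is driven
 * by the adv_instance_expire delayed work; extended advertising relies
 * on the duration programmed into the controller instead.
 */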
2445 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2448 struct hci_dev *hdev = req->hdev;
2449 struct adv_info *adv_instance = NULL;
2452 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2453 list_empty(&hdev->adv_instances))
2456 if (hdev->adv_instance_timeout)
2459 adv_instance = hci_find_adv_instance(hdev, instance);
2463 /* A zero timeout means unlimited advertising. As long as there is
2464 * only one instance, duration should be ignored. We still set a timeout
2465 * in case further instances are being added later on.
2467 * If the remaining lifetime of the instance is more than the duration
2468 * then the timeout corresponds to the duration, otherwise it will be
2469 * reduced to the remaining instance lifetime.
2471 if (adv_instance->timeout == 0 ||
2472 adv_instance->duration <= adv_instance->remaining_time)
2473 timeout = adv_instance->duration;
2475 timeout = adv_instance->remaining_time;
2477 /* The remaining time is being reduced unless the instance is being
2478 * advertised without time limit.
2480 if (adv_instance->timeout)
2481 adv_instance->remaining_time =
2482 adv_instance->remaining_time - timeout;
2484 /* Only use work for scheduling instances with legacy advertising */
2485 if (!ext_adv_capable(hdev)) {
2486 hdev->adv_instance_timeout = timeout;
2487 queue_delayed_work(hdev->req_workqueue,
2488 &hdev->adv_instance_expire,
2489 msecs_to_jiffies(timeout * 1000));
2492 /* If we're just re-scheduling the same instance again then do not
2493 * execute any HCI commands. This happens when a single instance is being advertised.
2496 if (!force && hdev->cur_adv_instance == instance &&
2497 hci_dev_test_flag(hdev, HCI_LE_ADV))
2500 hdev->cur_adv_instance = instance;
2501 if (ext_adv_capable(hdev)) {
2502 __hci_req_start_ext_adv(req, instance);
2504 __hci_req_update_adv_data(req, instance);
2505 __hci_req_update_scan_rsp_data(req, instance);
2506 __hci_req_enable_advertising(req);
2512 /* For a single instance:
2513 * - force == true: The instance will be removed even when its remaining
2514 * lifetime is not zero.
2515 * - force == false: The instance will be deactivated but kept stored unless
2516 * the remaining lifetime is zero.
2518 * For instance == 0x00:
2519 * - force == true: All instances will be removed regardless of their timeout setting.
2521 * - force == false: Only instances that have a timeout will be removed.
2523 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2524 struct hci_request *req, u8 instance,
2527 struct adv_info *adv_instance, *n, *next_instance = NULL;
2531 /* Cancel any timeout concerning the removed instance(s). */
2532 if (!instance || hdev->cur_adv_instance == instance)
2533 cancel_adv_timeout(hdev);
2535 /* Get the next instance to advertise BEFORE we remove
2536 * the current one. This can be the same instance again
2537 * if there is only one instance.
2539 if (instance && hdev->cur_adv_instance == instance)
2540 next_instance = hci_get_next_instance(hdev, instance);
2542 if (instance == 0x00) {
2543 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2545 if (!(force || adv_instance->timeout))
2548 rem_inst = adv_instance->instance;
2549 err = hci_remove_adv_instance(hdev, rem_inst);
2551 mgmt_advertising_removed(sk, hdev, rem_inst);
2554 adv_instance = hci_find_adv_instance(hdev, instance);
2556 if (force || (adv_instance && adv_instance->timeout &&
2557 !adv_instance->remaining_time)) {
2558 /* Don't advertise a removed instance. */
2559 if (next_instance &&
2560 next_instance->instance == instance)
2561 next_instance = NULL;
2563 err = hci_remove_adv_instance(hdev, instance);
2565 mgmt_advertising_removed(sk, hdev, instance);
2569 if (!req || !hdev_is_powered(hdev) ||
2570 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2573 if (next_instance && !ext_adv_capable(hdev))
2574 __hci_req_schedule_adv_instance(req, next_instance->instance, false);
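/* Pick the own address type for the next LE operation and, when a
 * random address is needed, queue the command that programs it:
 *
 *	privacy enabled			-> resolvable private address (RPA)
 *	require_privacy without RPA	-> non-resolvable private address
 *	static address forced/needed	-> static random address
 *	otherwise			-> public address
 */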
2578 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2579 bool use_rpa, u8 *own_addr_type)
2581 struct hci_dev *hdev = req->hdev;
2584 /* If privacy is enabled use a resolvable private address. If
2585 * the current RPA has expired or there is something other than
2586 * the current RPA in use, then generate a new one.
2589 /* If the controller supports LL Privacy, use the ADDR_LE_DEV_RANDOM_RESOLVED own address type so the controller resolves RPAs itself.
2592 if (use_ll_privacy(hdev) &&
2593 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2594 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2596 *own_addr_type = ADDR_LE_DEV_RANDOM;
2598 if (rpa_valid(hdev))
2601 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2603 bt_dev_err(hdev, "failed to generate new RPA");
2607 set_random_addr(req, &hdev->rpa);
2612 /* In case of required privacy without resolvable private address,
2613 * use a non-resolvable private address. This is useful for active
2614 * scanning and non-connectable advertising.
2616 if (require_privacy) {
2620 /* The non-resolvable private address is generated
2621 * from six random bytes with the two most significant bits cleared.
2624 get_random_bytes(&nrpa, 6);
2627 /* The non-resolvable private address shall not be
2628 * equal to the public address.
2630 if (bacmp(&hdev->bdaddr, &nrpa))
2634 *own_addr_type = ADDR_LE_DEV_RANDOM;
2635 set_random_addr(req, &nrpa);
2639 /* If forcing static address is in use or there is no public
2640 * address use the static address as random address (but skip
2641 * the HCI command if the current random address is already the static one).
2644 * In case BR/EDR has been disabled on a dual-mode controller
2645 * and a static address has been configured, then use that
2646 * address instead of the public BR/EDR address.
2648 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2649 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2650 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2651 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2652 *own_addr_type = ADDR_LE_DEV_RANDOM;
2653 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2654 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2655 &hdev->static_addr);
2659 /* Neither privacy nor a static address is being used, so use a public address.
2662 *own_addr_type = ADDR_LE_DEV_PUBLIC;
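/* Check whether the accept list contains a device without an active
 * ACL connection; if it does, page scan must stay enabled so that
 * device can reconnect.
 */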
2667 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2669 struct bdaddr_list *b;
2671 list_for_each_entry(b, &hdev->accept_list, list) {
2672 struct hci_conn *conn;
2674 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2678 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2685 void __hci_req_update_scan(struct hci_request *req)
2687 struct hci_dev *hdev = req->hdev;
2690 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2693 if (!hdev_is_powered(hdev))
2696 if (mgmt_powering_down(hdev))
2699 if (hdev->scanning_paused)
2702 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2703 disconnected_accept_list_entries(hdev))
2706 scan = SCAN_DISABLED;
2708 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2709 scan |= SCAN_INQUIRY;
2711 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2712 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2715 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2718 static int update_scan(struct hci_request *req, unsigned long opt)
2720 hci_dev_lock(req->hdev);
2721 __hci_req_update_scan(req);
2722 hci_dev_unlock(req->hdev);
2726 static void scan_update_work(struct work_struct *work)
2728 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2730 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2733 static int connectable_update(struct hci_request *req, unsigned long opt)
2735 struct hci_dev *hdev = req->hdev;
2739 __hci_req_update_scan(req);
2741 /* If BR/EDR is not enabled and we disable advertising as a
2742 * by-product of disabling connectable, we need to update the
2743 * advertising flags.
2745 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2746 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2748 /* Update the advertising parameters if necessary */
2749 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2750 !list_empty(&hdev->adv_instances)) {
2751 if (ext_adv_capable(hdev))
2752 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2754 __hci_req_enable_advertising(req);
2757 __hci_update_background_scan(req);
2759 hci_dev_unlock(hdev);
2764 static void connectable_update_work(struct work_struct *work)
2766 struct hci_dev *hdev = container_of(work, struct hci_dev,
2767 connectable_update);
2770 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2771 mgmt_set_connectable_complete(hdev, status);
2774 static u8 get_service_classes(struct hci_dev *hdev)
2776 struct bt_uuid *uuid;
2779 list_for_each_entry(uuid, &hdev->uuids, list)
2780 val |= uuid->svc_hint;
2785 void __hci_req_update_class(struct hci_request *req)
2787 struct hci_dev *hdev = req->hdev;
2790 bt_dev_dbg(hdev, "");
2792 if (!hdev_is_powered(hdev))
2795 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2798 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2801 cod[0] = hdev->minor_class;
2802 cod[1] = hdev->major_class;
2803 cod[2] = get_service_classes(hdev);
2805 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2808 if (memcmp(cod, hdev->dev_class, 3) == 0)
2811 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
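/* Program the current Inquiry Access Codes: LIAC + GIAC when in
 * limited discoverable mode, GIAC only for general discoverable mode.
 */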
2814 static void write_iac(struct hci_request *req)
2816 struct hci_dev *hdev = req->hdev;
2817 struct hci_cp_write_current_iac_lap cp;
2819 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2822 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2823 /* Limited discoverable mode */
2824 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2825 cp.iac_lap[0] = 0x00; /* LIAC */
2826 cp.iac_lap[1] = 0x8b;
2827 cp.iac_lap[2] = 0x9e;
2828 cp.iac_lap[3] = 0x33; /* GIAC */
2829 cp.iac_lap[4] = 0x8b;
2830 cp.iac_lap[5] = 0x9e;
2832 /* General discoverable mode */
2834 cp.iac_lap[0] = 0x33; /* GIAC */
2835 cp.iac_lap[1] = 0x8b;
2836 cp.iac_lap[2] = 0x9e;
2839 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2840 (cp.num_iac * 3) + 1, &cp);
2843 static int discoverable_update(struct hci_request *req, unsigned long opt)
2845 struct hci_dev *hdev = req->hdev;
2849 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2851 __hci_req_update_scan(req);
2852 __hci_req_update_class(req);
2855 /* Advertising instances don't use the global discoverable setting, so
2856 * only update AD if advertising was enabled using Set Advertising.
2858 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2859 __hci_req_update_adv_data(req, 0x00);
2861 /* Discoverable mode affects the local advertising
2862 * address in limited privacy mode.
2864 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2865 if (ext_adv_capable(hdev))
2866 __hci_req_start_ext_adv(req, 0x00);
2868 __hci_req_enable_advertising(req);
2872 hci_dev_unlock(hdev);
2877 static void discoverable_update_work(struct work_struct *work)
2879 struct hci_dev *hdev = container_of(work, struct hci_dev,
2880 discoverable_update);
2883 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2884 mgmt_set_discoverable_complete(hdev, status);
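/* Queue the HCI commands needed to abort @conn based on its current
 * state: disconnect an established link, cancel an outgoing connection
 * attempt, or reject an incoming connection request.
 */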
2887 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2890 switch (conn->state) {
2893 if (conn->type == AMP_LINK) {
2894 struct hci_cp_disconn_phy_link cp;
2896 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2898 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp), &cp);
2901 struct hci_cp_disconnect dc;
2903 dc.handle = cpu_to_le16(conn->handle);
2905 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2908 conn->state = BT_DISCONN;
2913 if (conn->type == LE_LINK && bacmp(&conn->dst, BDADDR_ANY)) {
2915 if (conn->type == LE_LINK) {
2917 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2919 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
2921 } else if (conn->type == ACL_LINK) {
2922 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2924 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL, 6, &conn->dst);
2929 if (conn->type == ACL_LINK) {
2930 struct hci_cp_reject_conn_req rej;
2932 bacpy(&rej.bdaddr, &conn->dst);
2933 rej.reason = reason;
2935 hci_req_add(req, HCI_OP_REJECT_CONN_REQ, sizeof(rej), &rej);
2937 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2938 struct hci_cp_reject_sync_conn_req rej;
2940 bacpy(&rej.bdaddr, &conn->dst);
2942 /* SCO rejection has its own limited set of
2943 * allowed error values (0x0D-0x0F) which isn't
2944 * compatible with most values passed to this
2945 * function. To be safe, hard-code one of the
2946 * values that's suitable for SCO.
2948 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2950 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(rej), &rej);
2955 conn->state = BT_CLOSED;
2960 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2963 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2966 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2968 struct hci_request req;
2971 hci_req_init(&req, conn->hdev);
2973 __hci_abort_conn(&req, conn, reason);
2975 err = hci_req_run(&req, abort_conn_complete);
2976 if (err && err != -ENODATA) {
2977 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2984 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2986 hci_dev_lock(req->hdev);
2987 __hci_update_background_scan(req);
2988 hci_dev_unlock(req->hdev);
2992 static void bg_scan_update(struct work_struct *work)
2994 struct hci_dev *hdev = container_of(work, struct hci_dev, bg_scan_update);
2996 struct hci_conn *conn;
3000 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
3006 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3008 hci_le_conn_failed(conn, status);
3010 hci_dev_unlock(hdev);
3013 static int le_scan_disable(struct hci_request *req, unsigned long opt)
3015 hci_req_add_le_scan_disable(req, false);
3019 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
3022 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
3023 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
3024 struct hci_cp_inquiry cp;
3026 if (test_bit(HCI_INQUIRY, &req->hdev->flags))
3029 bt_dev_dbg(req->hdev, "");
3031 hci_dev_lock(req->hdev);
3032 hci_inquiry_cache_flush(req->hdev);
3033 hci_dev_unlock(req->hdev);
3035 memset(&cp, 0, sizeof(cp));
3037 if (req->hdev->discovery.limited)
3038 memcpy(&cp.lap, liac, sizeof(cp.lap));
3040 memcpy(&cp.lap, giac, sizeof(cp.lap));
3044 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3049 static void le_scan_disable_work(struct work_struct *work)
3051 struct hci_dev *hdev = container_of(work, struct hci_dev,
3052 le_scan_disable.work);
3055 bt_dev_dbg(hdev, "");
3057 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3060 cancel_delayed_work(&hdev->le_scan_restart);
3062 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
3064 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
3069 hdev->discovery.scan_start = 0;
3071 /* If we were running LE only scan, change discovery state. If
3072 * we were running both LE and BR/EDR inquiry simultaneously,
3073 * and BR/EDR inquiry is already finished, stop discovery,
3074 * otherwise BR/EDR inquiry will stop discovery when finished.
3075 * If we will resolve remote device name, do not change the discovery state.
3079 if (hdev->discovery.type == DISCOV_TYPE_LE)
3080 goto discov_stopped;
3082 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3085 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3086 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3087 hdev->discovery.state != DISCOVERY_RESOLVING)
3088 goto discov_stopped;
3093 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3094 HCI_CMD_TIMEOUT, &status);
3096 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3097 goto discov_stopped;
3104 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3105 hci_dev_unlock(hdev);
3108 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3110 struct hci_dev *hdev = req->hdev;
3112 /* If controller is not scanning we are done. */
3113 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3116 if (hdev->scanning_paused) {
3117 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3121 hci_req_add_le_scan_disable(req, false);
3123 if (use_ext_scan(hdev)) {
3124 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3126 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3127 ext_enable_cp.enable = LE_SCAN_ENABLE;
3128 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3130 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3131 sizeof(ext_enable_cp), &ext_enable_cp);
3133 struct hci_cp_le_set_scan_enable cp;
3135 memset(&cp, 0, sizeof(cp));
3136 cp.enable = LE_SCAN_ENABLE;
3137 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3138 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
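/* Restart LE scanning (needed on controllers with a strict duplicate
 * filter during discovery) and re-arm le_scan_disable for whatever is
 * left of the scan window.
 */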
3144 static void le_scan_restart_work(struct work_struct *work)
3146 struct hci_dev *hdev = container_of(work, struct hci_dev,
3147 le_scan_restart.work);
3148 unsigned long timeout, duration, scan_start, now;
3151 bt_dev_dbg(hdev, "");
3153 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3155 bt_dev_err(hdev, "failed to restart LE scan: status %d",
3162 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3163 !hdev->discovery.scan_start)
3166 /* When the scan was started, hdev->le_scan_disable was queued to
3167 * run 'duration' after scan_start. During the scan restart that work
3168 * was cancelled, so we need to queue it again with the proper
3169 * remaining timeout to make sure the scan does not run indefinitely.
3171 duration = hdev->discovery.scan_duration;
3172 scan_start = hdev->discovery.scan_start;
3174 if (now - scan_start <= duration) {
3177 if (now >= scan_start)
3178 elapsed = now - scan_start;
3180 elapsed = ULONG_MAX - scan_start + now;
3182 timeout = duration - elapsed;
3187 queue_delayed_work(hdev->req_workqueue,
3188 &hdev->le_scan_disable, timeout);
3191 hci_dev_unlock(hdev);
3194 static int active_scan(struct hci_request *req, unsigned long opt)
3196 uint16_t interval = opt;
3197 struct hci_dev *hdev = req->hdev;
3199 /* Accept list is not used for discovery */
3200 u8 filter_policy = 0x00;
3201 /* Default is to enable duplicates filter */
3202 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3203 /* Discovery doesn't require controller address resolution */
3204 bool addr_resolv = false;
3207 bt_dev_dbg(hdev, "");
3209 /* If controller is scanning, it means the background scanning is
3210 * running. Thus, we should temporarily stop it in order to set the
3211 * discovery scanning parameters.
3213 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3214 hci_req_add_le_scan_disable(req, false);
3215 cancel_interleave_scan(hdev);
3218 /* All active scans will be done with either a resolvable private
3219 * address (when the privacy feature has been enabled) or a non-resolvable private address.
3222 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3225 own_addr_type = ADDR_LE_DEV_PUBLIC;
3228 if (hci_is_adv_monitoring(hdev)) {
3229 /* The duplicate filter should be disabled when an advertisement
3230 * monitor is active, otherwise the monitor can only receive one
3231 * advertisement per peer during active scanning, and loss might be
3232 * reported for those peers.
3234 * Note that different controllers interpret |duplicate| differently:
3235 * some consider packets with the same address duplicates, while
3236 * others only treat packets with the same address and the same RSSI
3237 * as duplicates. Although in the latter case the duplicate filter
3238 * would not need to be disabled, active scanning usually only runs
3239 * for a short period of time, so the power impact should be
3240 * negligible.
3242 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
3244 hci_dev_unlock(hdev);
3246 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3247 hdev->le_scan_window_discovery, own_addr_type,
3248 filter_policy, filter_dup, addr_resolv);
3252 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3256 bt_dev_dbg(req->hdev, "");
3258 err = active_scan(req, opt);
3262 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
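/* Start discovery according to hdev->discovery.type and schedule
 * le_scan_disable to stop LE scanning once the computed timeout
 * expires.
 */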
3265 static void start_discovery(struct hci_dev *hdev, u8 *status)
3267 unsigned long timeout;
3269 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3271 switch (hdev->discovery.type) {
3272 case DISCOV_TYPE_BREDR:
3273 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3274 hci_req_sync(hdev, bredr_inquiry,
3275 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT, status);
3278 case DISCOV_TYPE_INTERLEAVED:
3279 /* When running simultaneous discovery, the LE scanning time
3280 * should occupy the whole discovery time since BR/EDR inquiry
3281 * and LE scanning are scheduled by the controller.
3283 * For interleaved discovery, in comparison, BR/EDR inquiry
3284 * and LE scanning are done sequentially with separate timeouts.
3287 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3289 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3290 /* During simultaneous discovery, we double the LE scan
3291 * interval. We must leave some time for the controller
3292 * to do BR/EDR inquiry.
3294 hci_req_sync(hdev, interleaved_discov,
3295 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT, status);
3300 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3301 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3302 HCI_CMD_TIMEOUT, status);
3304 case DISCOV_TYPE_LE:
3305 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3306 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3307 HCI_CMD_TIMEOUT, status);
3310 *status = HCI_ERROR_UNSPECIFIED;
3317 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3319 /* When service discovery is used and the controller has a
3320 * strict duplicate filter, it is important to remember the
3321 * start and duration of the scan. This is required for
3322 * restarting scanning during the discovery phase.
3324 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3325 hdev->discovery.result_filtering) {
3326 hdev->discovery.scan_start = jiffies;
3327 hdev->discovery.scan_duration = timeout;
3330 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, timeout);
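/* Queue whatever is needed to stop the current discovery: cancel an
 * ongoing inquiry, disable LE scanning and cancel a pending remote
 * name request.
 */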
3334 bool hci_req_stop_discovery(struct hci_request *req)
3336 struct hci_dev *hdev = req->hdev;
3337 struct discovery_state *d = &hdev->discovery;
3338 struct hci_cp_remote_name_req_cancel cp;
3339 struct inquiry_entry *e;
3342 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3344 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3345 if (test_bit(HCI_INQUIRY, &hdev->flags))
3346 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3348 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3349 cancel_delayed_work(&hdev->le_scan_disable);
3350 cancel_delayed_work(&hdev->le_scan_restart);
3351 hci_req_add_le_scan_disable(req, false);
3356 /* Passive scanning */
3357 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3358 hci_req_add_le_scan_disable(req, false);
3363 /* No further actions needed for LE-only discovery */
3364 if (d->type == DISCOV_TYPE_LE)
3367 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3368 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3373 bacpy(&cp.bdaddr, &e->data.bdaddr);
3374 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), &cp);
3382 static int stop_discovery(struct hci_request *req, unsigned long opt)
3384 hci_dev_lock(req->hdev);
3385 hci_req_stop_discovery(req);
3386 hci_dev_unlock(req->hdev);
3391 static void discov_update(struct work_struct *work)
3393 struct hci_dev *hdev = container_of(work, struct hci_dev, discov_update);
3397 switch (hdev->discovery.state) {
3398 case DISCOVERY_STARTING:
3399 start_discovery(hdev, &status);
3400 mgmt_start_discovery_complete(hdev, status);
3402 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3404 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3406 case DISCOVERY_STOPPING:
3407 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3408 mgmt_stop_discovery_complete(hdev, status);
3410 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3412 case DISCOVERY_STOPPED:
3418 static void discov_off(struct work_struct *work)
3420 struct hci_dev *hdev = container_of(work, struct hci_dev, discov_off.work);
3423 bt_dev_dbg(hdev, "");
3427 /* When the discoverable timeout triggers, just make sure
3428 * the limited discoverable flag is cleared. Even in the case
3429 * of a timeout triggered from general discoverable, it is
3430 * safe to unconditionally clear the flag.
3432 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3433 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3434 hdev->discov_timeout = 0;
3436 hci_dev_unlock(hdev);
3438 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3439 mgmt_new_settings(hdev);
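/* Bring the controller in line with the current host settings after
 * powering on: SSP, Secure Connections, LE host support, advertising
 * instances, scan mode, class of device, name and EIR.
 */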
3442 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3444 struct hci_dev *hdev = req->hdev;
3449 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3450 !lmp_host_ssp_capable(hdev)) {
3453 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3455 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3458 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3459 sizeof(support), &support);
3463 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3464 lmp_bredr_capable(hdev)) {
3465 struct hci_cp_write_le_host_supported cp;
3470 /* Check first if we already have the right
3471 * host state (host features set)
3473 if (cp.le != lmp_host_le_capable(hdev) ||
3474 cp.simul != lmp_host_le_br_capable(hdev))
3475 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
3479 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3480 /* Make sure the controller has a good default for
3481 * advertising data. This also applies to the case
3482 * where BR/EDR was toggled during the AUTO_OFF phase.
3484 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3485 list_empty(&hdev->adv_instances)) {
3488 if (ext_adv_capable(hdev)) {
3489 err = __hci_req_setup_ext_adv_instance(req, 0x00);
3491 if (!err)
3492 __hci_req_update_scan_rsp_data(req, 0x00);
3496 __hci_req_update_adv_data(req, 0x00);
3497 __hci_req_update_scan_rsp_data(req, 0x00);
3500 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3501 if (!ext_adv_capable(hdev))
3502 __hci_req_enable_advertising(req);
3504 __hci_req_enable_ext_advertising(req, 0x00);
3507 } else if (!list_empty(&hdev->adv_instances)) {
3508 struct adv_info *adv_instance;
3510 adv_instance = list_first_entry(&hdev->adv_instances,
3511 struct adv_info, list);
3512 __hci_req_schedule_adv_instance(req,
3513 adv_instance->instance, true);
3518 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3519 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3520 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3521 sizeof(link_sec), &link_sec);
3523 if (lmp_bredr_capable(hdev)) {
3524 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3525 __hci_req_write_fast_connectable(req, true);
3527 __hci_req_write_fast_connectable(req, false);
3528 __hci_req_update_scan(req);
3529 __hci_req_update_class(req);
3530 __hci_req_update_name(req);
3531 __hci_req_update_eir(req);
3534 hci_dev_unlock(hdev);
3538 int __hci_req_hci_power_on(struct hci_dev *hdev)
3540 /* Register the available SMP channels (BR/EDR and LE) only when
3541 * successfully powering on the controller. This late
3542 * registration is required so that LE SMP can clearly decide if
3543 * the public address or static address is used.
3547 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3551 void hci_request_setup(struct hci_dev *hdev)
3553 INIT_WORK(&hdev->discov_update, discov_update);
3554 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3555 INIT_WORK(&hdev->scan_update, scan_update_work);
3556 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3557 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3558 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3559 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3560 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3561 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3562 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3565 void hci_request_cancel_all(struct hci_dev *hdev)
3567 hci_req_sync_cancel(hdev, ENODEV);
3569 cancel_work_sync(&hdev->discov_update);
3570 cancel_work_sync(&hdev->bg_scan_update);
3571 cancel_work_sync(&hdev->scan_update);
3572 cancel_work_sync(&hdev->connectable_update);
3573 cancel_work_sync(&hdev->discoverable_update);
3574 cancel_delayed_work_sync(&hdev->discov_off);
3575 cancel_delayed_work_sync(&hdev->le_scan_disable);
3576 cancel_delayed_work_sync(&hdev->le_scan_restart);
3578 if (hdev->adv_instance_timeout) {
3579 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3580 hdev->adv_instance_timeout = 0;
3583 cancel_interleave_scan(hdev);