/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* check the state after obtaining the lock to protect the HCI_UP
	 * against any races from hci_dev_do_close when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}
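/* Illustrative sketch (not part of the original file): the func passed to
 * hci_req_sync() only queues commands and returns 0; __hci_req_sync() then
 * blocks until hci_req_sync_complete() wakes req_wait_q or the timeout
 * fires. The opcode is an arbitrary example.
 */
static int __maybe_unused example_sync_func(struct hci_request *req,
					    unsigned long opt)
{
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
	return 0;
}

/* Usage:
 *	u8 status;
 *	hci_req_sync(hdev, example_sync_func, 0, HCI_CMD_TIMEOUT, &status);
 */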
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
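/* The skb built above carries a standard HCI command packet:
 *
 *	bytes 0-1: opcode (little endian, OGF << 10 | OCF)
 *	byte 2:    parameter length (plen)
 *	bytes 3-:  plen bytes of parameters
 *
 * For example, LE Set Scan Enable (OGF 0x08, OCF 0x000c, so opcode 0x200c)
 * with enable=1 and filter_dup=0 is encoded as: 0c 20 02 01 00.
 */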
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
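/* Interval arithmetic for the fast-connectable setting above: the page scan
 * interval is expressed in baseband slots of 0.625 ms, so
 * 0x0100 = 256 slots = 256 * 0.625 ms = 160 ms, matching the comment in
 * the function.
 */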
static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}
/* Return true if an interleave scan was started as a result of this call,
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}
void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}
static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}
/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended &&
	    !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}
static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of accept list even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}
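/* The value returned above feeds directly into the LE scan filter policy:
 * 0x00 accepts all advertisements, 0x01 accepts only devices on the accept
 * list. hci_req_add_le_passive_scan() may additionally OR in 0x02 to select
 * the extended (directed advertising) variants of these policies.
 */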
static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
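/* Illustrative call (not part of the original file): a passive scan with
 * assumed example timing of a 60 ms window inside a 300 ms interval (both
 * in 0.625 ms units: 0x0060 = 96 * 0.625 ms, 0x01e0 = 480 * 0.625 ms)
 * would look like:
 *
 *	hci_req_start_scan(req, LE_SCAN_PASSIVE, 0x01e0, 0x0060,
 *			   own_addr_type, filter_policy,
 *			   LE_SCAN_FILTER_DUP_ENABLE, true);
 */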
/* Returns true if an le connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}
/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}
void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}
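/* Summary of the decision above:
 *
 *	HCI_PRIVACY  HCI_LIMITED_PRIVACY  discoverable && bondable  -> RPA?
 *	off          -                    -                            no
 *	on           off                  -                            yes
 *	on           on                   yes                          no
 *	on           on                   no                           yes
 */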
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
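/* The le_states checks above index the 64-bit LE supported-states mask as
 * le_states[bit / 8] & (1 << (bit % 8)). Worked examples: bit 20 is
 * le_states[2] & 0x10 (20 / 8 = 2, 1 << (20 % 8) = 0x10), bit 38 is
 * le_states[4] & 0x40, and bit 35 is le_states[4] & 0x08.
 */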
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		adv_min_interval = adv->min_interval;
		adv_max_interval = adv->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (adv_cur_instance_is_scannable(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_scan_rsp_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = eir_create_scan_rsp(hdev, instance, pdu.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, pdu.data, len);
		hdev->scan_rsp_data_len = len;

		pdu.cp.handle = instance;
		pdu.cp.length = len;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		len = eir_create_scan_rsp(hdev, instance, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_adv_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = eir_create_adv_data(hdev, instance, pdu.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(pdu.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, pdu.data, len);
		hdev->adv_data_len = len;

		pdu.cp.length = len;
		pdu.cp.handle = instance;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = eir_create_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}
static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 enable = 0x00;

	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

	hci_req_run(&req, enable_addr_resolution_complete);
}
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}
void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}
static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}
static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		BT_ERR("unexpected error");
		ret = -1;
	}

	hci_dev_unlock(hdev);

	return ret;
}
static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	u8 status;
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
		     HCI_CMD_TIMEOUT, &status);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If Controller supports LL Privacy use own address type is
		 * 0x03
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
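/* Address-type arithmetic used above: bdaddr_t stores the address little
 * endian, so b[5] holds the two most significant bits that distinguish
 * random address sub-types. Clearing them (b[5] &= 0x3f) yields the 0b00
 * pattern required for a non-resolvable private address; 0b01 marks a
 * resolvable private address and 0b11 a static random address.
 */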
void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	bool secondary_adv;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = hci_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	if (adv_instance) {
		hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
		hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
		cp.tx_power = adv_instance->tx_power;
	} else {
		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
	}

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if random address need to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
			/* Instance 0x00 doesn't have an adv_info, instead it
			 * uses hdev->random_addr to track its address so
			 * whenever it needs to be updated this also sets the
			 * random address since hdev->random_addr is shared with
			 * scan state machine.
			 */
			set_random_addr(req, &random_addr);
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = instance;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}
int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	struct adv_info *adv_instance;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	cp = (void *) data;
	adv_set = (void *) cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv_instance && adv_instance->duration) {
		u16 duration = adv_instance->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		adv_set->duration = cpu_to_le16(duration / 10);
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
		    data);

	return 0;
}
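/* Duration units worked example: the controller expects the advertising
 * duration in 10 ms units (Time = N * 10 ms), so a 5 second timeout becomes
 * 5 * MSEC_PER_SEC = 5000 ms and N = 5000 / 10 = 500.
 */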
int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	u8 req_size;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	memset(data, 0, sizeof(data));

	cp = (void *)data;
	adv_set = (void *)cp->data;

	/* Instance 0x00 indicates all advertising instances will be disabled */
	cp->num_of_sets = !!instance;
	cp->enable = 0x00;

	adv_set->handle = instance;

	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);

	return 0;
}
int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);

	return 0;
}
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
	int err;

	/* If instance isn't pending, the chip knows about it, and it's safe to
	 * disable
	 */
	if (adv_instance && !adv_instance->pending)
		__hci_req_disable_ext_adv_instance(req, instance);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req, instance);

	return 0;
}
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}
/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance && !ext_adv_capable(hdev))
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		/* If Controller supports LL Privacy use own address type is
		 * 0x03
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
static bool disconnected_accept_list_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->accept_list, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}
void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hdev->scanning_paused)
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_accept_list_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}
static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}
void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}
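/* IAC LAP encoding used above: the General Inquiry Access Code is LAP
 * 0x9e8b33 and the Limited Inquiry Access Code is LAP 0x9e8b00, both
 * written least significant byte first, which is why GIAC appears as
 * 0x33 0x8b 0x9e and LIAC as 0x00 0x8b 0x9e.
 */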
static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
			   err);
		return err;
	}

	return 0;
}
static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req, false);
	return 0;
}
static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	if (test_bit(HCI_INQUIRY, &req->hdev->flags))
		return 0;

	bt_dev_dbg(req->hdev, "");

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we are about to resolve a remote device name, do not change
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	hci_req_add_le_scan_disable(req, false);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	bt_dev_dbg(hdev, "");

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	/* Accept list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Discovery doesn't require controller address resolution */
	bool addr_resolv = false;
	int err;

	bt_dev_dbg(hdev, "");

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_req_add_le_scan_disable(req, false);
		cancel_interleave_scan(hdev);
	}

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	if (hci_is_adv_monitoring(hdev)) {
		/* Duplicate filter should be disabled when some advertisement
		 * monitor is activated, otherwise AdvMon can only receive one
		 * advertisement for one peer(*) during active scanning, and
		 * might report loss to these peers.
		 *
		 * Note that different controllers have different meanings of
		 * |duplicate|. Some of them consider packets with the same
		 * address as duplicate, and others consider packets with the
		 * same address and the same RSSI as duplicate. In the latter
		 * case we wouldn't need to disable the duplicates filter, but
		 * since active scanning typically runs only for a short period
		 * of time, the power impact should be negligible.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	}

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
			   hdev->le_scan_window_discovery, own_addr_type,
			   filter_policy, filter_dup, addr_resolv);
	return 0;
}
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	bt_dev_dbg(req->hdev, "");

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     hdev->le_scan_int_discovery * 2,
				     HCI_CMD_TIMEOUT, status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
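
/* The le_scan_disable work queued above bounds the discovery phase: it
 * stops the LE scan once the timeout elapses, le_scan_restart_work()
 * re-queues it with the remaining time after a scan restart, and
 * hci_req_stop_discovery() cancels it when the host stops discovery.
 */
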
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			cancel_delayed_work(&hdev->le_scan_restart);
			hci_req_add_le_scan_disable(req, false);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req, false);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}
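
/* The bool result reports whether any HCI command was actually queued
 * on the request, so callers can tell a real stop sequence apart from
 * a no-op where nothing was scanning or resolving.
 */
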
static void config_data_path_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}

int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
{
	struct hci_request req;
	int err;
	__u8 vnd_len, *vnd_data = NULL;
	struct hci_op_configure_data_path *cmd = NULL;

	hci_req_init(&req, hdev);

	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
					  &vnd_data);
	if (err < 0)
		goto error;

	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
	if (!cmd) {
		err = -ENOMEM;
		goto error;
	}

	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
	if (err < 0)
		goto error;

	cmd->vnd_len = vnd_len;
	memcpy(cmd->vnd_data, vnd_data, vnd_len);

	/* Configure the path once per direction: input and output */
	cmd->direction = 0x00;
	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);

	cmd->direction = 0x01;
	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);

	err = hci_req_run(&req, config_data_path_complete);
error:
	kfree(cmd);
	kfree(vnd_data);
	return err;
}
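
/* Illustrative usage sketch, not taken from a real driver: a vendor
 * driver providing the two callbacks dereferenced above could set up an
 * offloaded codec path roughly like this (the vnd_* names are
 * hypothetical):
 *
 *	struct bt_codec codec = { .id = 0x05 };	(mSBC coding format)
 *
 *	hdev->get_data_path_id = vnd_get_data_path_id;
 *	hdev->get_codec_config_data = vnd_get_codec_config_data;
 *	err = hci_req_configure_datapath(hdev, &codec);
 *
 * Both callbacks must be populated before calling in, since they are
 * invoked unconditionally.
 */
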
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}
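
/* State transitions driven here: DISCOVERY_STARTING moves to
 * DISCOVERY_FINDING on success or back to DISCOVERY_STOPPED on error,
 * while DISCOVERY_STOPPING moves to DISCOVERY_STOPPED only once the
 * stop request completes without error.
 */
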
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}
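
/* mgmt_new_settings() emits a New Settings event on the management
 * interface, so userspace sees the discoverable setting expire without
 * having to poll.
 */
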
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}
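
/* The opt argument is unused; powered_update_hci() takes it only to
 * match the request callback signature expected by __hci_req_sync().
 */
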
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}
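
/* Each work item initialized above has a matching cancellation in
 * hci_request_cancel_all() below, which keeps handlers from running
 * against a controller that is going away.
 */
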
void hci_request_cancel_all(struct hci_dev *hdev)
{
	__hci_cmd_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	cancel_interleave_scan(hdev);
}