/* net/bluetooth/hci_request.c */
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}
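
/* Illustrative sketch (not part of the original file): typical usage of the
 * asynchronous request API above. Commands are queued on a local request and
 * then spliced onto hdev->cmd_q in one go by hci_req_run(). The function and
 * callback names below are hypothetical.
 */
#if 0
static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "opcode 0x%4.4x completed with status 0x%2.2x",
                   opcode, status);
}

static int example_run_request(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_RESET, 0, NULL);

        /* Returns 0 on success, -ENODATA if no command was queued */
        return hci_req_run(&req, example_complete);
}
#endif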

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        bt_dev_dbg(hdev, "result 0x%2.2x", result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        bt_dev_dbg(hdev, "err 0x%2.2x", err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        bt_dev_dbg(hdev, "");

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        bt_dev_dbg(hdev, "end: err %d", err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
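
/* Illustrative sketch (not part of the original file): sending one command
 * synchronously and consuming the resulting event skb. Assumes the caller
 * holds the request sync lock, as required for __hci_cmd_sync(). The
 * function name is hypothetical; HCI_OP_READ_LOCAL_NAME, struct
 * hci_rp_read_local_name and HCI_CMD_TIMEOUT come from <net/bluetooth/hci.h>.
 */
#if 0
static int example_read_local_name(struct hci_dev *hdev)
{
        struct hci_rp_read_local_name *rp;
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
                             HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        rp = (void *)skb->data;
        if (!rp->status)
                bt_dev_dbg(hdev, "local name %s", rp->name);

        kfree_skb(skb);
        return 0;
}
#endif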

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        bt_dev_dbg(hdev, "start");

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        bt_dev_dbg(hdev, "end: err %d", err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        /* Check the state after obtaining the lock to protect HCI_UP
         * against races with hci_dev_do_close when the controller
         * gets removed.
         */
        if (test_bit(HCI_UP, &hdev->flags))
                ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        else
                ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);

        return ret;
}
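
/* Illustrative sketch (not part of the original file): hci_req_sync() takes
 * a request-builder callback, so a transaction can be written as a plain
 * function while locking, waiting and HCI status mapping are handled by the
 * helpers above. The names below are hypothetical.
 */
#if 0
static int example_build(struct hci_request *req, unsigned long opt)
{
        __u8 scan = (__u8)opt;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int example_write_scan(struct hci_dev *hdev, u8 scan)
{
        u8 hci_status;

        return hci_req_sync(hdev, example_build, scan, HCI_CMD_TIMEOUT,
                            &hci_status);
}
#endif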

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        bt_dev_dbg(hdev, "skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}
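
/* For reference (not part of the original file): hci_prepare_cmd() lays the
 * command out in HCI command packet wire format, a 3 byte header followed by
 * the parameters:
 *
 *     +-----------------+------+------------------+
 *     | opcode (LE u16) | plen | plen param bytes |
 *     +-----------------+------+------------------+
 *
 * where the 16-bit opcode packs the OGF in the upper 6 bits and the OCF in
 * the lower 10 bits.
 */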

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = hdev->def_page_scan_type;
                acp.interval = cpu_to_le16(hdev->def_page_scan_int);
        }

        acp.window = cpu_to_le16(hdev->def_page_scan_window);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
        hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
        return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
        bt_dev_dbg(hdev, "cancelling interleave scan");

        cancel_delayed_work_sync(&hdev->interleave_scan);

        hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}
/* Return true if an interleaved scan was started as a result of this call,
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
        /* Do interleaved scan only if all of the following are true:
         * - There is at least one ADV monitor
         * - At least one pending LE connection or one device to be scanned for
         * - Monitor offloading is not supported
         * If so, we should alternate between allowlist scan and one without
         * any filters to save power.
         */
        bool use_interleaving = hci_is_adv_monitoring(hdev) &&
                                !(list_empty(&hdev->pend_le_conns) &&
                                  list_empty(&hdev->pend_le_reports)) &&
                                hci_get_adv_monitor_offload_ext(hdev) ==
                                    HCI_ADV_MONITOR_EXT_NONE;
        bool is_interleaving = is_interleave_scanning(hdev);

        if (use_interleaving && !is_interleaving) {
                start_interleave_scan(hdev);
                bt_dev_dbg(hdev, "starting interleave scan");
                return true;
        }

        if (!use_interleaving && is_interleaving)
                cancel_interleave_scan(hdev);

        return false;
}
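
/* For reference (not part of the original file): once started, the delayed
 * work behind hdev->interleave_scan alternates between the two states used
 * above, roughly:
 *
 *     INTERLEAVE_SCAN_NO_FILTER -> INTERLEAVE_SCAN_ALLOWLIST -> ... (repeat)
 *
 * so that the unfiltered scan needed by the ADV monitors and the cheaper
 * allowlist scan take turns instead of scanning unfiltered all the time.
 */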

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        bt_dev_dbg(hdev, "ADV monitoring is %s",
                   hci_is_adv_monitoring(hdev) ? "on" : "off");

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports) &&
            !hci_is_adv_monitoring(hdev)) {
                /* If there are no pending LE connections, no devices
                 * to be scanned for and no ADV monitors, we should stop
                 * the background scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req, false);

                bt_dev_dbg(hdev, "stopping background scanning");
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req, false);

                hci_req_add_le_passive_scan(req);
                bt_dev_dbg(hdev, "starting background scanning");
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
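
/* For reference (not part of the original file): each block emitted by
 * create_eir() and the UUID list helpers above is a standard EIR structure,
 * a length/type/data triplet where the length byte covers the type byte plus
 * the data:
 *
 *     [len = n + 1][type][n data bytes]
 *
 * e.g. the complete local name "BlueZ" is encoded as
 *     06 09 'B' 'l' 'u' 'e' 'Z'        (0x09 == EIR_NAME_COMPLETE)
 */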

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (hdev->suspended)
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        /* Disable address resolution */
        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
                __u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
                                 u8 bdaddr_type)
{
        struct hci_cp_le_del_from_accept_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(req->hdev) &&
            hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
                if (irk) {
                        struct hci_cp_le_del_from_resolv_list cp;

                        cp.bdaddr_type = bdaddr_type;
                        bacpy(&cp.bdaddr, bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }
}
/* Add a connection to the accept list if needed. On error, return -1. */
static int add_to_accept_list(struct hci_request *req,
                              struct hci_conn_params *params, u8 *num_entries,
                              bool allow_rpa)
{
        struct hci_cp_le_add_to_accept_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in accept list */
        if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* Accept list is full: have the caller select the filter policy
         * that accepts all advertising instead.
         */
        if (*num_entries >= hdev->le_accept_list_size)
                return -1;

        /* Accept list cannot be used with RPAs */
        if (!allow_rpa &&
            !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in accept list */
        if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                                   params->current_flags))
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(hdev, &params->addr,
                                           params->addr_type);
                if (irk) {
                        struct hci_cp_le_add_to_resolv_list cp;

                        cp.bdaddr_type = params->addr_type;
                        bacpy(&cp.bdaddr, &params->addr);
                        memcpy(cp.peer_irk, irk->val, 16);

                        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                                memcpy(cp.local_irk, hdev->irk, 16);
                        else
                                memset(cp.local_irk, 0, 16);

                        hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }

        return 0;
}

static u8 update_accept_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow usage of the accept list even with RPAs in suspend. In the
         * worst case, we won't be able to wake from devices that use the
         * privacy 1.2 features. Additionally, once we support privacy 1.2 and
         * IRK offloading, we can update this to also check for those
         * conditions.
         */
        bool allow_rpa = hdev->suspended;

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
                allow_rpa = true;

        /* Go through the current accept list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_accept_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the accept list.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* Accept list cannot be used with RPAs */
                if (!allow_rpa &&
                    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Since all no-longer-valid accept list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of devices is larger than the number of
         * available accept list entries in the controller, then
         * just abort and return a filter policy value that does not
         * use the accept list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * accept list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Use the allowlist unless the following conditions are all true:
         * - We are not currently suspending
         * - There are 1 or more ADV monitors registered and it's not offloaded
         * - Interleaved scanning is not currently using the allowlist
         */
        if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
            hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
            hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
                return 0x00;

        /* Select filter policy to use accept list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy,
                               bool filter_dup, bool addr_resolv)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            addr_resolv) {
                u8 enable = 0x01;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }

        /* Use extended scanning if the controller supports both the
         * LE Set Extended Scan Parameters and LE Set Extended Scan
         * Enable commands.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = filter_dup;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = filter_dup;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}
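
/* For reference (not part of the original file): the interval and window
 * arguments of hci_req_start_scan() are in units of 0.625 ms, as defined for
 * the LE Set Scan Parameters command, and the window must not exceed the
 * interval. For example:
 *
 *     interval 0x0010 -> 16 * 0.625 ms = 10 ms
 *     interval 0x0060 -> 96 * 0.625 ms = 60 ms
 *     window   0x0030 -> 48 * 0.625 ms = 30 ms
 *
 * The controller scans for "window" out of every "interval".
 */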

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *c;

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == LE_LINK && c->state == BT_CONNECT &&
                    test_bit(HCI_CONN_SCANNING, &c->flags)) {
                        rcu_read_unlock();
                        return true;
                }
        }

        rcu_read_unlock();

        return false;
}

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution before the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;
        /* Default is to enable duplicates filter */
        u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        /* Background scanning should run with address resolution */
        bool addr_resolv = true;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ is sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        if (hdev->enable_advmon_interleave_scan &&
            __hci_update_interleaved_scan(hdev))
                return;

        bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
        /* Adding or removing entries from the accept list must
         * happen before enabling scanning. The controller does
         * not allow accept list modification while scanning.
         */
        filter_policy = update_accept_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no accept list)
         * and 0x01 (accept list enabled) use the new filter policies
         * 0x02 (no accept list) and 0x03 (accept list enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = hdev->le_scan_window_suspend;
                interval = hdev->le_scan_int_suspend;

                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
        } else if (hci_is_le_conn_scanning(hdev)) {
                window = hdev->le_scan_window_connect;
                interval = hdev->le_scan_int_connect;
        } else if (hci_is_adv_monitoring(hdev)) {
                window = hdev->le_scan_window_adv_monitor;
                interval = hdev->le_scan_int_adv_monitor;

                /* Disable duplicates filter when scanning for advertisement
                 * monitor for the following reasons.
                 *
                 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
                 * controllers ignore RSSI_Sampling_Period when the duplicates
                 * filter is enabled.
                 *
                 * For SW pattern filtering, when we're not doing interleaved
                 * scanning, it is necessary to disable duplicates filter,
                 * otherwise hosts can only receive one advertisement and it's
                 * impossible to know if a peer is still in range.
                 */
                filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
                   filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy, filter_dup,
                           addr_resolv);
}

static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        /* Instance 0x00 always sets the local name */
        if (instance == 0x00)
                return true;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return false;

        if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
            adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
                return true;

        return adv_instance->scan_rsp_len ? true : false;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
        struct hci_cp_set_event_filter f;

        if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
                return;

        if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
                memset(&f, 0, sizeof(f));
                f.flt_type = HCI_FLT_CLEAR_ALL;
                hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
        }
}

static void hci_req_set_event_filter(struct hci_request *req)
{
        struct bdaddr_list_with_flags *b;
        struct hci_cp_set_event_filter f;
        struct hci_dev *hdev = req->hdev;
        u8 scan = SCAN_DISABLED;
        bool scanning = test_bit(HCI_PSCAN, &hdev->flags);

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        /* Always clear event filter when starting */
        hci_req_clear_event_filter(req);

        list_for_each_entry(b, &hdev->accept_list, list) {
                if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                        b->current_flags))
                        continue;

                memset(&f, 0, sizeof(f));
                bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
                f.flt_type = HCI_FLT_CONN_SETUP;
                f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
                f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

                bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
                hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
                scan = SCAN_PAGE;
        }

        if (scan && !scanning) {
                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        } else if (!scan && scanning) {
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        }
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
        bt_dev_dbg(req->hdev, "Pausing advertising instances");

        /* Call to disable any advertisements active on the controller.
         * This will succeed even if no advertisements are configured.
         */
        __hci_req_disable_advertising(req);

        /* If we are using software rotation, pause the loop */
        if (!ext_adv_capable(req->hdev))
                cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
        struct adv_info *adv;

        bt_dev_dbg(req->hdev, "Resuming advertising instances");

        if (ext_adv_capable(req->hdev)) {
                /* Call for each tracked instance to be re-enabled */
                list_for_each_entry(adv, &req->hdev->adv_instances, list) {
                        __hci_req_enable_ext_advertising(req,
                                                         adv->instance);
                }

        } else {
                /* Schedule for most recent instance to be restarted and begin
                 * the software rotation loop
                 */
                __hci_req_schedule_adv_instance(req,
                                                req->hdev->cur_adv_instance,
                                                true);
        }
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_resume_adv_instances(&req);

        return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
                   status);
        if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
            test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
                clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
                clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                wake_up(&hdev->suspend_wait_q);
        }

        if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
                clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
                wake_up(&hdev->suspend_wait_q);
        }
}

static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
                                              bool enable)
{
        struct hci_dev *hdev = req->hdev;

        switch (hci_get_adv_monitor_offload_ext(hdev)) {
        case HCI_ADV_MONITOR_EXT_MSFT:
                msft_req_add_set_filter_enable(req, enable);
                break;
        default:
                return;
        }

        /* No need to block when enabling since it's on resume path */
        if (hdev->suspended && !enable)
                set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
        int old_state;
        struct hci_conn *conn;
        struct hci_request req;
        u8 page_scan;
        int disconnect_counter;

        if (next == hdev->suspend_state) {
                bt_dev_dbg(hdev, "Same state before and after: %d", next);
                goto done;
        }

        hdev->suspend_state = next;
        hci_req_init(&req, hdev);

        if (next == BT_SUSPEND_DISCONNECT) {
                /* Mark device as suspended */
                hdev->suspended = true;

                /* Pause discovery if not already stopped */
                old_state = hdev->discovery.state;
                if (old_state != DISCOVERY_STOPPED) {
                        set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hdev->discovery_paused = true;
                hdev->discovery_old_state = old_state;

                /* Stop directed advertising */
                old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
                if (old_state) {
                        set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
                        cancel_delayed_work(&hdev->discov_off);
                        queue_delayed_work(hdev->req_workqueue,
                                           &hdev->discov_off, 0);
                }

                /* Pause other advertisements */
                if (hdev->adv_instance_cnt)
                        __hci_req_pause_adv_instances(&req);

                hdev->advertising_paused = true;
                hdev->advertising_old_state = old_state;

                /* Disable page scan if enabled */
                if (test_bit(HCI_PSCAN, &hdev->flags)) {
                        page_scan = SCAN_DISABLED;
                        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
                                    &page_scan);
                        set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                }

                /* Disable LE passive scan if enabled */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_interleave_scan(hdev);
                        hci_req_add_le_scan_disable(&req, false);
                }

                /* Disable advertisement filters */
                hci_req_add_set_adv_filter_enable(&req, false);

                /* Prevent disconnects from causing scanning to be re-enabled */
                hdev->scanning_paused = true;

                /* Run commands before disconnecting */
                hci_req_run(&req, suspend_req_complete);

                disconnect_counter = 0;
                /* Soft disconnect everything (power off) */
                list_for_each_entry(conn, &hdev->conn_hash.list, list) {
                        hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
                        disconnect_counter++;
                }

                if (disconnect_counter > 0) {
                        bt_dev_dbg(hdev,
                                   "Had %d disconnects. Will wait on them",
                                   disconnect_counter);
                        set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
                }
        } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
                /* Unpause to take care of updating scanning params */
                hdev->scanning_paused = false;
                /* Enable event filter for paired devices */
                hci_req_set_event_filter(&req);
                /* Enable passive scan at lower duty cycle */
                __hci_update_background_scan(&req);
                /* Pause scan changes again. */
                hdev->scanning_paused = true;
                hci_req_run(&req, suspend_req_complete);
        } else {
                hdev->suspended = false;
                hdev->scanning_paused = false;

                /* Clear any event filters and restore scan state */
                hci_req_clear_event_filter(&req);
                __hci_req_update_scan(&req);

                /* Reset passive/background scanning to normal */
                __hci_update_background_scan(&req);
                /* Enable all of the advertisement filters */
                hci_req_add_set_adv_filter_enable(&req, true);

                /* Unpause directed advertising */
                hdev->advertising_paused = false;
                if (hdev->advertising_old_state) {
                        set_bit(SUSPEND_UNPAUSE_ADVERTISING,
                                hdev->suspend_tasks);
                        hci_dev_set_flag(hdev, HCI_ADVERTISING);
                        queue_work(hdev->req_workqueue,
                                   &hdev->discoverable_update);
                        hdev->advertising_old_state = 0;
                }

                /* Resume other advertisements */
                if (hdev->adv_instance_cnt)
                        __hci_req_resume_adv_instances(&req);

                /* Unpause discovery */
                hdev->discovery_paused = false;
                if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
                    hdev->discovery_old_state != DISCOVERY_STOPPING) {
                        set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STARTING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hci_req_run(&req, suspend_req_complete);
        }

        hdev->suspend_state = next;

done:
        clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
        wake_up(&hdev->suspend_wait_q);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
        return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                __hci_req_disable_ext_adv_instance(req, 0x00);

        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
        }
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

1487         /* Return 0 when given an invalid instance identifier. */
1488         if (!adv_instance)
1489                 return 0;
1490
1491         return adv_instance->flags;
1492 }
1493
1494 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1495 {
1496         /* If privacy is not enabled don't use RPA */
1497         if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1498                 return false;
1499
1500         /* If basic privacy mode is enabled use RPA */
1501         if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1502                 return true;
1503
1504         /* If limited privacy mode is enabled don't use RPA if we're
1505          * both discoverable and bondable.
1506          */
1507         if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1508             hci_dev_test_flag(hdev, HCI_BONDABLE))
1509                 return false;
1510
1511         /* We're neither bondable nor discoverable in the limited
1512          * privacy mode, therefore use RPA.
1513          */
1514         return true;
1515 }
1516
1517 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1518 {
1519         /* If there is no connection we are OK to advertise. */
1520         if (hci_conn_num(hdev, LE_LINK) == 0)
1521                 return true;
1522
1523         /* Check le_states if there is any connection in peripheral role. */
1524         if (hdev->conn_hash.le_num_peripheral > 0) {
1525                 /* Peripheral connection state and non-connectable mode bit 20.
1526                  */
1527                 if (!connectable && !(hdev->le_states[2] & 0x10))
1528                         return false;
1529
1530                 /* Peripheral connection state and connectable mode bit 38
1531                  * and scannable bit 21.
1532                  */
1533                 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1534                                     !(hdev->le_states[2] & 0x20)))
1535                         return false;
1536         }
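        /* Illustrative decoding of the masks used here, assuming le_states[]
         * holds the 8-byte LE Supported States field with spec bit N stored
         * as le_states[N / 8] & BIT(N % 8):
         *
         *   bit 20 -> le_states[2] & 0x10
         *   bit 21 -> le_states[2] & 0x20
         *   bit 38 -> le_states[4] & 0x40
         */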
1537
1538         /* Check le_states if there is any connection in central role. */
1539         if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1540                 /* Central connection state and non-connectable mode bit 18. */
1541                 if (!connectable && !(hdev->le_states[2] & 0x02))
1542                         return false;
1543
1544                 /* Central connection state and connectable mode bit 35 and
1545                  * scannable 19.
1546                  */
1547                 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1548                                     !(hdev->le_states[2] & 0x08)))
1549                         return false;
1550         }
1551
1552         return true;
1553 }
1554
1555 void __hci_req_enable_advertising(struct hci_request *req)
1556 {
1557         struct hci_dev *hdev = req->hdev;
1558         struct adv_info *adv_instance;
1559         struct hci_cp_le_set_adv_param cp;
1560         u8 own_addr_type, enable = 0x01;
1561         bool connectable;
1562         u16 adv_min_interval, adv_max_interval;
1563         u32 flags;
1564
1565         flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1566         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1567
1568         /* If the "connectable" instance flag was not set, then choose between
1569          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1570          */
1571         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1572                       mgmt_get_connectable(hdev);
1573
1574         if (!is_advertising_allowed(hdev, connectable))
1575                 return;
1576
1577         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1578                 __hci_req_disable_advertising(req);
1579
1580         /* Clear the HCI_LE_ADV bit temporarily so that the
1581          * hci_update_random_address knows that it's safe to go ahead
1582          * and write a new random address. The flag will be set back on
1583          * as soon as the SET_ADV_ENABLE HCI command completes.
1584          */
1585         hci_dev_clear_flag(hdev, HCI_LE_ADV);
1586
1587         /* Set require_privacy to true only when non-connectable
1588          * advertising is used. In that case it is fine to use a
1589          * non-resolvable private address.
1590          */
1591         if (hci_update_random_address(req, !connectable,
1592                                       adv_use_rpa(hdev, flags),
1593                                       &own_addr_type) < 0)
1594                 return;
1595
1596         memset(&cp, 0, sizeof(cp));
1597
1598         if (adv_instance) {
1599                 adv_min_interval = adv_instance->min_interval;
1600                 adv_max_interval = adv_instance->max_interval;
1601         } else {
1602                 adv_min_interval = hdev->le_adv_min_interval;
1603                 adv_max_interval = hdev->le_adv_max_interval;
1604         }
1605
1606         if (connectable) {
1607                 cp.type = LE_ADV_IND;
1608         } else {
1609                 if (adv_cur_instance_is_scannable(hdev))
1610                         cp.type = LE_ADV_SCAN_IND;
1611                 else
1612                         cp.type = LE_ADV_NONCONN_IND;
1613
1614                 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1615                     hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1616                         adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1617                         adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1618                 }
1619         }
1620
1621         cp.min_interval = cpu_to_le16(adv_min_interval);
1622         cp.max_interval = cpu_to_le16(adv_max_interval);
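        /* Both intervals are in units of 0.625 ms, so e.g. a value of
         * 0x00a0 (160) corresponds to a 100 ms advertising interval.
         */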
1623
1624 #ifdef TIZEN_BT
1625         cp.filter_policy = hdev->adv_filter_policy;
1626         cp.type = hdev->adv_type;
1627 #endif
1628
1629         cp.own_address_type = own_addr_type;
1630         cp.channel_map = hdev->le_adv_channel_map;
1631
1632         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1633
1634         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1635 }
1636
1637 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1638 {
1639         size_t short_len;
1640         size_t complete_len;
1641
1642         /* no space left for name (+ NULL + type + len) */
1643         if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1644                 return ad_len;
1645
1646         /* use complete name if present and fits */
1647         complete_len = strlen(hdev->dev_name);
1648         if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1649                 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1650                                        hdev->dev_name, complete_len + 1);
1651
1652         /* use short name if present */
1653         short_len = strlen(hdev->short_name);
1654         if (short_len)
1655                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1656                                        hdev->short_name, short_len + 1);
1657
1658         /* use shortened full name if present; we already know that the name
1659          * is longer than HCI_MAX_SHORT_NAME_LENGTH
1660          */
1661         if (complete_len) {
1662                 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1663
1664                 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1665                 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1666
1667                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1668                                        sizeof(name));
1669         }
1670
1671         return ad_len;
1672 }
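/* For reference, eir_append_data() above emits the standard
 * [length][type][data] AD structure; e.g. a complete name "abc" (copied
 * together with its NUL terminator, hence the "+ 1") becomes:
 *
 *   05 09 'a' 'b' 'c' '\0'    (0x09 being EIR_NAME_COMPLETE)
 */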
1673
1674 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1675 {
1676         return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1677 }
1678
1679 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1680 {
1681         u8 scan_rsp_len = 0;
1682
1683         if (hdev->appearance)
1684                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1685
1686         return append_local_name(hdev, ptr, scan_rsp_len);
1687 }
1688
1689 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1690                                         u8 *ptr)
1691 {
1692         struct adv_info *adv_instance;
1693         u32 instance_flags;
1694         u8 scan_rsp_len = 0;
1695
1696         adv_instance = hci_find_adv_instance(hdev, instance);
1697         if (!adv_instance)
1698                 return 0;
1699
1700         instance_flags = adv_instance->flags;
1701
1702         if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
1703                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1704
1705         memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1706                adv_instance->scan_rsp_len);
1707
1708         scan_rsp_len += adv_instance->scan_rsp_len;
1709
1710         if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1711                 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1712
1713         return scan_rsp_len;
1714 }
1715
1716 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1717 {
1718         struct hci_dev *hdev = req->hdev;
1719         u8 len;
1720
1721         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1722                 return;
1723
1724         if (ext_adv_capable(hdev)) {
1725                 struct {
1726                         struct hci_cp_le_set_ext_scan_rsp_data cp;
1727                         u8 data[HCI_MAX_EXT_AD_LENGTH];
1728                 } pdu;
1729
1730                 memset(&pdu, 0, sizeof(pdu));
1731
1732                 if (instance)
1733                         len = create_instance_scan_rsp_data(hdev, instance,
1734                                                             pdu.data);
1735                 else
1736                         len = create_default_scan_rsp_data(hdev, pdu.data);
1737 #ifdef TIZEN_BT
1738                 /* Advertising scan response data is handled in BlueZ. This
1739                  * value is updated only when the application requests an
1740                  * update using adapter_set_scan_rsp_data().
1741                  */
1742                 return;
1743 #else
1744
1745                 if (hdev->scan_rsp_data_len == len &&
1746                     !memcmp(pdu.data, hdev->scan_rsp_data, len))
1747                         return;
1748
1749                 memcpy(hdev->scan_rsp_data, pdu.data, len);
1750                 hdev->scan_rsp_data_len = len;
1751
1752                 pdu.cp.handle = instance;
1753                 pdu.cp.length = len;
1754                 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1755                 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1756
1757                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1758                             sizeof(pdu.cp) + len, &pdu.cp);
1759 #endif
1760         } else {
1761                 struct hci_cp_le_set_scan_rsp_data cp;
1762
1763                 memset(&cp, 0, sizeof(cp));
1764
1765                 if (instance)
1766                         len = create_instance_scan_rsp_data(hdev, instance,
1767                                                             cp.data);
1768                 else
1769                         len = create_default_scan_rsp_data(hdev, cp.data);
1770 #ifdef TIZEN_BT
1771                 /* Advertising scan response data is handled in BlueZ. This
1772                  * value is updated only when the application requests an
1773                  * update using adapter_set_scan_rsp_data().
1774                  */
1775                 return;
1776 #else
1777                 if (hdev->scan_rsp_data_len == len &&
1778                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1779                         return;
1780
1781                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1782                 hdev->scan_rsp_data_len = len;
1783
1784                 cp.length = len;
1785
1786                 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1787 #endif
1788         }
1789 }
1790
1791 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1792 {
1793         struct adv_info *adv_instance = NULL;
1794         u8 ad_len = 0, flags = 0;
1795         u32 instance_flags;
1796
1797         /* Return 0 when the current instance identifier is invalid. */
1798         if (instance) {
1799                 adv_instance = hci_find_adv_instance(hdev, instance);
1800                 if (!adv_instance)
1801                         return 0;
1802         }
1803
1804         instance_flags = get_adv_instance_flags(hdev, instance);
1805
1806         /* If instance already has the flags set skip adding it once
1807          * again.
1808          */
1809         if (adv_instance && eir_get_data(adv_instance->adv_data,
1810                                          adv_instance->adv_data_len, EIR_FLAGS,
1811                                          NULL))
1812                 goto skip_flags;
1813
1814         /* The Add Advertising command allows userspace to set both the general
1815          * and limited discoverable flags.
1816          */
1817         if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1818                 flags |= LE_AD_GENERAL;
1819
1820         if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1821                 flags |= LE_AD_LIMITED;
1822
1823         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1824                 flags |= LE_AD_NO_BREDR;
1825
1826         if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1827                 /* If a discovery flag wasn't provided, simply use the global
1828                  * settings.
1829                  */
1830                 if (!flags)
1831                         flags |= mgmt_get_adv_discov_flags(hdev);
1832
1833                 /* If flags would still be empty, then there is no need to
1834                  * include the "Flags" AD field.
1835                  */
1836                 if (flags) {
1837                         ptr[0] = 0x02;
1838                         ptr[1] = EIR_FLAGS;
1839                         ptr[2] = flags;
1840
1841                         ad_len += 3;
1842                         ptr += 3;
1843                 }
1844         }
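        /* e.g. a general-discoverable LE-only instance ends up with the
         * familiar three bytes 02 01 06: length 2, EIR_FLAGS (0x01) and
         * LE_AD_GENERAL | LE_AD_NO_BREDR (0x02 | 0x04).
         */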
1845
1846 skip_flags:
1847         if (adv_instance) {
1848                 memcpy(ptr, adv_instance->adv_data,
1849                        adv_instance->adv_data_len);
1850                 ad_len += adv_instance->adv_data_len;
1851                 ptr += adv_instance->adv_data_len;
1852         }
1853
1854         if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1855                 s8 adv_tx_power;
1856
1857                 if (ext_adv_capable(hdev)) {
1858                         if (adv_instance)
1859                                 adv_tx_power = adv_instance->tx_power;
1860                         else
1861                                 adv_tx_power = hdev->adv_tx_power;
1862                 } else {
1863                         adv_tx_power = hdev->adv_tx_power;
1864                 }
1865
1866                 /* Include the Tx Power field only if we have a valid value for it */
1867                 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1868                         ptr[0] = 0x02;
1869                         ptr[1] = EIR_TX_POWER;
1870                         ptr[2] = (u8)adv_tx_power;
1871
1872                         ad_len += 3;
1873                         ptr += 3;
1874                 }
1875         }
1876
1877         return ad_len;
1878 }
1879
1880 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1881 {
1882         struct hci_dev *hdev = req->hdev;
1883         u8 len;
1884
1885         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1886                 return;
1887
1888         if (ext_adv_capable(hdev)) {
1889                 struct {
1890                         struct hci_cp_le_set_ext_adv_data cp;
1891                         u8 data[HCI_MAX_EXT_AD_LENGTH];
1892                 } pdu;
1893
1894                 memset(&pdu, 0, sizeof(pdu));
1895
1896                 len = create_instance_adv_data(hdev, instance, pdu.data);
1897
1898 #ifdef TIZEN_BT
1899                 /* BlueZ handles the advertising data, including the flags and
1900                  * TX power. This value is updated only when the application
1901                  * requests an update using adapter_set_advertising_data().
1902                  */
1903                 return;
1904 #else
1905                 /* There's nothing to do if the data hasn't changed */
1906                 if (hdev->adv_data_len == len &&
1907                     memcmp(pdu.data, hdev->adv_data, len) == 0)
1908                         return;
1909
1910                 memcpy(hdev->adv_data, pdu.data, len);
1911                 hdev->adv_data_len = len;
1912
1913                 pdu.cp.length = len;
1914                 pdu.cp.handle = instance;
1915                 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1916                 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1917
1918                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1919                             sizeof(pdu.cp) + len, &pdu.cp);
1920 #endif
1921         } else {
1922                 struct hci_cp_le_set_adv_data cp;
1923
1924                 memset(&cp, 0, sizeof(cp));
1925
1926                 len = create_instance_adv_data(hdev, instance, cp.data);
1927
1928 #ifdef TIZEN_BT
1929                 /* BlueZ handles the advertising data, including the flags and
1930                  * TX power. This value is updated only when the application
1931                  * requests an update using adapter_set_advertising_data().
1932                  */
1933                 return;
1934 #else
1935                 /* There's nothing to do if the data hasn't changed */
1936                 if (hdev->adv_data_len == len &&
1937                     memcmp(cp.data, hdev->adv_data, len) == 0)
1938                         return;
1939
1940                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1941                 hdev->adv_data_len = len;
1942
1943                 cp.length = len;
1944
1945                 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1946 #endif
1947         }
1948 }
1949
1950 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1951 {
1952         struct hci_request req;
1953
1954         hci_req_init(&req, hdev);
1955         __hci_req_update_adv_data(&req, instance);
1956
1957         return hci_req_run(&req, NULL);
1958 }
1959
1960 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1961                                             u16 opcode)
1962 {
1963         BT_DBG("%s status %u", hdev->name, status);
1964 }
1965
1966 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1967 {
1968         struct hci_request req;
1969         __u8 enable = 0x00;
1970
1971         if (!use_ll_privacy(hdev) &&
1972             !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1973                 return;
1974
1975         hci_req_init(&req, hdev);
1976
1977         hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1978
1979         hci_req_run(&req, enable_addr_resolution_complete);
1980 }
1981
1982 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1983 {
1984         bt_dev_dbg(hdev, "status %u", status);
1985 }
1986
1987 void hci_req_reenable_advertising(struct hci_dev *hdev)
1988 {
1989         struct hci_request req;
1990
1991         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1992             list_empty(&hdev->adv_instances))
1993                 return;
1994
1995         hci_req_init(&req, hdev);
1996
1997         if (hdev->cur_adv_instance) {
1998                 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1999                                                 true);
2000         } else {
2001                 if (ext_adv_capable(hdev)) {
2002                         __hci_req_start_ext_adv(&req, 0x00);
2003                 } else {
2004                         __hci_req_update_adv_data(&req, 0x00);
2005                         __hci_req_update_scan_rsp_data(&req, 0x00);
2006                         __hci_req_enable_advertising(&req);
2007                 }
2008         }
2009
2010         hci_req_run(&req, adv_enable_complete);
2011 }
2012
2013 static void adv_timeout_expire(struct work_struct *work)
2014 {
2015         struct hci_dev *hdev = container_of(work, struct hci_dev,
2016                                             adv_instance_expire.work);
2017
2018         struct hci_request req;
2019         u8 instance;
2020
2021         bt_dev_dbg(hdev, "");
2022
2023         hci_dev_lock(hdev);
2024
2025         hdev->adv_instance_timeout = 0;
2026
2027         instance = hdev->cur_adv_instance;
2028         if (instance == 0x00)
2029                 goto unlock;
2030
2031         hci_req_init(&req, hdev);
2032
2033         hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
2034
2035         if (list_empty(&hdev->adv_instances))
2036                 __hci_req_disable_advertising(&req);
2037
2038         hci_req_run(&req, NULL);
2039
2040 unlock:
2041         hci_dev_unlock(hdev);
2042 }
2043
2044 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
2045                                            unsigned long opt)
2046 {
2047         struct hci_dev *hdev = req->hdev;
2048         int ret = 0;
2049
2050         hci_dev_lock(hdev);
2051
2052         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2053                 hci_req_add_le_scan_disable(req, false);
2054         hci_req_add_le_passive_scan(req);
2055
2056         switch (hdev->interleave_scan_state) {
2057         case INTERLEAVE_SCAN_ALLOWLIST:
2058                 bt_dev_dbg(hdev, "next state: no filter");
2059                 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2060                 break;
2061         case INTERLEAVE_SCAN_NO_FILTER:
2062                 bt_dev_dbg(hdev, "next state: allowlist");
2063                 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
2064                 break;
2065         case INTERLEAVE_SCAN_NONE:
2066                 BT_ERR("unexpected error");
2067                 ret = -1;
2068         }
2069
2070         hci_dev_unlock(hdev);
2071
2072         return ret;
2073 }
2074
2075 static void interleave_scan_work(struct work_struct *work)
2076 {
2077         struct hci_dev *hdev = container_of(work, struct hci_dev,
2078                                             interleave_scan.work);
2079         u8 status;
2080         unsigned long timeout;
2081
2082         if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2083                 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2084         } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2085                 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2086         } else {
2087                 bt_dev_err(hdev, "unexpected error");
2088                 return;
2089         }
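        /* Each pass runs hci_req_add_le_interleaved_scan() to reconfigure
         * scanning and flip the state, then re-arms itself after the current
         * mode's duration, alternating allowlist and no-filter scanning
         * until interleaving is cancelled.
         */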
2090
2091         hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2092                      HCI_CMD_TIMEOUT, &status);
2093
2094         /* Don't continue interleaving if it was canceled */
2095         if (is_interleave_scanning(hdev))
2096                 queue_delayed_work(hdev->req_workqueue,
2097                                    &hdev->interleave_scan, timeout);
2098 }
2099
2100 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2101                            bool use_rpa, struct adv_info *adv_instance,
2102                            u8 *own_addr_type, bdaddr_t *rand_addr)
2103 {
2104         int err;
2105
2106         bacpy(rand_addr, BDADDR_ANY);
2107
2108         /* If privacy is enabled use a resolvable private address. If
2109          * current RPA has expired then generate a new one.
2110          */
2111         if (use_rpa) {
2112                 /* If the controller supports LL Privacy, use own address type
2113                  * 0x03 (resolvable private address)
2114                  */
2115                 if (use_ll_privacy(hdev) &&
2116                     hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2117                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2118                 else
2119                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2120
2121                 if (adv_instance) {
2122                         if (adv_rpa_valid(adv_instance))
2123                                 return 0;
2124                 } else {
2125                         if (rpa_valid(hdev))
2126                                 return 0;
2127                 }
2128
2129                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2130                 if (err < 0) {
2131                         bt_dev_err(hdev, "failed to generate new RPA");
2132                         return err;
2133                 }
2134
2135                 bacpy(rand_addr, &hdev->rpa);
2136
2137                 return 0;
2138         }
2139
2140         /* In case of required privacy without resolvable private address,
2141          * use a non-resolvable private address. This is useful for
2142          * non-connectable advertising.
2143          */
2144         if (require_privacy) {
2145                 bdaddr_t nrpa;
2146
2147                 while (true) {
2148                         /* The non-resolvable private address is generated
2149                          * from six random bytes with the two most significant
2150                          * bits cleared.
2151                          */
2152                         get_random_bytes(&nrpa, 6);
2153                         nrpa.b[5] &= 0x3f;
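                        /* e.g. a random 0xf7 in b[5] becomes 0x37: forcing
                         * the top two bits to 0b00 is what marks the address
                         * as a non-resolvable private address.
                         */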
2154
2155                         /* The non-resolvable private address shall not be
2156                          * equal to the public address.
2157                          */
2158                         if (bacmp(&hdev->bdaddr, &nrpa))
2159                                 break;
2160                 }
2161
2162                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2163                 bacpy(rand_addr, &nrpa);
2164
2165                 return 0;
2166         }
2167
2168         /* No privacy so use a public address. */
2169         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2170
2171         return 0;
2172 }
2173
2174 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2175 {
2176         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2177 }
2178
2179 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2180 {
2181         struct hci_dev *hdev = req->hdev;
2182
2183         /* If we're advertising or initiating an LE connection we can't
2184          * go ahead and change the random address at this time. This is
2185          * because the eventual initiator address used for the
2186          * subsequently created connection will be undefined (some
2187          * controllers use the new address and others the one we had
2188          * when the operation started).
2189          *
2190          * In this kind of scenario skip the update and let the random
2191          * address be updated at the next cycle.
2192          */
2193         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2194             hci_lookup_le_connect(hdev)) {
2195                 bt_dev_dbg(hdev, "Deferring random address update");
2196                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2197                 return;
2198         }
2199
2200         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2201 }
2202
2203 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2204 {
2205         struct hci_cp_le_set_ext_adv_params cp;
2206         struct hci_dev *hdev = req->hdev;
2207         bool connectable;
2208         u32 flags;
2209         bdaddr_t random_addr;
2210         u8 own_addr_type;
2211         int err;
2212         struct adv_info *adv_instance;
2213         bool secondary_adv;
2214
2215         if (instance > 0) {
2216                 adv_instance = hci_find_adv_instance(hdev, instance);
2217                 if (!adv_instance)
2218                         return -EINVAL;
2219         } else {
2220                 adv_instance = NULL;
2221         }
2222
2223         flags = get_adv_instance_flags(hdev, instance);
2224
2225         /* If the "connectable" instance flag was not set, then choose between
2226          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2227          */
2228         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2229                       mgmt_get_connectable(hdev);
2230
2231         if (!is_advertising_allowed(hdev, connectable))
2232                 return -EPERM;
2233
2234         /* Set require_privacy to true only when non-connectable
2235          * advertising is used. In that case it is fine to use a
2236          * non-resolvable private address.
2237          */
2238         err = hci_get_random_address(hdev, !connectable,
2239                                      adv_use_rpa(hdev, flags), adv_instance,
2240                                      &own_addr_type, &random_addr);
2241         if (err < 0)
2242                 return err;
2243
2244         memset(&cp, 0, sizeof(cp));
2245
2246         if (adv_instance) {
2247                 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2248                 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2249                 cp.tx_power = adv_instance->tx_power;
2250         } else {
2251                 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2252                 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2253                 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2254         }
2255
2256         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2257
2258         if (connectable) {
2259                 if (secondary_adv)
2260                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2261                 else
2262                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2263         } else if (adv_instance_is_scannable(hdev, instance) ||
2264                    (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
2265                 if (secondary_adv)
2266                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2267                 else
2268                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2269         } else {
2270                 if (secondary_adv)
2271                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2272                 else
2273                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2274         }
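        /* In short: connectable sets use (extended) ADV_IND, scannable ones
         * (extended) ADV_SCAN_IND and everything else (extended)
         * ADV_NONCONN_IND, with the legacy PDU variants chosen when no
         * secondary-PHY flag is set in the instance flags.
         */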
2275
2276         cp.own_addr_type = own_addr_type;
2277         cp.channel_map = hdev->le_adv_channel_map;
2278         cp.handle = instance;
2279
2280         if (flags & MGMT_ADV_FLAG_SEC_2M) {
2281                 cp.primary_phy = HCI_ADV_PHY_1M;
2282                 cp.secondary_phy = HCI_ADV_PHY_2M;
2283         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2284                 cp.primary_phy = HCI_ADV_PHY_CODED;
2285                 cp.secondary_phy = HCI_ADV_PHY_CODED;
2286         } else {
2287                 /* In all other cases use 1M */
2288                 cp.primary_phy = HCI_ADV_PHY_1M;
2289                 cp.secondary_phy = HCI_ADV_PHY_1M;
2290         }
2291
2292         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2293
2294         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2295             bacmp(&random_addr, BDADDR_ANY)) {
2296                 struct hci_cp_le_set_adv_set_rand_addr cp;
2297
2298                 /* Check if the random address needs to be updated */
2299                 if (adv_instance) {
2300                         if (!bacmp(&random_addr, &adv_instance->random_addr))
2301                                 return 0;
2302                 } else {
2303                         if (!bacmp(&random_addr, &hdev->random_addr))
2304                                 return 0;
2305                         /* Instance 0x00 doesn't have an adv_info, instead it
2306                          * uses hdev->random_addr to track its address so
2307                          * whenever it needs to be updated this also sets the
2308                          * random address, since hdev->random_addr is shared with
2309                          * the scan state machine.
2310                          */
2311                         set_random_addr(req, &random_addr);
2312                 }
2313
2314                 memset(&cp, 0, sizeof(cp));
2315
2316                 cp.handle = instance;
2317                 bacpy(&cp.bdaddr, &random_addr);
2318
2319                 hci_req_add(req,
2320                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2321                             sizeof(cp), &cp);
2322         }
2323
2324         return 0;
2325 }
2326
2327 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2328 {
2329         struct hci_dev *hdev = req->hdev;
2330         struct hci_cp_le_set_ext_adv_enable *cp;
2331         struct hci_cp_ext_adv_set *adv_set;
2332         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2333         struct adv_info *adv_instance;
2334
2335         if (instance > 0) {
2336                 adv_instance = hci_find_adv_instance(hdev, instance);
2337                 if (!adv_instance)
2338                         return -EINVAL;
2339         } else {
2340                 adv_instance = NULL;
2341         }
2342
2343         cp = (void *) data;
2344         adv_set = (void *) cp->data;
2345
2346         memset(cp, 0, sizeof(*cp));
2347
2348         cp->enable = 0x01;
2349         cp->num_of_sets = 0x01;
2350
2351         memset(adv_set, 0, sizeof(*adv_set));
2352
2353         adv_set->handle = instance;
2354
2355         /* Set duration per instance since controller is responsible for
2356          * scheduling it.
2357          */
2358         if (adv_instance && adv_instance->timeout) {
2359                 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2360
2361                 /* Time = N * 10 ms */
2362                 adv_set->duration = cpu_to_le16(duration / 10);
2363         }
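        /* Worked example: a 5 second instance timeout yields duration =
         * 5000 ms, so N = 500 is written and the controller ends the set on
         * its own after N * 10 ms = 5 seconds.
         */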
2364
2365         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2366                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2367                     data);
2368
2369         return 0;
2370 }
2371
2372 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2373 {
2374         struct hci_dev *hdev = req->hdev;
2375         struct hci_cp_le_set_ext_adv_enable *cp;
2376         struct hci_cp_ext_adv_set *adv_set;
2377         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2378         u8 req_size;
2379
2380         /* If request specifies an instance that doesn't exist, fail */
2381         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2382                 return -EINVAL;
2383
2384         memset(data, 0, sizeof(data));
2385
2386         cp = (void *)data;
2387         adv_set = (void *)cp->data;
2388
2389         /* Instance 0x00 indicates all advertising instances will be disabled */
2390         cp->num_of_sets = !!instance;
2391         cp->enable = 0x00;
2392
2393         adv_set->handle = instance;
2394
2395         req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2396         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2397
2398         return 0;
2399 }
2400
2401 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2402 {
2403         struct hci_dev *hdev = req->hdev;
2404
2405         /* If request specifies an instance that doesn't exist, fail */
2406         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2407                 return -EINVAL;
2408
2409         hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2410
2411         return 0;
2412 }
2413
2414 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2415 {
2416         struct hci_dev *hdev = req->hdev;
2417         struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2418         int err;
2419
2420         /* If the instance isn't pending, the controller knows about it, and
2421          * it's safe to disable.
2422          */
2423         if (adv_instance && !adv_instance->pending)
2424                 __hci_req_disable_ext_adv_instance(req, instance);
2425
2426         err = __hci_req_setup_ext_adv_instance(req, instance);
2427         if (err < 0)
2428                 return err;
2429
2430         __hci_req_update_scan_rsp_data(req, instance);
2431         __hci_req_enable_ext_advertising(req, instance);
2432
2433         return 0;
2434 }
2435
2436 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2437                                     bool force)
2438 {
2439         struct hci_dev *hdev = req->hdev;
2440         struct adv_info *adv_instance = NULL;
2441         u16 timeout;
2442
2443         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2444             list_empty(&hdev->adv_instances))
2445                 return -EPERM;
2446
2447         if (hdev->adv_instance_timeout)
2448                 return -EBUSY;
2449
2450         adv_instance = hci_find_adv_instance(hdev, instance);
2451         if (!adv_instance)
2452                 return -ENOENT;
2453
2454         /* A zero timeout means unlimited advertising. As long as there is
2455          * only one instance, duration should be ignored. We still set a timeout
2456          * in case further instances are being added later on.
2457          *
2458          * If the remaining lifetime of the instance is more than the duration
2459          * then the timeout corresponds to the duration, otherwise it will be
2460          * reduced to the remaining instance lifetime.
2461          */
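        /* Worked example: duration = 2 s with remaining_time = 5 s gives
         * timeout = 2, and remaining_time is trimmed to 3 s just below.
         */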
2462         if (adv_instance->timeout == 0 ||
2463             adv_instance->duration <= adv_instance->remaining_time)
2464                 timeout = adv_instance->duration;
2465         else
2466                 timeout = adv_instance->remaining_time;
2467
2468         /* The remaining time is being reduced unless the instance is being
2469          * advertised without time limit.
2470          */
2471         if (adv_instance->timeout)
2472                 adv_instance->remaining_time =
2473                                 adv_instance->remaining_time - timeout;
2474
2475         /* Only use work for scheduling instances with legacy advertising */
2476         if (!ext_adv_capable(hdev)) {
2477                 hdev->adv_instance_timeout = timeout;
2478                 queue_delayed_work(hdev->req_workqueue,
2479                            &hdev->adv_instance_expire,
2480                            msecs_to_jiffies(timeout * 1000));
2481         }
2482
2483         /* If we're just re-scheduling the same instance again then do not
2484          * execute any HCI commands. This happens when a single instance is
2485          * being advertised.
2486          */
2487         if (!force && hdev->cur_adv_instance == instance &&
2488             hci_dev_test_flag(hdev, HCI_LE_ADV))
2489                 return 0;
2490
2491         hdev->cur_adv_instance = instance;
2492         if (ext_adv_capable(hdev)) {
2493                 __hci_req_start_ext_adv(req, instance);
2494         } else {
2495                 __hci_req_update_adv_data(req, instance);
2496                 __hci_req_update_scan_rsp_data(req, instance);
2497                 __hci_req_enable_advertising(req);
2498         }
2499
2500         return 0;
2501 }
2502
2503 /* For a single instance:
2504  * - force == true: The instance will be removed even when its remaining
2505  *   lifetime is not zero.
2506  * - force == false: the instance will be deactivated but kept stored unless
2507  *   the remaining lifetime is zero.
2508  *
2509  * For instance == 0x00:
2510  * - force == true: All instances will be removed regardless of their timeout
2511  *   setting.
2512  * - force == false: Only instances that have a timeout will be removed.
2513  */
2514 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2515                                 struct hci_request *req, u8 instance,
2516                                 bool force)
2517 {
2518         struct adv_info *adv_instance, *n, *next_instance = NULL;
2519         int err;
2520         u8 rem_inst;
2521
2522         /* Cancel any timeout concerning the removed instance(s). */
2523         if (!instance || hdev->cur_adv_instance == instance)
2524                 cancel_adv_timeout(hdev);
2525
2526         /* Get the next instance to advertise BEFORE we remove
2527          * the current one. This can be the same instance again
2528          * if there is only one instance.
2529          */
2530         if (instance && hdev->cur_adv_instance == instance)
2531                 next_instance = hci_get_next_instance(hdev, instance);
2532
2533         if (instance == 0x00) {
2534                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2535                                          list) {
2536                         if (!(force || adv_instance->timeout))
2537                                 continue;
2538
2539                         rem_inst = adv_instance->instance;
2540                         err = hci_remove_adv_instance(hdev, rem_inst);
2541                         if (!err)
2542                                 mgmt_advertising_removed(sk, hdev, rem_inst);
2543                 }
2544         } else {
2545                 adv_instance = hci_find_adv_instance(hdev, instance);
2546
2547                 if (force || (adv_instance && adv_instance->timeout &&
2548                               !adv_instance->remaining_time)) {
2549                         /* Don't advertise a removed instance. */
2550                         if (next_instance &&
2551                             next_instance->instance == instance)
2552                                 next_instance = NULL;
2553
2554                         err = hci_remove_adv_instance(hdev, instance);
2555                         if (!err)
2556                                 mgmt_advertising_removed(sk, hdev, instance);
2557                 }
2558         }
2559
2560         if (!req || !hdev_is_powered(hdev) ||
2561             hci_dev_test_flag(hdev, HCI_ADVERTISING))
2562                 return;
2563
2564         if (next_instance && !ext_adv_capable(hdev))
2565                 __hci_req_schedule_adv_instance(req, next_instance->instance,
2566                                                 false);
2567 }
2568
2569 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2570                               bool use_rpa, u8 *own_addr_type)
2571 {
2572         struct hci_dev *hdev = req->hdev;
2573         int err;
2574
2575         /* If privacy is enabled use a resolvable private address. If
2576          * current RPA has expired or there is something else than
2577          * the current RPA in use, then generate a new one.
2578          */
2579         if (use_rpa) {
2580                 /* If the controller supports LL Privacy, use own address type
2581                  * 0x03 (resolvable private address)
2582                  */
2583                 if (use_ll_privacy(hdev) &&
2584                     hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2585                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2586                 else
2587                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2588
2589                 if (rpa_valid(hdev))
2590                         return 0;
2591
2592                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2593                 if (err < 0) {
2594                         bt_dev_err(hdev, "failed to generate new RPA");
2595                         return err;
2596                 }
2597
2598                 set_random_addr(req, &hdev->rpa);
2599
2600                 return 0;
2601         }
2602
2603         /* In case of required privacy without resolvable private address,
2604          * use a non-resolvable private address. This is useful for active
2605          * scanning and non-connectable advertising.
2606          */
2607         if (require_privacy) {
2608                 bdaddr_t nrpa;
2609
2610                 while (true) {
2611                         /* The non-resolvable private address is generated
2612                          * from six random bytes with the two most significant
2613                          * bits cleared.
2614                          */
2615                         get_random_bytes(&nrpa, 6);
2616                         nrpa.b[5] &= 0x3f;
2617
2618                         /* The non-resolvable private address shall not be
2619                          * equal to the public address.
2620                          */
2621                         if (bacmp(&hdev->bdaddr, &nrpa))
2622                                 break;
2623                 }
2624
2625                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2626                 set_random_addr(req, &nrpa);
2627                 return 0;
2628         }
2629
2630         /* If forcing static address is in use or there is no public
2631          * address, use the static address as the random address (but skip
2632          * the HCI command if the current random address is already the
2633          * static one).
2634          *
2635          * In case BR/EDR has been disabled on a dual-mode controller
2636          * and a static address has been configured, then use that
2637          * address instead of the public BR/EDR address.
2638          */
2639         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2640             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2641             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2642              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2643                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2644                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2645                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2646                                     &hdev->static_addr);
2647                 return 0;
2648         }
2649
2650         /* Neither privacy nor static address is being used so use a
2651          * public address.
2652          */
2653         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2654
2655         return 0;
2656 }
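/* To summarize the selection above: an RPA wins whenever privacy is enabled,
 * an NRPA when privacy is required but an RPA may not be used, the static
 * random address when it is forced or no public address exists, and the
 * public address otherwise.
 */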
2657
2658 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2659 {
2660         struct bdaddr_list *b;
2661
2662         list_for_each_entry(b, &hdev->accept_list, list) {
2663                 struct hci_conn *conn;
2664
2665                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2666                 if (!conn)
2667                         return true;
2668
2669                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2670                         return true;
2671         }
2672
2673         return false;
2674 }
2675
2676 void __hci_req_update_scan(struct hci_request *req)
2677 {
2678         struct hci_dev *hdev = req->hdev;
2679         u8 scan;
2680
2681         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2682                 return;
2683
2684         if (!hdev_is_powered(hdev))
2685                 return;
2686
2687         if (mgmt_powering_down(hdev))
2688                 return;
2689
2690         if (hdev->scanning_paused)
2691                 return;
2692
2693         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2694             disconnected_accept_list_entries(hdev))
2695                 scan = SCAN_PAGE;
2696         else
2697                 scan = SCAN_DISABLED;
2698
2699         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2700                 scan |= SCAN_INQUIRY;
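        /* e.g. a connectable and discoverable adapter writes SCAN_PAGE |
         * SCAN_INQUIRY (0x03 with the standard scan enable values), turning
         * on page and inquiry scan with a single command.
         */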
2701
2702         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2703             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2704                 return;
2705
2706         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2707 }
2708
2709 static int update_scan(struct hci_request *req, unsigned long opt)
2710 {
2711         hci_dev_lock(req->hdev);
2712         __hci_req_update_scan(req);
2713         hci_dev_unlock(req->hdev);
2714         return 0;
2715 }
2716
2717 static void scan_update_work(struct work_struct *work)
2718 {
2719         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2720
2721         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2722 }
2723
2724 static int connectable_update(struct hci_request *req, unsigned long opt)
2725 {
2726         struct hci_dev *hdev = req->hdev;
2727
2728         hci_dev_lock(hdev);
2729
2730         __hci_req_update_scan(req);
2731
2732         /* If BR/EDR is not enabled and we disable advertising as a
2733          * by-product of disabling connectable, we need to update the
2734          * advertising flags.
2735          */
2736         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2737                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2738
2739         /* Update the advertising parameters if necessary */
2740         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2741             !list_empty(&hdev->adv_instances)) {
2742                 if (ext_adv_capable(hdev))
2743                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2744                 else
2745                         __hci_req_enable_advertising(req);
2746         }
2747
2748         __hci_update_background_scan(req);
2749
2750         hci_dev_unlock(hdev);
2751
2752         return 0;
2753 }
2754
2755 static void connectable_update_work(struct work_struct *work)
2756 {
2757         struct hci_dev *hdev = container_of(work, struct hci_dev,
2758                                             connectable_update);
2759         u8 status;
2760
2761         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2762         mgmt_set_connectable_complete(hdev, status);
2763 }
2764
2765 static u8 get_service_classes(struct hci_dev *hdev)
2766 {
2767         struct bt_uuid *uuid;
2768         u8 val = 0;
2769
2770         list_for_each_entry(uuid, &hdev->uuids, list)
2771                 val |= uuid->svc_hint;
2772
2773         return val;
2774 }
2775
2776 void __hci_req_update_class(struct hci_request *req)
2777 {
2778         struct hci_dev *hdev = req->hdev;
2779         u8 cod[3];
2780
2781         bt_dev_dbg(hdev, "");
2782
2783         if (!hdev_is_powered(hdev))
2784                 return;
2785
2786         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2787                 return;
2788
2789         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2790                 return;
2791
2792         cod[0] = hdev->minor_class;
2793         cod[1] = hdev->major_class;
2794         cod[2] = get_service_classes(hdev);
2795
2796         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2797                 cod[1] |= 0x20;
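        /* Setting 0x20 in cod[1] flips CoD bit 13, the Limited Discoverable
         * Mode service class bit from the Baseband assigned numbers.
         */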
2798
2799         if (memcmp(cod, hdev->dev_class, 3) == 0)
2800                 return;
2801
2802         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2803 }
2804
2805 static void write_iac(struct hci_request *req)
2806 {
2807         struct hci_dev *hdev = req->hdev;
2808         struct hci_cp_write_current_iac_lap cp;
2809
2810         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2811                 return;
2812
2813         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2814                 /* Limited discoverable mode */
2815                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2816                 cp.iac_lap[0] = 0x00;   /* LIAC */
2817                 cp.iac_lap[1] = 0x8b;
2818                 cp.iac_lap[2] = 0x9e;
2819                 cp.iac_lap[3] = 0x33;   /* GIAC */
2820                 cp.iac_lap[4] = 0x8b;
2821                 cp.iac_lap[5] = 0x9e;
2822         } else {
2823                 /* General discoverable mode */
2824                 cp.num_iac = 1;
2825                 cp.iac_lap[0] = 0x33;   /* GIAC */
2826                 cp.iac_lap[1] = 0x8b;
2827                 cp.iac_lap[2] = 0x9e;
2828         }
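        /* The byte triplets above are the little-endian LAPs of the two
         * reserved inquiry access codes: LIAC = 0x9E8B00 and GIAC = 0x9E8B33.
         */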
2829
2830         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2831                     (cp.num_iac * 3) + 1, &cp);
2832 }
2833
2834 static int discoverable_update(struct hci_request *req, unsigned long opt)
2835 {
2836         struct hci_dev *hdev = req->hdev;
2837
2838         hci_dev_lock(hdev);
2839
2840         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2841                 write_iac(req);
2842                 __hci_req_update_scan(req);
2843                 __hci_req_update_class(req);
2844         }
2845
2846         /* Advertising instances don't use the global discoverable setting, so
2847          * only update AD if advertising was enabled using Set Advertising.
2848          */
2849         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2850                 __hci_req_update_adv_data(req, 0x00);
2851
2852                 /* Discoverable mode affects the local advertising
2853                  * address in limited privacy mode.
2854                  */
2855                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2856                         if (ext_adv_capable(hdev))
2857                                 __hci_req_start_ext_adv(req, 0x00);
2858                         else
2859                                 __hci_req_enable_advertising(req);
2860                 }
2861         }
2862
2863         hci_dev_unlock(hdev);
2864
2865         return 0;
2866 }
2867
2868 static void discoverable_update_work(struct work_struct *work)
2869 {
2870         struct hci_dev *hdev = container_of(work, struct hci_dev,
2871                                             discoverable_update);
2872         u8 status;
2873
2874         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2875         mgmt_set_discoverable_complete(hdev, status);
2876 }
2877
2878 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2879                       u8 reason)
2880 {
2881         switch (conn->state) {
2882         case BT_CONNECTED:
2883         case BT_CONFIG:
2884                 if (conn->type == AMP_LINK) {
2885                         struct hci_cp_disconn_phy_link cp;
2886
2887                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2888                         cp.reason = reason;
2889                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2890                                     &cp);
2891                 } else {
2892                         struct hci_cp_disconnect dc;
2893
2894                         dc.handle = cpu_to_le16(conn->handle);
2895                         dc.reason = reason;
2896                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2897                 }
2898
2899                 conn->state = BT_DISCONN;
2900
2901                 break;
2902         case BT_CONNECT:
2903 #ifdef TIZEN_BT
2904                 if (conn->type == LE_LINK && bacmp(&conn->dst, BDADDR_ANY)) {
2905 #else
2906                 if (conn->type == LE_LINK) {
2907 #endif
2908                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2909                                 break;
2910                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2911                                     0, NULL);
2912                 } else if (conn->type == ACL_LINK) {
2913                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2914                                 break;
2915                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2916                                     6, &conn->dst);
2917                 }
2918                 break;
2919         case BT_CONNECT2:
2920                 if (conn->type == ACL_LINK) {
2921                         struct hci_cp_reject_conn_req rej;
2922
2923                         bacpy(&rej.bdaddr, &conn->dst);
2924                         rej.reason = reason;
2925
2926                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2927                                     sizeof(rej), &rej);
2928                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2929                         struct hci_cp_reject_sync_conn_req rej;
2930
2931                         bacpy(&rej.bdaddr, &conn->dst);
2932
2933                         /* SCO rejection has its own limited set of
2934                          * allowed error values (0x0D-0x0F), which isn't
2935                          * compatible with most values passed to this
2936                          * function. To be safe, hard-code one of the
2937                          * values that is suitable for SCO.
2938                          */
2939                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2940
2941                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2942                                     sizeof(rej), &rej);
2943                 }
2944                 break;
2945         default:
2946                 conn->state = BT_CLOSED;
2947                 break;
2948         }
2949 }
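
/* In summary, __hci_abort_conn() maps the connection state to an HCI
 * command: BT_CONNECTED/BT_CONFIG issue Disconnect (or Disconnect
 * Physical Link for AMP), BT_CONNECT issues LE Create Connection Cancel
 * or Create Connection Cancel, BT_CONNECT2 rejects the incoming request,
 * and any other state is marked closed without sending a command.
 */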
2950
2951 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2952 {
2953         if (status)
2954                 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2955 }
2956
2957 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2958 {
2959         struct hci_request req;
2960         int err;
2961
2962         hci_req_init(&req, conn->hdev);
2963
2964         __hci_abort_conn(&req, conn, reason);
2965
2966         err = hci_req_run(&req, abort_conn_complete);
2967         if (err && err != -ENODATA) {
2968                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2969                 return err;
2970         }
2971
2972         return 0;
2973 }
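
/* A minimal usage sketch: a caller tearing down an established link with
 * a standard reason code might do
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * where the reason is one of the HCI error codes; the particular code
 * shown here is only an illustration, not a requirement of this API.
 */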
2974
2975 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2976 {
2977         hci_dev_lock(req->hdev);
2978         __hci_update_background_scan(req);
2979         hci_dev_unlock(req->hdev);
2980         return 0;
2981 }
2982
2983 static void bg_scan_update(struct work_struct *work)
2984 {
2985         struct hci_dev *hdev = container_of(work, struct hci_dev,
2986                                             bg_scan_update);
2987         struct hci_conn *conn;
2988         u8 status;
2989         int err;
2990
2991         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2992         if (!err)
2993                 return;
2994
2995         hci_dev_lock(hdev);
2996
2997         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2998         if (conn)
2999                 hci_le_conn_failed(conn, status);
3000
3001         hci_dev_unlock(hdev);
3002 }
3003
3004 static int le_scan_disable(struct hci_request *req, unsigned long opt)
3005 {
3006         hci_req_add_le_scan_disable(req, false);
3007         return 0;
3008 }
3009
3010 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
3011 {
3012         u8 length = opt;
3013         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
3014         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
3015         struct hci_cp_inquiry cp;
3016
3017         if (test_bit(HCI_INQUIRY, &req->hdev->flags))
3018                 return 0;
3019
3020         bt_dev_dbg(req->hdev, "");
3021
3022         hci_dev_lock(req->hdev);
3023         hci_inquiry_cache_flush(req->hdev);
3024         hci_dev_unlock(req->hdev);
3025
3026         memset(&cp, 0, sizeof(cp));
3027
3028         if (req->hdev->discovery.limited)
3029                 memcpy(&cp.lap, liac, sizeof(cp.lap));
3030         else
3031                 memcpy(&cp.lap, giac, sizeof(cp.lap));
3032
3033         cp.length = length;
3034
3035         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3036
3037         return 0;
3038 }
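
/* For reference, hci_cp_inquiry::length is expressed in units of 1.28
 * seconds, as defined for the HCI Inquiry command, so the inquiry length
 * constants passed in via 'opt' are durations in that unit rather than
 * seconds or jiffies.
 */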
3039
3040 static void le_scan_disable_work(struct work_struct *work)
3041 {
3042         struct hci_dev *hdev = container_of(work, struct hci_dev,
3043                                             le_scan_disable.work);
3044         u8 status;
3045
3046         bt_dev_dbg(hdev, "");
3047
3048         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3049                 return;
3050
3051         cancel_delayed_work(&hdev->le_scan_restart);
3052
3053         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
3054         if (status) {
3055                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
3056                            status);
3057                 return;
3058         }
3059
3060         hdev->discovery.scan_start = 0;
3061
3062         /* If we were running an LE-only scan, change the discovery
3063          * state. If we were running both LE and BR/EDR inquiry
3064          * simultaneously and the BR/EDR inquiry has already finished,
3065          * stop discovery; otherwise the BR/EDR inquiry will stop
3066          * discovery when it finishes. If a remote device name is about
3067          * to be resolved, do not change the discovery state.
3068          */
3069
3070         if (hdev->discovery.type == DISCOV_TYPE_LE)
3071                 goto discov_stopped;
3072
3073         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3074                 return;
3075
3076         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3077                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3078                     hdev->discovery.state != DISCOVERY_RESOLVING)
3079                         goto discov_stopped;
3080
3081                 return;
3082         }
3083
3084         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3085                      HCI_CMD_TIMEOUT, &status);
3086         if (status) {
3087                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3088                 goto discov_stopped;
3089         }
3090
3091         return;
3092
3093 discov_stopped:
3094         hci_dev_lock(hdev);
3095         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3096         hci_dev_unlock(hdev);
3097 }
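
/* The flow above: LE-only discovery is stopped immediately; with the
 * simultaneous-discovery quirk, interleaved discovery keeps waiting for
 * the still-running inquiry or name resolution; otherwise a BR/EDR
 * inquiry is started next and discovery stops when it completes.
 */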
3098
3099 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3100 {
3101         struct hci_dev *hdev = req->hdev;
3102
3103         /* If the controller is not scanning, we are done. */
3104         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3105                 return 0;
3106
3107         if (hdev->scanning_paused) {
3108                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3109                 return 0;
3110         }
3111
3112         hci_req_add_le_scan_disable(req, false);
3113
3114         if (use_ext_scan(hdev)) {
3115                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3116
3117                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3118                 ext_enable_cp.enable = LE_SCAN_ENABLE;
3119                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3120
3121                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3122                             sizeof(ext_enable_cp), &ext_enable_cp);
3123         } else {
3124                 struct hci_cp_le_set_scan_enable cp;
3125
3126                 memset(&cp, 0, sizeof(cp));
3127                 cp.enable = LE_SCAN_ENABLE;
3128                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3129                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3130         }
3131
3132         return 0;
3133 }
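
/* Restarting as a disable/enable pair matters on controllers with the
 * HCI_QUIRK_STRICT_DUPLICATE_FILTER quirk: re-enabling scanning resets
 * the duplicate filter, so devices that were already reported once can
 * be reported again during the same discovery session.
 */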
3134
3135 static void le_scan_restart_work(struct work_struct *work)
3136 {
3137         struct hci_dev *hdev = container_of(work, struct hci_dev,
3138                                             le_scan_restart.work);
3139         unsigned long timeout, duration, scan_start, now;
3140         u8 status;
3141
3142         bt_dev_dbg(hdev, "");
3143
3144         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3145         if (status) {
3146                 bt_dev_err(hdev, "failed to restart LE scan: status 0x%02x",
3147                            status);
3148                 return;
3149         }
3150
3151         hci_dev_lock(hdev);
3152
3153         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3154             !hdev->discovery.scan_start)
3155                 goto unlock;
3156
3157         /* When the scan was started, hdev->le_scan_disable was queued to
3158          * run 'duration' after scan_start. Restarting the scan canceled
3159          * that work, so queue it again with the remaining timeout to make
3160          * sure the scan does not run indefinitely.
3161          */
3162         duration = hdev->discovery.scan_duration;
3163         scan_start = hdev->discovery.scan_start;
3164         now = jiffies;
3165         if (now - scan_start <= duration) {
3166                 int elapsed;
3167
3168                 if (now >= scan_start)
3169                         elapsed = now - scan_start;
3170                 else
3171                         elapsed = ULONG_MAX - scan_start + now;
3172
3173                 timeout = duration - elapsed;
3174         } else {
3175                 timeout = 0;
3176         }
3177
3178         queue_delayed_work(hdev->req_workqueue,
3179                            &hdev->le_scan_disable, timeout);
3180
3181 unlock:
3182         hci_dev_unlock(hdev);
3183 }
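
/* A worked example of the timeout math above, assuming for illustration
 * a scan_duration equivalent to 10240 ms: if roughly 4000 ms have
 * elapsed since scan_start, le_scan_disable is requeued to fire after
 * the remaining ~6240 ms; if the duration has already fully elapsed,
 * timeout is 0 and the disable work runs immediately.
 */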
3184
3185 static int active_scan(struct hci_request *req, unsigned long opt)
3186 {
3187         uint16_t interval = opt;
3188         struct hci_dev *hdev = req->hdev;
3189         u8 own_addr_type;
3190         /* Accept list is not used for discovery */
3191         u8 filter_policy = 0x00;
3192         /* Default is to enable duplicates filter */
3193         u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3194         /* Discovery doesn't require controller address resolution */
3195         bool addr_resolv = false;
3196         int err;
3197
3198         bt_dev_dbg(hdev, "");
3199
3200         /* If the controller is already scanning, background scanning is
3201          * running. Temporarily stop it so that the discovery scan
3202          * parameters can be set.
3203          */
3204         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3205                 hci_req_add_le_scan_disable(req, false);
3206                 cancel_interleave_scan(hdev);
3207         }
3208
3209         /* All active scans will be done with either a resolvable private
3210          * address (when the privacy feature has been enabled) or a
3211          * non-resolvable private address.
3212          */
3213         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3214                                         &own_addr_type);
3215         if (err < 0)
3216                 own_addr_type = ADDR_LE_DEV_PUBLIC;
3217
3218         hci_dev_lock(hdev);
3219         if (hci_is_adv_monitoring(hdev)) {
3220                 /* The duplicate filter should be disabled when an
3221                  * advertisement monitor is active; otherwise AdvMon can
3222                  * only receive one advertisement per peer during active
3223                  * scanning and might falsely report those peers as lost.
3224                  *
3225                  * Note that different controllers interpret "duplicate"
3226                  * differently: some consider packets with the same address
3227                  * as duplicates, others only packets with the same address
3228                  * and the same RSSI. In the latter case disabling the
3229                  * filter is not strictly necessary, but since active
3230                  * scanning typically runs only for a short period, the
3231                  * extra power impact should be negligible.
3232                  */
3233                 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
3234         }
3235         hci_dev_unlock(hdev);
3236
3237         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3238                            hdev->le_scan_window_discovery, own_addr_type,
3239                            filter_policy, filter_dup, addr_resolv);
3240         return 0;
3241 }
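
/* Note that active_scan() falls back to the public address when no
 * suitable random address can be programmed; scanning still proceeds,
 * it just exposes the controller's identity address in scan requests.
 */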
3242
3243 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3244 {
3245         int err;
3246
3247         bt_dev_dbg(req->hdev, "");
3248
3249         err = active_scan(req, opt);
3250         if (err)
3251                 return err;
3252
3253         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3254 }
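
/* interleaved_discov() queues the LE scan and the BR/EDR inquiry into a
 * single request; as used in start_discovery() below, this path is only
 * taken when the controller sets HCI_QUIRK_SIMULTANEOUS_DISCOVERY and
 * can run both procedures at the same time.
 */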
3255
3256 static void start_discovery(struct hci_dev *hdev, u8 *status)
3257 {
3258         unsigned long timeout;
3259
3260         bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3261
3262         switch (hdev->discovery.type) {
3263         case DISCOV_TYPE_BREDR:
3264                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3265                         hci_req_sync(hdev, bredr_inquiry,
3266                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3267                                      status);
3268                 return;
3269         case DISCOV_TYPE_INTERLEAVED:
3270                 /* When running simultaneous discovery, the LE scanning time
3271                  * should occupy the whole discovery time since BR/EDR inquiry
3272                  * and LE scanning are scheduled by the controller.
3273                  *
3274                  * With interleaved discovery, in comparison, BR/EDR inquiry
3275                  * and LE scanning run sequentially with separate
3276                  * timeouts.
3277                  */
3278                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3279                              &hdev->quirks)) {
3280                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3281                         /* During simultaneous discovery, we double the LE
3282                          * scan interval to leave the controller time to
3283                          * perform BR/EDR inquiry as well.
3284                          */
3285                         hci_req_sync(hdev, interleaved_discov,
3286                                      hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3287                                      status);
3288                         break;
3289                 }
3290
3291                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3292                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3293                              HCI_CMD_TIMEOUT, status);
3294                 break;
3295         case DISCOV_TYPE_LE:
3296                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3297                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3298                              HCI_CMD_TIMEOUT, status);
3299                 break;
3300         default:
3301                 *status = HCI_ERROR_UNSPECIFIED;
3302                 return;
3303         }
3304
3305         if (*status)
3306                 return;
3307
3308         bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3309
3310         /* When service discovery is used and the controller has a
3311          * strict duplicate filter, it is important to remember the
3312          * start and duration of the scan. This is required for
3313          * restarting scanning during the discovery phase.
3314          */
3315         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3316             hdev->discovery.result_filtering) {
3317                 hdev->discovery.scan_start = jiffies;
3318                 hdev->discovery.scan_duration = timeout;
3319         }
3320
3321         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3322                            timeout);
3323 }
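
/* Whichever LE-based branch ran above, the discovery is bounded by
 * queuing the le_scan_disable delayed work with the chosen timeout;
 * BR/EDR-only inquiry returns early and is bounded by the inquiry
 * length instead.
 */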
3324
3325 bool hci_req_stop_discovery(struct hci_request *req)
3326 {
3327         struct hci_dev *hdev = req->hdev;
3328         struct discovery_state *d = &hdev->discovery;
3329         struct hci_cp_remote_name_req_cancel cp;
3330         struct inquiry_entry *e;
3331         bool ret = false;
3332
3333         bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3334
3335         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3336                 if (test_bit(HCI_INQUIRY, &hdev->flags))
3337                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3338
3339                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3340                         cancel_delayed_work(&hdev->le_scan_disable);
3341                         cancel_delayed_work(&hdev->le_scan_restart);
3342                         hci_req_add_le_scan_disable(req, false);
3343                 }
3344
3345                 ret = true;
3346         } else {
3347                 /* Passive scanning */
3348                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3349                         hci_req_add_le_scan_disable(req, false);
3350                         ret = true;
3351                 }
3352         }
3353
3354         /* No further actions needed for LE-only discovery */
3355         if (d->type == DISCOV_TYPE_LE)
3356                 return ret;
3357
3358         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3359                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3360                                                      NAME_PENDING);
3361                 if (!e)
3362                         return ret;
3363
3364                 bacpy(&cp.bdaddr, &e->data.bdaddr);
3365                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3366                             &cp);
3367                 ret = true;
3368         }
3369
3370         return ret;
3371 }
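
/* The boolean result tells the caller whether any HCI commands were
 * queued on the request, i.e. whether there is anything to wait for
 * before discovery can be considered fully stopped.
 */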
3372
3373 static int stop_discovery(struct hci_request *req, unsigned long opt)
3374 {
3375         hci_dev_lock(req->hdev);
3376         hci_req_stop_discovery(req);
3377         hci_dev_unlock(req->hdev);
3378
3379         return 0;
3380 }
3381
3382 static void discov_update(struct work_struct *work)
3383 {
3384         struct hci_dev *hdev = container_of(work, struct hci_dev,
3385                                             discov_update);
3386         u8 status = 0;
3387
3388         switch (hdev->discovery.state) {
3389         case DISCOVERY_STARTING:
3390                 start_discovery(hdev, &status);
3391                 mgmt_start_discovery_complete(hdev, status);
3392                 if (status)
3393                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3394                 else
3395                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3396                 break;
3397         case DISCOVERY_STOPPING:
3398                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3399                 mgmt_stop_discovery_complete(hdev, status);
3400                 if (!status)
3401                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3402                 break;
3403         case DISCOVERY_STOPPED:
3404         default:
3405                 return;
3406         }
3407 }
3408
3409 static void discov_off(struct work_struct *work)
3410 {
3411         struct hci_dev *hdev = container_of(work, struct hci_dev,
3412                                             discov_off.work);
3413
3414         bt_dev_dbg(hdev, "");
3415
3416         hci_dev_lock(hdev);
3417
3418         /* When the discoverable timeout triggers, just make sure the
3419          * limited discoverable flag is cleared. Even when the timeout
3420          * was triggered from general discoverable mode, it is safe to
3421          * unconditionally clear the flag.
3422          */
3423         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3424         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3425         hdev->discov_timeout = 0;
3426
3427         hci_dev_unlock(hdev);
3428
3429         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3430         mgmt_new_settings(hdev);
3431 }
3432
3433 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3434 {
3435         struct hci_dev *hdev = req->hdev;
3436         u8 link_sec;
3437
3438         hci_dev_lock(hdev);
3439
3440         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3441             !lmp_host_ssp_capable(hdev)) {
3442                 u8 mode = 0x01;
3443
3444                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3445
3446                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3447                         u8 support = 0x01;
3448
3449                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3450                                     sizeof(support), &support);
3451                 }
3452         }
3453
3454         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3455             lmp_bredr_capable(hdev)) {
3456                 struct hci_cp_write_le_host_supported cp;
3457
3458                 cp.le = 0x01;
3459                 cp.simul = 0x00;
3460
3461                 /* First check whether we already have the right
3462                  * host state (host features set).
3463                  */
3464                 if (cp.le != lmp_host_le_capable(hdev) ||
3465                     cp.simul != lmp_host_le_br_capable(hdev))
3466                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3467                                     sizeof(cp), &cp);
3468         }
3469
3470         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3471                 /* Make sure the controller has a good default for
3472                  * advertising data. This also applies to the case
3473                  * where BR/EDR was toggled during the AUTO_OFF phase.
3474                  */
3475                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3476                     list_empty(&hdev->adv_instances)) {
3477                         int err;
3478
3479                         if (ext_adv_capable(hdev)) {
3480                                 err = __hci_req_setup_ext_adv_instance(req,
3481                                                                        0x00);
3482                                 if (!err)
3483                                         __hci_req_update_scan_rsp_data(req,
3484                                                                        0x00);
3485                         } else {
3486                                 err = 0;
3487                                 __hci_req_update_adv_data(req, 0x00);
3488                                 __hci_req_update_scan_rsp_data(req, 0x00);
3489                         }
3490
3491                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3492                                 if (!ext_adv_capable(hdev))
3493                                         __hci_req_enable_advertising(req);
3494                                 else if (!err)
3495                                         __hci_req_enable_ext_advertising(req,
3496                                                                          0x00);
3497                         }
3498                 } else if (!list_empty(&hdev->adv_instances)) {
3499                         struct adv_info *adv_instance;
3500
3501                         adv_instance = list_first_entry(&hdev->adv_instances,
3502                                                         struct adv_info, list);
3503                         __hci_req_schedule_adv_instance(req,
3504                                                         adv_instance->instance,
3505                                                         true);
3506                 }
3507         }
3508
3509         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3510         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3511                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3512                             sizeof(link_sec), &link_sec);
3513
3514         if (lmp_bredr_capable(hdev)) {
3515                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3516                         __hci_req_write_fast_connectable(req, true);
3517                 else
3518                         __hci_req_write_fast_connectable(req, false);
3519                 __hci_req_update_scan(req);
3520                 __hci_req_update_class(req);
3521                 __hci_req_update_name(req);
3522                 __hci_req_update_eir(req);
3523         }
3524
3525         hci_dev_unlock(hdev);
3526         return 0;
3527 }
3528
3529 int __hci_req_hci_power_on(struct hci_dev *hdev)
3530 {
3531         /* Register the available SMP channels (BR/EDR and LE) only when
3532          * successfully powering on the controller. This late
3533          * registration is required so that LE SMP can clearly decide
3534          * whether the public address or the static address is used.
3535          */
3536         smp_register(hdev);
3537
3538         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3539                               NULL);
3540 }
3541
3542 void hci_request_setup(struct hci_dev *hdev)
3543 {
3544         INIT_WORK(&hdev->discov_update, discov_update);
3545         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3546         INIT_WORK(&hdev->scan_update, scan_update_work);
3547         INIT_WORK(&hdev->connectable_update, connectable_update_work);
3548         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3549         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3550         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3551         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3552         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3553         INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3554 }
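
/* Every work item initialized above has a matching cancellation in
 * hci_request_cancel_all() below, so powering the controller down does
 * not leave stale requests behind.
 */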
3555
3556 void hci_request_cancel_all(struct hci_dev *hdev)
3557 {
3558         hci_req_sync_cancel(hdev, ENODEV);
3559
3560         cancel_work_sync(&hdev->discov_update);
3561         cancel_work_sync(&hdev->bg_scan_update);
3562         cancel_work_sync(&hdev->scan_update);
3563         cancel_work_sync(&hdev->connectable_update);
3564         cancel_work_sync(&hdev->discoverable_update);
3565         cancel_delayed_work_sync(&hdev->discov_off);
3566         cancel_delayed_work_sync(&hdev->le_scan_disable);
3567         cancel_delayed_work_sync(&hdev->le_scan_restart);
3568
3569         if (hdev->adv_instance_timeout) {
3570                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3571                 hdev->adv_instance_timeout = 0;
3572         }
3573
3574         cancel_interleave_scan(hdev);
3575 }