net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}
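
/* Example sketch (illustrative only, not part of the stack): the typical
 * request lifecycle is init -> add one or more commands -> run. HCI_OP_RESET
 * from <net/bluetooth/hci.h> is used here purely as a parameter-less
 * stand-in command.
 */
static int __maybe_unused example_req_lifecycle(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);

        /* Queue a command; build errors are latched in req.err and
         * reported when the request is run.
         */
        hci_req_add(&req, HCI_OP_RESET, 0, NULL);

        /* Splice the queued commands onto hdev->cmd_q and kick the
         * command worker; NULL means no completion callback.
         */
        return hci_req_run(&req, NULL);
}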

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        bt_dev_dbg(hdev, "result 0x%2.2x", result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        bt_dev_dbg(hdev, "err 0x%2.2x", err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        bt_dev_dbg(hdev, "");

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        bt_dev_dbg(hdev, "end: err %d", err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
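
/* Example sketch (illustrative only): a blocking command round-trip via
 * __hci_cmd_sync(). This assumes HCI_OP_READ_LOCAL_VERSION, struct
 * hci_rp_read_local_version and HCI_CMD_TIMEOUT from <net/bluetooth/hci.h>,
 * and that the caller serializes via the req_sync lock as the leading
 * underscores require.
 */
static u8 __maybe_unused example_read_hci_version(struct hci_dev *hdev)
{
        struct hci_rp_read_local_version *rp;
        struct sk_buff *skb;
        u8 hci_ver;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                             HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return 0;

        /* The Command Complete parameters are returned as the skb payload */
        rp = (struct hci_rp_read_local_version *)skb->data;
        hci_ver = rp->hci_ver;
        kfree_skb(skb);

        return hci_ver;
}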

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        bt_dev_dbg(hdev, "start");

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        bt_dev_dbg(hdev, "end: err %d", err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        /* Check the state after obtaining the lock to protect HCI_UP
         * against any races from hci_dev_do_close() when the controller
         * gets removed.
         */
        if (test_bit(HCI_UP, &hdev->flags))
                ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        else
                ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);

        return ret;
}
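
/* Example sketch (illustrative only): a request-builder callback of the
 * shape hci_req_sync() consumes. The opt argument is an opaque value the
 * caller threads through; it is unused here.
 */
static int __maybe_unused example_build_reset(struct hci_request *req,
                                              unsigned long opt)
{
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

/* Possible call site (sketch):
 *      u8 status;
 *      hci_req_sync(hdev, example_build_reset, 0, HCI_CMD_TIMEOUT, &status);
 */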

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        bt_dev_dbg(hdev, "skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
#ifndef TIZEN_BT
        struct hci_cp_write_page_scan_activity acp;
#endif
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

#ifdef TIZEN_BT
        if (enable)
                type = PAGE_SCAN_TYPE_INTERLACED;
        else
                type = PAGE_SCAN_TYPE_STANDARD; /* default */
#else
        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = hdev->def_page_scan_type;
                acp.interval = cpu_to_le16(hdev->def_page_scan_int);
        }

        acp.window = cpu_to_le16(hdev->def_page_scan_window);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);
#endif

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
        hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
        return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
        bt_dev_dbg(hdev, "cancelling interleave scan");

        cancel_delayed_work_sync(&hdev->interleave_scan);

        hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleave scan was started by this function;
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
        /* Do interleaved scan only if all of the following are true:
         * - There is at least one ADV monitor
         * - At least one pending LE connection or one device to be scanned for
         * - Monitor offloading is not supported
         * If so, we should alternate between allowlist scan and one without
         * any filters to save power.
         */
        bool use_interleaving = hci_is_adv_monitoring(hdev) &&
                                !(list_empty(&hdev->pend_le_conns) &&
                                  list_empty(&hdev->pend_le_reports)) &&
                                hci_get_adv_monitor_offload_ext(hdev) ==
                                    HCI_ADV_MONITOR_EXT_NONE;
        bool is_interleaving = is_interleave_scanning(hdev);

        if (use_interleaving && !is_interleaving) {
                start_interleave_scan(hdev);
                bt_dev_dbg(hdev, "starting interleave scan");
                return true;
        }

        if (!use_interleaving && is_interleaving)
                cancel_interleave_scan(hdev);

        return false;
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure that proper values are set for the RSSI threshold and
         * UUID filter list, so it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        bt_dev_dbg(hdev, "ADV monitoring is %s",
                   hci_is_adv_monitoring(hdev) ? "on" : "off");

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports) &&
            !hci_is_adv_monitoring(hdev)) {
                /* If there are no pending LE connections, no devices to
                 * be scanned for and no ADV monitors, we should stop the
                 * background scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req, false);

                bt_dev_dbg(hdev, "stopping background scanning");
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req, false);

                hci_req_add_le_passive_scan(req);
                bt_dev_dbg(hdev, "starting background scanning");
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}
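
/* For reference, each UUID list built by these helpers is one EIR/AD
 * structure: a length octet (covering the type octet plus data), a type
 * octet (e.g. EIR_UUID16_ALL, downgraded to the _SOME variant when
 * truncated) and the UUID values in little-endian order. A single 16-bit
 * Audio Sink UUID 0x110b, for example, encodes as 03 03 0b 11.
 */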

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (hdev->suspended)
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        /* Disable address resolution */
        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
                __u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
                                 u8 bdaddr_type)
{
        struct hci_cp_le_del_from_accept_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(req->hdev) &&
            hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
                if (irk) {
                        struct hci_cp_le_del_from_resolv_list cp;

                        cp.bdaddr_type = bdaddr_type;
                        bacpy(&cp.bdaddr, bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
                              struct hci_conn_params *params, u8 *num_entries,
                              bool allow_rpa)
{
        struct hci_cp_le_add_to_accept_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in accept list */
        if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* Select filter policy to accept all advertising */
        if (*num_entries >= hdev->le_accept_list_size)
                return -1;

        /* Accept list can not be used with RPAs */
        if (!allow_rpa &&
            !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in accept list */
        if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                                   params->current_flags))
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(hdev, &params->addr,
                                           params->addr_type);
                if (irk) {
                        struct hci_cp_le_add_to_resolv_list cp;

                        cp.bdaddr_type = params->addr_type;
                        bacpy(&cp.bdaddr, &params->addr);
                        memcpy(cp.peer_irk, irk->val, 16);

                        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                                memcpy(cp.local_irk, hdev->irk, 16);
                        else
                                memset(cp.local_irk, 0, 16);

                        hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }

        return 0;
}

static u8 update_accept_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow usage of the accept list even with RPAs in suspend.
         * In the worst case, we won't be able to wake from devices that
         * use the privacy 1.2 features. Additionally, once we support
         * privacy 1.2 and IRK offloading, we can update this to also
         * check for those conditions.
         */
        bool allow_rpa = hdev->suspended;

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
                allow_rpa = true;

        /* Go through the current accept list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_accept_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the accept list.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* Accept list can not be used with RPAs */
                if (!allow_rpa &&
                    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Since all no-longer-valid accept list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the number of
         * available accept list entries in the controller, then
         * just abort and return a filter policy value that does not
         * use the accept list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * accept list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Use the allowlist unless the following conditions are all true:
         * - We are not currently suspending
         * - There are 1 or more ADV monitors registered and it's not offloaded
         * - Interleaved scanning is not currently using the allowlist
         */
        if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
            hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
            hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
                return 0x00;

        /* Select filter policy to use accept list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy,
                               bool filter_dup, bool addr_resolv)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            addr_resolv) {
                u8 enable = 0x01;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }

        /* Use extended scanning if the Set Extended Scan Parameters and
         * Set Extended Scan Enable commands are supported.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = filter_dup;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = filter_dup;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}
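
/* Note on units: the interval and window arguments above are in 0.625 ms
 * units, as defined for the HCI LE scan parameters. For example, an
 * interval of 0x0100 (256) corresponds to 256 * 0.625 ms = 160 ms, and
 * window must never exceed interval.
 */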

/* Returns true if an le connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *c;

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == LE_LINK && c->state == BT_CONNECT &&
                    test_bit(HCI_CONN_SCANNING, &c->flags)) {
                        rcu_read_unlock();
                        return true;
                }
        }

        rcu_read_unlock();

        return false;
}

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution so that the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;
        /* Default is to enable duplicates filter */
        u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        /* Background scanning should run with address resolution */
        bool addr_resolv = true;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        if (hdev->enable_advmon_interleave_scan &&
            __hci_update_interleaved_scan(hdev))
                return;

        bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
        /* Adding or removing entries from the accept list must
         * happen before enabling scanning. The controller does
         * not allow accept list modification while scanning.
         */
        filter_policy = update_accept_list(req);

        /* When the controller uses random resolvable addresses with LE
         * privacy enabled, controllers that support Extended Scanner
         * Filter Policies can also handle directed advertising.
         *
         * So instead of using filter policies 0x00 (no accept list)
         * and 0x01 (accept list enabled), use the extended filter
         * policies 0x02 (no accept list) and 0x03 (accept list enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = hdev->le_scan_window_suspend;
                interval = hdev->le_scan_int_suspend;

                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
        } else if (hci_is_le_conn_scanning(hdev)) {
                window = hdev->le_scan_window_connect;
                interval = hdev->le_scan_int_connect;
        } else if (hci_is_adv_monitoring(hdev)) {
                window = hdev->le_scan_window_adv_monitor;
                interval = hdev->le_scan_int_adv_monitor;

                /* Disable duplicates filter when scanning for advertisement
                 * monitor for the following reasons.
                 *
                 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
                 * controllers ignore RSSI_Sampling_Period when the duplicates
                 * filter is enabled.
                 *
                 * For SW pattern filtering, when we're not doing interleaved
                 * scanning, it is necessary to disable duplicates filter,
                 * otherwise hosts can only receive one advertisement and it's
                 * impossible to know if a peer is still in range.
                 */
                filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
                   filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy, filter_dup,
                           addr_resolv);
}

static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        /* Instance 0x00 always sets the local name */
        if (instance == 0x00)
                return true;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return false;

        if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
            adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
                return true;

        return adv_instance->scan_rsp_len ? true : false;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
        struct hci_cp_set_event_filter f;

        if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
                return;

        if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
                memset(&f, 0, sizeof(f));
                f.flt_type = HCI_FLT_CLEAR_ALL;
                hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
        }
}

static void hci_req_set_event_filter(struct hci_request *req)
{
        struct bdaddr_list_with_flags *b;
        struct hci_cp_set_event_filter f;
        struct hci_dev *hdev = req->hdev;
        u8 scan = SCAN_DISABLED;
        bool scanning = test_bit(HCI_PSCAN, &hdev->flags);

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        /* Always clear event filter when starting */
        hci_req_clear_event_filter(req);

        list_for_each_entry(b, &hdev->accept_list, list) {
                if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                        b->current_flags))
                        continue;

                memset(&f, 0, sizeof(f));
                bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
                f.flt_type = HCI_FLT_CONN_SETUP;
                f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
                f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

                bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
                hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
                scan = SCAN_PAGE;
        }

        if (scan && !scanning) {
                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        } else if (!scan && scanning) {
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        }
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
        bt_dev_dbg(req->hdev, "Pausing advertising instances");

        /* Call to disable any advertisements active on the controller.
         * This will succeed even if no advertisements are configured.
         */
        __hci_req_disable_advertising(req);

        /* If we are using software rotation, pause the loop */
        if (!ext_adv_capable(req->hdev))
                cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
        struct adv_info *adv;

        bt_dev_dbg(req->hdev, "Resuming advertising instances");

        if (ext_adv_capable(req->hdev)) {
                /* Call for each tracked instance to be re-enabled */
                list_for_each_entry(adv, &req->hdev->adv_instances, list) {
                        __hci_req_enable_ext_advertising(req,
                                                         adv->instance);
                }

        } else {
                /* Schedule for most recent instance to be restarted and begin
                 * the software rotation loop
                 */
                __hci_req_schedule_adv_instance(req,
                                                req->hdev->cur_adv_instance,
                                                true);
        }
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_resume_adv_instances(&req);

        return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
                   status);
        if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
            test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
                clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
                clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                wake_up(&hdev->suspend_wait_q);
        }

        if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
                clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
                wake_up(&hdev->suspend_wait_q);
        }
}

static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
                                              bool enable)
{
        struct hci_dev *hdev = req->hdev;

        switch (hci_get_adv_monitor_offload_ext(hdev)) {
        case HCI_ADV_MONITOR_EXT_MSFT:
                msft_req_add_set_filter_enable(req, enable);
                break;
        default:
                return;
        }

        /* No need to block when enabling since it's on resume path */
        if (hdev->suspended && !enable)
                set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}

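/* Overview of the suspend flow handled below: BT_SUSPEND_DISCONNECT pauses
 * discovery, advertising and scanning and soft-disconnects all links;
 * BT_SUSPEND_CONFIGURE_WAKE programs event filters and a lower duty cycle
 * passive scan so that only wakeable devices can reach the host; any other
 * state restores the pre-suspend configuration.
 */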
/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
        int old_state;
        struct hci_conn *conn;
        struct hci_request req;
        u8 page_scan;
        int disconnect_counter;

        if (next == hdev->suspend_state) {
                bt_dev_dbg(hdev, "Same state before and after: %d", next);
                goto done;
        }

        hdev->suspend_state = next;
        hci_req_init(&req, hdev);

        if (next == BT_SUSPEND_DISCONNECT) {
                /* Mark device as suspended */
                hdev->suspended = true;

                /* Pause discovery if not already stopped */
                old_state = hdev->discovery.state;
                if (old_state != DISCOVERY_STOPPED) {
                        set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hdev->discovery_paused = true;
                hdev->discovery_old_state = old_state;

                /* Stop directed advertising */
                old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
                if (old_state) {
                        set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
                        cancel_delayed_work(&hdev->discov_off);
                        queue_delayed_work(hdev->req_workqueue,
                                           &hdev->discov_off, 0);
                }

                /* Pause other advertisements */
                if (hdev->adv_instance_cnt)
                        __hci_req_pause_adv_instances(&req);

                hdev->advertising_paused = true;
                hdev->advertising_old_state = old_state;

                /* Disable page scan if enabled */
                if (test_bit(HCI_PSCAN, &hdev->flags)) {
                        page_scan = SCAN_DISABLED;
                        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
                                    &page_scan);
                        set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                }

                /* Disable LE passive scan if enabled */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_interleave_scan(hdev);
                        hci_req_add_le_scan_disable(&req, false);
                }

                /* Disable advertisement filters */
                hci_req_add_set_adv_filter_enable(&req, false);

                /* Prevent disconnects from causing scanning to be re-enabled */
                hdev->scanning_paused = true;

                /* Run commands before disconnecting */
                hci_req_run(&req, suspend_req_complete);

                disconnect_counter = 0;
                /* Soft disconnect everything (power off) */
                list_for_each_entry(conn, &hdev->conn_hash.list, list) {
                        hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
                        disconnect_counter++;
                }

                if (disconnect_counter > 0) {
                        bt_dev_dbg(hdev,
                                   "Had %d disconnects. Will wait on them",
                                   disconnect_counter);
                        set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
                }
        } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
                /* Unpause to take care of updating scanning params */
                hdev->scanning_paused = false;
                /* Enable event filter for paired devices */
                hci_req_set_event_filter(&req);
                /* Enable passive scan at lower duty cycle */
                __hci_update_background_scan(&req);
                /* Pause scan changes again. */
                hdev->scanning_paused = true;
                hci_req_run(&req, suspend_req_complete);
        } else {
                hdev->suspended = false;
                hdev->scanning_paused = false;

                /* Clear any event filters and restore scan state */
                hci_req_clear_event_filter(&req);
                __hci_req_update_scan(&req);

                /* Reset passive/background scanning to normal */
                __hci_update_background_scan(&req);
                /* Enable all of the advertisement filters */
                hci_req_add_set_adv_filter_enable(&req, true);

                /* Unpause directed advertising */
                hdev->advertising_paused = false;
                if (hdev->advertising_old_state) {
                        set_bit(SUSPEND_UNPAUSE_ADVERTISING,
                                hdev->suspend_tasks);
                        hci_dev_set_flag(hdev, HCI_ADVERTISING);
                        queue_work(hdev->req_workqueue,
                                   &hdev->discoverable_update);
                        hdev->advertising_old_state = 0;
                }

                /* Resume other advertisements */
                if (hdev->adv_instance_cnt)
                        __hci_req_resume_adv_instances(&req);

                /* Unpause discovery */
                hdev->discovery_paused = false;
                if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
                    hdev->discovery_old_state != DISCOVERY_STOPPING) {
                        set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STARTING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hci_req_run(&req, suspend_req_complete);
        }

        hdev->suspend_state = next;

done:
        clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
        wake_up(&hdev->suspend_wait_q);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
        return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                __hci_req_disable_ext_adv_instance(req, 0x00);

        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
        }
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

1496         /* Return 0 when we got an invalid instance identifier. */
1497         if (!adv_instance)
1498                 return 0;
1499
1500         return adv_instance->flags;
1501 }
1502
1503 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1504 {
1505         /* If privacy is not enabled don't use RPA */
1506         if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1507                 return false;
1508
1509         /* If basic privacy mode is enabled use RPA */
1510         if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1511                 return true;
1512
1513         /* If limited privacy mode is enabled don't use RPA if we're
1514          * both discoverable and bondable.
1515          */
1516         if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1517             hci_dev_test_flag(hdev, HCI_BONDABLE))
1518                 return false;
1519
1520         /* We're neither bondable nor discoverable in the limited
1521          * privacy mode, therefore use RPA.
1522          */
1523         return true;
1524 }
1525
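/* Summary of the decision tree above: with HCI_PRIVACY set, an RPA is
 * used unless limited privacy mode (HCI_LIMITED_PRIVACY) is active and
 * the instance is discoverable while the device is bondable.
 */
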
1526 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1527 {
1528         /* If there is no connection we are OK to advertise. */
1529         if (hci_conn_num(hdev, LE_LINK) == 0)
1530                 return true;
1531
1532         /* Check le_states if there is any connection in peripheral role. */
1533         if (hdev->conn_hash.le_num_peripheral > 0) {
1534                 /* Peripheral connection state and non-connectable mode bit 20.
1535                  */
1536                 if (!connectable && !(hdev->le_states[2] & 0x10))
1537                         return false;
1538
1539                 /* Peripheral connection state and connectable mode bit 38
1540                  * and scannable bit 21.
1541                  */
1542                 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1543                                     !(hdev->le_states[2] & 0x20)))
1544                         return false;
1545         }
1546
1547         /* Check le_states if there is any connection in central role. */
1548         if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1549                 /* Central connection state and non-connectable mode bit 18. */
1550                 if (!connectable && !(hdev->le_states[2] & 0x02))
1551                         return false;
1552
1553                 /* Central connection state and connectable mode bit 35 and
1554                  * scannable bit 19.
1555                  */
1556                 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1557                                     !(hdev->le_states[2] & 0x08)))
1558                         return false;
1559         }
1560
1561         return true;
1562 }
1563
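/* A hypothetical helper (illustration only, not part of this file)
 * showing how a bit number from the LE Supported States mask maps into
 * the hdev->le_states byte array tested above, e.g. bit 20 ->
 * le_states[2] & 0x10 and bit 38 -> le_states[4] & 0x40:
 */
#if 0
static inline bool le_states_test_bit(const u8 *le_states, u8 bit)
{
	return le_states[bit / 8] & (1 << (bit % 8));
}
#endif
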
1564 void __hci_req_enable_advertising(struct hci_request *req)
1565 {
1566         struct hci_dev *hdev = req->hdev;
1567         struct adv_info *adv_instance;
1568         struct hci_cp_le_set_adv_param cp;
1569         u8 own_addr_type, enable = 0x01;
1570         bool connectable;
1571         u16 adv_min_interval, adv_max_interval;
1572         u32 flags;
1573
1574         flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1575         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1576
1577         /* If the "connectable" instance flag was not set, then choose between
1578          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1579          */
1580         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1581                       mgmt_get_connectable(hdev);
1582
1583         if (!is_advertising_allowed(hdev, connectable))
1584                 return;
1585
1586         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1587                 __hci_req_disable_advertising(req);
1588
1589         /* Clear the HCI_LE_ADV bit temporarily so that
1590          * hci_update_random_address() knows that it's safe to go ahead
1591          * and write a new random address. The flag will be set back on
1592          * as soon as the SET_ADV_ENABLE HCI command completes.
1593          */
1594         hci_dev_clear_flag(hdev, HCI_LE_ADV);
1595
1596         /* Set require_privacy to true only when non-connectable
1597          * advertising is used. In that case it is fine to use a
1598          * non-resolvable private address.
1599          */
1600         if (hci_update_random_address(req, !connectable,
1601                                       adv_use_rpa(hdev, flags),
1602                                       &own_addr_type) < 0)
1603                 return;
1604
1605         memset(&cp, 0, sizeof(cp));
1606
1607         if (adv_instance) {
1608                 adv_min_interval = adv_instance->min_interval;
1609                 adv_max_interval = adv_instance->max_interval;
1610         } else {
1611                 adv_min_interval = hdev->le_adv_min_interval;
1612                 adv_max_interval = hdev->le_adv_max_interval;
1613         }
1614
1615         if (connectable) {
1616                 cp.type = LE_ADV_IND;
1617         } else {
1618                 if (adv_cur_instance_is_scannable(hdev))
1619                         cp.type = LE_ADV_SCAN_IND;
1620                 else
1621                         cp.type = LE_ADV_NONCONN_IND;
1622
1623                 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1624                     hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1625                         adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1626                         adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1627                 }
1628         }
1629
1630         cp.min_interval = cpu_to_le16(adv_min_interval);
1631         cp.max_interval = cpu_to_le16(adv_max_interval);
1632
1633 #ifdef TIZEN_BT
1634         cp.filter_policy = hdev->adv_filter_policy;
1635         cp.type = hdev->adv_type;
1636 #endif
1637
1638         cp.own_address_type = own_addr_type;
1639         cp.channel_map = hdev->le_adv_channel_map;
1640
1641         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1642
1643         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1644 }
1645
1646 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1647 {
1648         size_t short_len;
1649         size_t complete_len;
1650
1651         /* no space left for name (+ NULL + type + len) */
1652         if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1653                 return ad_len;
1654
1655         /* use complete name if present and fits */
1656         complete_len = strlen(hdev->dev_name);
1657         if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1658                 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1659                                        hdev->dev_name, complete_len + 1);
1660
1661         /* use short name if present */
1662         short_len = strlen(hdev->short_name);
1663         if (short_len)
1664                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1665                                        hdev->short_name, short_len + 1);
1666
1667         /* use shortened full name if present, we already know that name
1668          * is longer than HCI_MAX_SHORT_NAME_LENGTH
1669          */
1670         if (complete_len) {
1671                 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1672
1673                 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1674                 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1675
1676                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1677                                        sizeof(name));
1678         }
1679
1680         return ad_len;
1681 }
1682
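/* For reference, a minimal sketch of how the eir_append_data() helper
 * used above behaves (an assumption for illustration, not the shared
 * implementation): each call appends one <length><type><payload> AD
 * structure and returns the new total length.
 */
#if 0
static u8 eir_append_data_sketch(u8 *eir, u8 eir_len, u8 type,
				 const u8 *data, u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;	/* length byte */
	eir[eir_len++] = type;				/* e.g. EIR_NAME_SHORT */
	memcpy(&eir[eir_len], data, data_len);

	return eir_len + data_len;
}
#endif
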
1683 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1684 {
1685         return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1686 }
1687
1688 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1689 {
1690         u8 scan_rsp_len = 0;
1691
1692         if (hdev->appearance)
1693                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1694
1695         return append_local_name(hdev, ptr, scan_rsp_len);
1696 }
1697
1698 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1699                                         u8 *ptr)
1700 {
1701         struct adv_info *adv_instance;
1702         u32 instance_flags;
1703         u8 scan_rsp_len = 0;
1704
1705         adv_instance = hci_find_adv_instance(hdev, instance);
1706         if (!adv_instance)
1707                 return 0;
1708
1709         instance_flags = adv_instance->flags;
1710
1711         if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
1712                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1713
1714         memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1715                adv_instance->scan_rsp_len);
1716
1717         scan_rsp_len += adv_instance->scan_rsp_len;
1718
1719         if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1720                 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1721
1722         return scan_rsp_len;
1723 }
1724
1725 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1726 {
1727         struct hci_dev *hdev = req->hdev;
1728         u8 len;
1729
1730         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1731                 return;
1732
1733         if (ext_adv_capable(hdev)) {
1734                 struct {
1735                         struct hci_cp_le_set_ext_scan_rsp_data cp;
1736                         u8 data[HCI_MAX_EXT_AD_LENGTH];
1737                 } pdu;
1738
1739                 memset(&pdu, 0, sizeof(pdu));
1740
1741                 if (instance)
1742                         len = create_instance_scan_rsp_data(hdev, instance,
1743                                                             pdu.data);
1744                 else
1745                         len = create_default_scan_rsp_data(hdev, pdu.data);
1746 #ifdef TIZEN_BT
1747                 /* Advertising scan response data is handled in BlueZ.
1748                  * This value is updated only when the application requests
1749                  * the update using adapter_set_scan_rsp_data().
1750                  */
1751                 return;
1752 #else
1753
1754                 if (hdev->scan_rsp_data_len == len &&
1755                     !memcmp(pdu.data, hdev->scan_rsp_data, len))
1756                         return;
1757
1758                 memcpy(hdev->scan_rsp_data, pdu.data, len);
1759                 hdev->scan_rsp_data_len = len;
1760
1761                 pdu.cp.handle = instance;
1762                 pdu.cp.length = len;
1763                 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1764                 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1765
1766                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1767                             sizeof(pdu.cp) + len, &pdu.cp);
1768 #endif
1769         } else {
1770                 struct hci_cp_le_set_scan_rsp_data cp;
1771
1772                 memset(&cp, 0, sizeof(cp));
1773
1774                 if (instance)
1775                         len = create_instance_scan_rsp_data(hdev, instance,
1776                                                             cp.data);
1777                 else
1778                         len = create_default_scan_rsp_data(hdev, cp.data);
1779 #ifdef TIZEN_BT
1780                 /* Advertising scan response data is handled in BlueZ.
1781                  * This value is updated only when the application requests
1782                  * the update using adapter_set_scan_rsp_data().
1783                  */
1784                 return;
1785 #else
1786                 if (hdev->scan_rsp_data_len == len &&
1787                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1788                         return;
1789
1790                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1791                 hdev->scan_rsp_data_len = len;
1792
1793                 cp.length = len;
1794
1795                 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1796 #endif
1797         }
1798 }
1799
1800 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1801 {
1802         struct adv_info *adv_instance = NULL;
1803         u8 ad_len = 0, flags = 0;
1804         u32 instance_flags;
1805
1806         /* Return 0 when the current instance identifier is invalid. */
1807         if (instance) {
1808                 adv_instance = hci_find_adv_instance(hdev, instance);
1809                 if (!adv_instance)
1810                         return 0;
1811         }
1812
1813         instance_flags = get_adv_instance_flags(hdev, instance);
1814
1815         /* If the instance already has the flags set, skip adding
1816          * them again.
1817          */
1818         if (adv_instance && eir_get_data(adv_instance->adv_data,
1819                                          adv_instance->adv_data_len, EIR_FLAGS,
1820                                          NULL))
1821                 goto skip_flags;
1822
1823         /* The Add Advertising command allows userspace to set both the general
1824          * and limited discoverable flags.
1825          */
1826         if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1827                 flags |= LE_AD_GENERAL;
1828
1829         if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1830                 flags |= LE_AD_LIMITED;
1831
1832         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1833                 flags |= LE_AD_NO_BREDR;
1834
1835         if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1836                 /* If a discovery flag wasn't provided, simply use the global
1837                  * settings.
1838                  */
1839                 if (!flags)
1840                         flags |= mgmt_get_adv_discov_flags(hdev);
1841
1842                 /* If flags would still be empty, then there is no need to
1843                  * include the "Flags" AD field.
1844                  */
1845                 if (flags) {
1846                         ptr[0] = 0x02;
1847                         ptr[1] = EIR_FLAGS;
1848                         ptr[2] = flags;
1849
1850                         ad_len += 3;
1851                         ptr += 3;
1852                 }
1853         }
1854
1855 skip_flags:
1856         if (adv_instance) {
1857                 memcpy(ptr, adv_instance->adv_data,
1858                        adv_instance->adv_data_len);
1859                 ad_len += adv_instance->adv_data_len;
1860                 ptr += adv_instance->adv_data_len;
1861         }
1862
1863         if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1864                 s8 adv_tx_power;
1865
1866                 if (ext_adv_capable(hdev)) {
1867                         if (adv_instance)
1868                                 adv_tx_power = adv_instance->tx_power;
1869                         else
1870                                 adv_tx_power = hdev->adv_tx_power;
1871                 } else {
1872                         adv_tx_power = hdev->adv_tx_power;
1873                 }
1874
1875                 /* Provide Tx Power only if we can provide a valid value for it */
1876                 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1877                         ptr[0] = 0x02;
1878                         ptr[1] = EIR_TX_POWER;
1879                         ptr[2] = (u8)adv_tx_power;
1880
1881                         ad_len += 3;
1882                         ptr += 3;
1883                 }
1884         }
1885
1886         return ad_len;
1887 }
1888
1889 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1890 {
1891         struct hci_dev *hdev = req->hdev;
1892         u8 len;
1893
1894         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1895                 return;
1896
1897         if (ext_adv_capable(hdev)) {
1898                 struct {
1899                         struct hci_cp_le_set_ext_adv_data cp;
1900                         u8 data[HCI_MAX_EXT_AD_LENGTH];
1901                 } pdu;
1902
1903                 memset(&pdu, 0, sizeof(pdu));
1904
1905                 len = create_instance_adv_data(hdev, instance, pdu.data);
1906
1907 #ifdef TIZEN_BT
1908                 /* BlueZ will handle the advertising data, including the flags
1909                  * and TX power. This value is updated only when the application
1910                  * requests the update using adapter_set_advertising_data().
1911                  */
1912                 return;
1913 #else
1914                 /* There's nothing to do if the data hasn't changed */
1915                 if (hdev->adv_data_len == len &&
1916                     memcmp(pdu.data, hdev->adv_data, len) == 0)
1917                         return;
1918
1919                 memcpy(hdev->adv_data, pdu.data, len);
1920                 hdev->adv_data_len = len;
1921
1922                 pdu.cp.length = len;
1923                 pdu.cp.handle = instance;
1924                 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1925                 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1926
1927                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1928                             sizeof(pdu.cp) + len, &pdu.cp);
1929 #endif
1930         } else {
1931                 struct hci_cp_le_set_adv_data cp;
1932
1933                 memset(&cp, 0, sizeof(cp));
1934
1935                 len = create_instance_adv_data(hdev, instance, cp.data);
1936
1937 #ifdef TIZEN_BT
1938                 /* BlueZ will handle the advertising data, including the flags
1939                  * and TX power. This value is updated only when the application
1940                  * requests the update using adapter_set_advertising_data().
1941                  */
1942                 return;
1943 #else
1944                 /* There's nothing to do if the data hasn't changed */
1945                 if (hdev->adv_data_len == len &&
1946                     memcmp(cp.data, hdev->adv_data, len) == 0)
1947                         return;
1948
1949                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1950                 hdev->adv_data_len = len;
1951
1952                 cp.length = len;
1953
1954                 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1955 #endif
1956         }
1957 }
1958
1959 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1960 {
1961         struct hci_request req;
1962
1963         hci_req_init(&req, hdev);
1964         __hci_req_update_adv_data(&req, instance);
1965
1966         return hci_req_run(&req, NULL);
1967 }
1968
1969 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1970                                             u16 opcode)
1971 {
1972         BT_DBG("%s status %u", hdev->name, status);
1973 }
1974
1975 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1976 {
1977         struct hci_request req;
1978         __u8 enable = 0x00;
1979
1980         if (!use_ll_privacy(hdev) &&
1981             !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1982                 return;
1983
1984         hci_req_init(&req, hdev);
1985
1986         hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1987
1988         hci_req_run(&req, enable_addr_resolution_complete);
1989 }
1990
1991 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1992 {
1993         bt_dev_dbg(hdev, "status %u", status);
1994 }
1995
1996 void hci_req_reenable_advertising(struct hci_dev *hdev)
1997 {
1998         struct hci_request req;
1999
2000         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
2001             list_empty(&hdev->adv_instances))
2002                 return;
2003
2004         hci_req_init(&req, hdev);
2005
2006         if (hdev->cur_adv_instance) {
2007                 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
2008                                                 true);
2009         } else {
2010                 if (ext_adv_capable(hdev)) {
2011                         __hci_req_start_ext_adv(&req, 0x00);
2012                 } else {
2013                         __hci_req_update_adv_data(&req, 0x00);
2014                         __hci_req_update_scan_rsp_data(&req, 0x00);
2015                         __hci_req_enable_advertising(&req);
2016                 }
2017         }
2018
2019         hci_req_run(&req, adv_enable_complete);
2020 }
2021
2022 static void adv_timeout_expire(struct work_struct *work)
2023 {
2024         struct hci_dev *hdev = container_of(work, struct hci_dev,
2025                                             adv_instance_expire.work);
2026
2027         struct hci_request req;
2028         u8 instance;
2029
2030         bt_dev_dbg(hdev, "");
2031
2032         hci_dev_lock(hdev);
2033
2034         hdev->adv_instance_timeout = 0;
2035
2036         instance = hdev->cur_adv_instance;
2037         if (instance == 0x00)
2038                 goto unlock;
2039
2040         hci_req_init(&req, hdev);
2041
2042         hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
2043
2044         if (list_empty(&hdev->adv_instances))
2045                 __hci_req_disable_advertising(&req);
2046
2047         hci_req_run(&req, NULL);
2048
2049 unlock:
2050         hci_dev_unlock(hdev);
2051 }
2052
2053 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
2054                                            unsigned long opt)
2055 {
2056         struct hci_dev *hdev = req->hdev;
2057         int ret = 0;
2058
2059         hci_dev_lock(hdev);
2060
2061         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2062                 hci_req_add_le_scan_disable(req, false);
2063         hci_req_add_le_passive_scan(req);
2064
2065         switch (hdev->interleave_scan_state) {
2066         case INTERLEAVE_SCAN_ALLOWLIST:
2067                 bt_dev_dbg(hdev, "next state: allowlist");
2068                 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2069                 break;
2070         case INTERLEAVE_SCAN_NO_FILTER:
2071                 bt_dev_dbg(hdev, "next state: no filter");
2072                 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
2073                 break;
2074         case INTERLEAVE_SCAN_NONE:
2075                 BT_ERR("unexpected error");
2076                 ret = -1;
2077         }
2078
2079         hci_dev_unlock(hdev);
2080
2081         return ret;
2082 }
2083
2084 static void interleave_scan_work(struct work_struct *work)
2085 {
2086         struct hci_dev *hdev = container_of(work, struct hci_dev,
2087                                             interleave_scan.work);
2088         u8 status;
2089         unsigned long timeout;
2090
2091         if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2092                 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2093         } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2094                 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2095         } else {
2096                 bt_dev_err(hdev, "unexpected error");
2097                 return;
2098         }
2099
2100         hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2101                      HCI_CMD_TIMEOUT, &status);
2102
2103         /* Don't continue interleaving if it was canceled */
2104         if (is_interleave_scanning(hdev))
2105                 queue_delayed_work(hdev->req_workqueue,
2106                                    &hdev->interleave_scan, timeout);
2107 }
2108
2109 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2110                            bool use_rpa, struct adv_info *adv_instance,
2111                            u8 *own_addr_type, bdaddr_t *rand_addr)
2112 {
2113         int err;
2114
2115         bacpy(rand_addr, BDADDR_ANY);
2116
2117         /* If privacy is enabled, use a resolvable private address. If
2118          * the current RPA has expired, generate a new one.
2119          */
2120         if (use_rpa) {
2121                 /* If the Controller supports LL Privacy, use own address
2122                  * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2123                  */
2124                 if (use_ll_privacy(hdev) &&
2125                     hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2126                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2127                 else
2128                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2129
2130                 if (adv_instance) {
2131                         if (adv_rpa_valid(adv_instance))
2132                                 return 0;
2133                 } else {
2134                         if (rpa_valid(hdev))
2135                                 return 0;
2136                 }
2137
2138                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2139                 if (err < 0) {
2140                         bt_dev_err(hdev, "failed to generate new RPA");
2141                         return err;
2142                 }
2143
2144                 bacpy(rand_addr, &hdev->rpa);
2145
2146                 return 0;
2147         }
2148
2149         /* In case of required privacy without resolvable private address,
2150          * use a non-resolvable private address. This is useful for
2151          * non-connectable advertising.
2152          */
2153         if (require_privacy) {
2154                 bdaddr_t nrpa;
2155
2156                 while (true) {
2157                         /* The non-resolvable private address is generated
2158                          * from six random bytes with the two most significant
2159                          * bits cleared.
2160                          */
2161                         get_random_bytes(&nrpa, 6);
2162                         nrpa.b[5] &= 0x3f;
2163
2164                         /* The non-resolvable private address shall not be
2165                          * equal to the public address.
2166                          */
2167                         if (bacmp(&hdev->bdaddr, &nrpa))
2168                                 break;
2169                 }
2170
2171                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2172                 bacpy(rand_addr, &nrpa);
2173
2174                 return 0;
2175         }
2176
2177         /* No privacy so use a public address. */
2178         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2179
2180         return 0;
2181 }
2182
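/* For reference: the two most significant bits of an LE random address
 * encode its subtype (per the Core specification): 0b00 marks a
 * non-resolvable private address (hence the "nrpa.b[5] &= 0x3f" above),
 * 0b01 a resolvable private address and 0b11 a static random address.
 */
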
2183 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2184 {
2185         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2186 }
2187
2188 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2189 {
2190         struct hci_dev *hdev = req->hdev;
2191
2192         /* If we're advertising or initiating an LE connection we can't
2193          * go ahead and change the random address at this time. This is
2194          * because the eventual initiator address used for the
2195          * subsequently created connection will be undefined (some
2196          * controllers use the new address and others the one we had
2197          * when the operation started).
2198          *
2199          * In this kind of scenario skip the update and let the random
2200          * address be updated at the next cycle.
2201          */
2202         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2203             hci_lookup_le_connect(hdev)) {
2204                 bt_dev_dbg(hdev, "Deferring random address update");
2205                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2206                 return;
2207         }
2208
2209         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2210 }
2211
2212 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2213 {
2214         struct hci_cp_le_set_ext_adv_params cp;
2215         struct hci_dev *hdev = req->hdev;
2216         bool connectable;
2217         u32 flags;
2218         bdaddr_t random_addr;
2219         u8 own_addr_type;
2220         int err;
2221         struct adv_info *adv_instance;
2222         bool secondary_adv;
2223
2224         if (instance > 0) {
2225                 adv_instance = hci_find_adv_instance(hdev, instance);
2226                 if (!adv_instance)
2227                         return -EINVAL;
2228         } else {
2229                 adv_instance = NULL;
2230         }
2231
2232         flags = get_adv_instance_flags(hdev, instance);
2233
2234         /* If the "connectable" instance flag was not set, then choose between
2235          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2236          */
2237         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2238                       mgmt_get_connectable(hdev);
2239
2240         if (!is_advertising_allowed(hdev, connectable))
2241                 return -EPERM;
2242
2243         /* Set require_privacy to true only when non-connectable
2244          * advertising is used. In that case it is fine to use a
2245          * non-resolvable private address.
2246          */
2247         err = hci_get_random_address(hdev, !connectable,
2248                                      adv_use_rpa(hdev, flags), adv_instance,
2249                                      &own_addr_type, &random_addr);
2250         if (err < 0)
2251                 return err;
2252
2253         memset(&cp, 0, sizeof(cp));
2254
2255         if (adv_instance) {
2256                 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2257                 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2258                 cp.tx_power = adv_instance->tx_power;
2259         } else {
2260                 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2261                 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2262                 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2263         }
2264
2265         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2266
2267         if (connectable) {
2268                 if (secondary_adv)
2269                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2270                 else
2271                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2272         } else if (adv_instance_is_scannable(hdev, instance) ||
2273                    (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
2274                 if (secondary_adv)
2275                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2276                 else
2277                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2278         } else {
2279                 if (secondary_adv)
2280                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2281                 else
2282                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2283         }
2284
2285         cp.own_addr_type = own_addr_type;
2286         cp.channel_map = hdev->le_adv_channel_map;
2287         cp.handle = instance;
2288
2289         if (flags & MGMT_ADV_FLAG_SEC_2M) {
2290                 cp.primary_phy = HCI_ADV_PHY_1M;
2291                 cp.secondary_phy = HCI_ADV_PHY_2M;
2292         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2293                 cp.primary_phy = HCI_ADV_PHY_CODED;
2294                 cp.secondary_phy = HCI_ADV_PHY_CODED;
2295         } else {
2296                 /* In all other cases use 1M */
2297                 cp.primary_phy = HCI_ADV_PHY_1M;
2298                 cp.secondary_phy = HCI_ADV_PHY_1M;
2299         }
2300
2301         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2302
2303         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2304             bacmp(&random_addr, BDADDR_ANY)) {
2305                 struct hci_cp_le_set_adv_set_rand_addr cp;
2306
2307                 /* Check if the random address needs to be updated */
2308                 if (adv_instance) {
2309                         if (!bacmp(&random_addr, &adv_instance->random_addr))
2310                                 return 0;
2311                 } else {
2312                         if (!bacmp(&random_addr, &hdev->random_addr))
2313                                 return 0;
2314                         /* Instance 0x00 doesn't have an adv_info; instead it
2315                          * uses hdev->random_addr to track its address, so
2316                          * whenever it needs to be updated this also sets the
2317                          * random address, since hdev->random_addr is shared
2318                          * with the scan state machine.
2319                          */
2320                         set_random_addr(req, &random_addr);
2321                 }
2322
2323                 memset(&cp, 0, sizeof(cp));
2324
2325                 cp.handle = instance;
2326                 bacpy(&cp.bdaddr, &random_addr);
2327
2328                 hci_req_add(req,
2329                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2330                             sizeof(cp), &cp);
2331         }
2332
2333         return 0;
2334 }
2335
2336 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2337 {
2338         struct hci_dev *hdev = req->hdev;
2339         struct hci_cp_le_set_ext_adv_enable *cp;
2340         struct hci_cp_ext_adv_set *adv_set;
2341         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2342         struct adv_info *adv_instance;
2343
2344         if (instance > 0) {
2345                 adv_instance = hci_find_adv_instance(hdev, instance);
2346                 if (!adv_instance)
2347                         return -EINVAL;
2348         } else {
2349                 adv_instance = NULL;
2350         }
2351
2352         cp = (void *) data;
2353         adv_set = (void *) cp->data;
2354
2355         memset(cp, 0, sizeof(*cp));
2356
2357         cp->enable = 0x01;
2358         cp->num_of_sets = 0x01;
2359
2360         memset(adv_set, 0, sizeof(*adv_set));
2361
2362         adv_set->handle = instance;
2363
2364         /* Set the duration per instance, since the controller is
2365          * responsible for scheduling it.
2366          */
2367         if (adv_instance && adv_instance->timeout) {
2368                 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2369
2370                 /* Time = N * 10 ms */
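                /* e.g. timeout = 5 s -> duration = 5000 ms -> N = 500 */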
2371                 adv_set->duration = cpu_to_le16(duration / 10);
2372         }
2373
2374         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2375                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2376                     data);
2377
2378         return 0;
2379 }
2380
2381 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2382 {
2383         struct hci_dev *hdev = req->hdev;
2384         struct hci_cp_le_set_ext_adv_enable *cp;
2385         struct hci_cp_ext_adv_set *adv_set;
2386         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2387         u8 req_size;
2388
2389         /* If request specifies an instance that doesn't exist, fail */
2390         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2391                 return -EINVAL;
2392
2393         memset(data, 0, sizeof(data));
2394
2395         cp = (void *)data;
2396         adv_set = (void *)cp->data;
2397
2398         /* Instance 0x00 indicates all advertising instances will be disabled */
2399         cp->num_of_sets = !!instance;
2400         cp->enable = 0x00;
2401
2402         adv_set->handle = instance;
2403
2404         req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2405         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2406
2407         return 0;
2408 }
2409
2410 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2411 {
2412         struct hci_dev *hdev = req->hdev;
2413
2414         /* If request specifies an instance that doesn't exist, fail */
2415         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2416                 return -EINVAL;
2417
2418         hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2419
2420         return 0;
2421 }
2422
2423 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2424 {
2425         struct hci_dev *hdev = req->hdev;
2426         struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2427         int err;
2428
2429         /* If the instance isn't pending, the chip knows about it, and
2430          * it's safe to disable it.
2431          */
2432         if (adv_instance && !adv_instance->pending)
2433                 __hci_req_disable_ext_adv_instance(req, instance);
2434
2435         err = __hci_req_setup_ext_adv_instance(req, instance);
2436         if (err < 0)
2437                 return err;
2438
2439         __hci_req_update_scan_rsp_data(req, instance);
2440         __hci_req_enable_ext_advertising(req, instance);
2441
2442         return 0;
2443 }
2444
2445 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2446                                     bool force)
2447 {
2448         struct hci_dev *hdev = req->hdev;
2449         struct adv_info *adv_instance = NULL;
2450         u16 timeout;
2451
2452         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2453             list_empty(&hdev->adv_instances))
2454                 return -EPERM;
2455
2456         if (hdev->adv_instance_timeout)
2457                 return -EBUSY;
2458
2459         adv_instance = hci_find_adv_instance(hdev, instance);
2460         if (!adv_instance)
2461                 return -ENOENT;
2462
2463         /* A zero timeout means unlimited advertising. As long as there is
2464          * only one instance, duration should be ignored. We still set a timeout
2465          * in case further instances are being added later on.
2466          *
2467          * If the remaining lifetime of the instance is more than the duration
2468          * then the timeout corresponds to the duration, otherwise it will be
2469          * reduced to the remaining instance lifetime.
2470          */
2471         if (adv_instance->timeout == 0 ||
2472             adv_instance->duration <= adv_instance->remaining_time)
2473                 timeout = adv_instance->duration;
2474         else
2475                 timeout = adv_instance->remaining_time;
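        /* e.g. duration = 10 s with remaining_time = 4 s yields a 4 s
         * timeout; remaining_time is then reduced to 0 below.
         */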
2476
2477         /* The remaining time is being reduced unless the instance is being
2478          * advertised without time limit.
2479          */
2480         if (adv_instance->timeout)
2481                 adv_instance->remaining_time =
2482                                 adv_instance->remaining_time - timeout;
2483
2484         /* Only use work for scheduling instances with legacy advertising */
2485         if (!ext_adv_capable(hdev)) {
2486                 hdev->adv_instance_timeout = timeout;
2487                 queue_delayed_work(hdev->req_workqueue,
2488                            &hdev->adv_instance_expire,
2489                            msecs_to_jiffies(timeout * 1000));
2490         }
2491
2492         /* If we're just re-scheduling the same instance again then do not
2493          * execute any HCI commands. This happens when a single instance is
2494          * being advertised.
2495          */
2496         if (!force && hdev->cur_adv_instance == instance &&
2497             hci_dev_test_flag(hdev, HCI_LE_ADV))
2498                 return 0;
2499
2500         hdev->cur_adv_instance = instance;
2501         if (ext_adv_capable(hdev)) {
2502                 __hci_req_start_ext_adv(req, instance);
2503         } else {
2504                 __hci_req_update_adv_data(req, instance);
2505                 __hci_req_update_scan_rsp_data(req, instance);
2506                 __hci_req_enable_advertising(req);
2507         }
2508
2509         return 0;
2510 }
2511
2512 /* For a single instance:
2513  * - force == true: The instance will be removed even when its remaining
2514  *   lifetime is not zero.
2515  * - force == false: The instance will be deactivated but kept stored unless
2516  *   the remaining lifetime is zero.
2517  *
2518  * For instance == 0x00:
2519  * - force == true: All instances will be removed regardless of their timeout
2520  *   setting.
2521  * - force == false: Only instances that have a timeout will be removed.
2522  */
2523 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2524                                 struct hci_request *req, u8 instance,
2525                                 bool force)
2526 {
2527         struct adv_info *adv_instance, *n, *next_instance = NULL;
2528         int err;
2529         u8 rem_inst;
2530
2531         /* Cancel any timeout concerning the removed instance(s). */
2532         if (!instance || hdev->cur_adv_instance == instance)
2533                 cancel_adv_timeout(hdev);
2534
2535         /* Get the next instance to advertise BEFORE we remove
2536          * the current one. This can be the same instance again
2537          * if there is only one instance.
2538          */
2539         if (instance && hdev->cur_adv_instance == instance)
2540                 next_instance = hci_get_next_instance(hdev, instance);
2541
2542         if (instance == 0x00) {
2543                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2544                                          list) {
2545                         if (!(force || adv_instance->timeout))
2546                                 continue;
2547
2548                         rem_inst = adv_instance->instance;
2549                         err = hci_remove_adv_instance(hdev, rem_inst);
2550                         if (!err)
2551                                 mgmt_advertising_removed(sk, hdev, rem_inst);
2552                 }
2553         } else {
2554                 adv_instance = hci_find_adv_instance(hdev, instance);
2555
2556                 if (force || (adv_instance && adv_instance->timeout &&
2557                               !adv_instance->remaining_time)) {
2558                         /* Don't advertise a removed instance. */
2559                         if (next_instance &&
2560                             next_instance->instance == instance)
2561                                 next_instance = NULL;
2562
2563                         err = hci_remove_adv_instance(hdev, instance);
2564                         if (!err)
2565                                 mgmt_advertising_removed(sk, hdev, instance);
2566                 }
2567         }
2568
2569         if (!req || !hdev_is_powered(hdev) ||
2570             hci_dev_test_flag(hdev, HCI_ADVERTISING))
2571                 return;
2572
2573         if (next_instance && !ext_adv_capable(hdev))
2574                 __hci_req_schedule_adv_instance(req, next_instance->instance,
2575                                                 false);
2576 }
2577
2578 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2579                               bool use_rpa, u8 *own_addr_type)
2580 {
2581         struct hci_dev *hdev = req->hdev;
2582         int err;
2583
2584         /* If privacy is enabled, use a resolvable private address. If
2585          * the current RPA has expired or something other than the
2586          * current RPA is in use, generate a new one.
2587          */
2588         if (use_rpa) {
2589                 /* If the Controller supports LL Privacy, use own address
2590                  * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2591                  */
2592                 if (use_ll_privacy(hdev) &&
2593                     hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2594                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2595                 else
2596                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2597
2598                 if (rpa_valid(hdev))
2599                         return 0;
2600
2601                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2602                 if (err < 0) {
2603                         bt_dev_err(hdev, "failed to generate new RPA");
2604                         return err;
2605                 }
2606
2607                 set_random_addr(req, &hdev->rpa);
2608
2609                 return 0;
2610         }
2611
2612         /* In case of required privacy without resolvable private address,
2613          * use a non-resolvable private address. This is useful for active
2614          * scanning and non-connectable advertising.
2615          */
2616         if (require_privacy) {
2617                 bdaddr_t nrpa;
2618
2619                 while (true) {
2620                         /* The non-resolvable private address is generated
2621                          * from six random bytes with the two most significant
2622                          * bits cleared.
2623                          */
2624                         get_random_bytes(&nrpa, 6);
2625                         nrpa.b[5] &= 0x3f;
2626
2627                         /* The non-resolvable private address shall not be
2628                          * equal to the public address.
2629                          */
2630                         if (bacmp(&hdev->bdaddr, &nrpa))
2631                                 break;
2632                 }
2633
2634                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2635                 set_random_addr(req, &nrpa);
2636                 return 0;
2637         }
2638
2639         /* If forcing the static address is in use or there is no public
2640          * address, use the static address as the random address (but
2641          * skip the HCI command if the current random address is already
2642          * the static one).
2643          *
2644          * In case BR/EDR has been disabled on a dual-mode controller
2645          * and a static address has been configured, then use that
2646          * address instead of the public BR/EDR address.
2647          */
2648         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2649             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2650             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2651              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2652                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2653                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2654                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2655                                     &hdev->static_addr);
2656                 return 0;
2657         }
2658
2659         /* Neither privacy nor static address is being used so use a
2660          * public address.
2661          */
2662         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2663
2664         return 0;
2665 }
2666
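/* Recap of hci_update_random_address() outcomes: use_rpa yields
 * ADDR_LE_DEV_RANDOM (or ADDR_LE_DEV_RANDOM_RESOLVED with LL Privacy)
 * plus an RPA; require_privacy yields ADDR_LE_DEV_RANDOM plus an NRPA;
 * a forced or implied static address yields ADDR_LE_DEV_RANDOM with
 * the static address; otherwise ADDR_LE_DEV_PUBLIC is used.
 */
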
2667 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2668 {
2669         struct bdaddr_list *b;
2670
2671         list_for_each_entry(b, &hdev->accept_list, list) {
2672                 struct hci_conn *conn;
2673
2674                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2675                 if (!conn)
2676                         return true;
2677
2678                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2679                         return true;
2680         }
2681
2682         return false;
2683 }
2684
2685 void __hci_req_update_scan(struct hci_request *req)
2686 {
2687         struct hci_dev *hdev = req->hdev;
2688         u8 scan;
2689
2690         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2691                 return;
2692
2693         if (!hdev_is_powered(hdev))
2694                 return;
2695
2696         if (mgmt_powering_down(hdev))
2697                 return;
2698
2699         if (hdev->scanning_paused)
2700                 return;
2701
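        /* The Write Scan Enable parameter is a bitmask: SCAN_PAGE (0x02)
         * and SCAN_INQUIRY (0x01) may be OR'ed together, while
         * SCAN_DISABLED (0x00) turns both scans off.
         */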
2702         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2703             disconnected_accept_list_entries(hdev))
2704                 scan = SCAN_PAGE;
2705         else
2706                 scan = SCAN_DISABLED;
2707
2708         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2709                 scan |= SCAN_INQUIRY;
2710
2711         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2712             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2713                 return;
2714
2715         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2716 }
2717
2718 static int update_scan(struct hci_request *req, unsigned long opt)
2719 {
2720         hci_dev_lock(req->hdev);
2721         __hci_req_update_scan(req);
2722         hci_dev_unlock(req->hdev);
2723         return 0;
2724 }
2725
2726 static void scan_update_work(struct work_struct *work)
2727 {
2728         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2729
2730         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2731 }
2732
2733 static int connectable_update(struct hci_request *req, unsigned long opt)
2734 {
2735         struct hci_dev *hdev = req->hdev;
2736
2737         hci_dev_lock(hdev);
2738
2739         __hci_req_update_scan(req);
2740
2741         /* If BR/EDR is not enabled and we disable advertising as a
2742          * by-product of disabling connectable, we need to update the
2743          * advertising flags.
2744          */
2745         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2746                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2747
2748         /* Update the advertising parameters if necessary */
2749         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2750             !list_empty(&hdev->adv_instances)) {
2751                 if (ext_adv_capable(hdev))
2752                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2753                 else
2754                         __hci_req_enable_advertising(req);
2755         }
2756
2757         __hci_update_background_scan(req);
2758
2759         hci_dev_unlock(hdev);
2760
2761         return 0;
2762 }
2763
2764 static void connectable_update_work(struct work_struct *work)
2765 {
2766         struct hci_dev *hdev = container_of(work, struct hci_dev,
2767                                             connectable_update);
2768         u8 status;
2769
2770         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2771         mgmt_set_connectable_complete(hdev, status);
2772 }
2773
2774 static u8 get_service_classes(struct hci_dev *hdev)
2775 {
2776         struct bt_uuid *uuid;
2777         u8 val = 0;
2778
2779         list_for_each_entry(uuid, &hdev->uuids, list)
2780                 val |= uuid->svc_hint;
2781
2782         return val;
2783 }
2784
2785 void __hci_req_update_class(struct hci_request *req)
2786 {
2787         struct hci_dev *hdev = req->hdev;
2788         u8 cod[3];
2789
2790         bt_dev_dbg(hdev, "");
2791
2792         if (!hdev_is_powered(hdev))
2793                 return;
2794
2795         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2796                 return;
2797
2798         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2799                 return;
2800
2801         cod[0] = hdev->minor_class;
2802         cod[1] = hdev->major_class;
2803         cod[2] = get_service_classes(hdev);
2804
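        /* Class of Device bit 13 (0x20 in the middle byte) advertises
         * Limited Discoverable Mode as a service class.
         */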
2805         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2806                 cod[1] |= 0x20;
2807
2808         if (memcmp(cod, hdev->dev_class, 3) == 0)
2809                 return;
2810
2811         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2812 }
2813
2814 static void write_iac(struct hci_request *req)
2815 {
2816         struct hci_dev *hdev = req->hdev;
2817         struct hci_cp_write_current_iac_lap cp;
2818
2819         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2820                 return;
2821
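        /* The IAC LAPs below are encoded little-endian: GIAC 0x9E8B33
         * becomes 33 8b 9e and LIAC 0x9E8B00 becomes 00 8b 9e.
         */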
2822         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2823                 /* Limited discoverable mode */
2824                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2825                 cp.iac_lap[0] = 0x00;   /* LIAC */
2826                 cp.iac_lap[1] = 0x8b;
2827                 cp.iac_lap[2] = 0x9e;
2828                 cp.iac_lap[3] = 0x33;   /* GIAC */
2829                 cp.iac_lap[4] = 0x8b;
2830                 cp.iac_lap[5] = 0x9e;
2831         } else {
2832                 /* General discoverable mode */
2833                 cp.num_iac = 1;
2834                 cp.iac_lap[0] = 0x33;   /* GIAC */
2835                 cp.iac_lap[1] = 0x8b;
2836                 cp.iac_lap[2] = 0x9e;
2837         }
2838
2839         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2840                     (cp.num_iac * 3) + 1, &cp);
2841 }
2842
2843 static int discoverable_update(struct hci_request *req, unsigned long opt)
2844 {
2845         struct hci_dev *hdev = req->hdev;
2846
2847         hci_dev_lock(hdev);
2848
2849         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2850                 write_iac(req);
2851                 __hci_req_update_scan(req);
2852                 __hci_req_update_class(req);
2853         }
2854
2855         /* Advertising instances don't use the global discoverable setting, so
2856          * only update AD if advertising was enabled using Set Advertising.
2857          */
2858         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2859                 __hci_req_update_adv_data(req, 0x00);
2860
2861                 /* Discoverable mode affects the local advertising
2862                  * address in limited privacy mode.
2863                  */
2864                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2865                         if (ext_adv_capable(hdev))
2866                                 __hci_req_start_ext_adv(req, 0x00);
2867                         else
2868                                 __hci_req_enable_advertising(req);
2869                 }
2870         }
2871
2872         hci_dev_unlock(hdev);
2873
2874         return 0;
2875 }
2876
2877 static void discoverable_update_work(struct work_struct *work)
2878 {
2879         struct hci_dev *hdev = container_of(work, struct hci_dev,
2880                                             discoverable_update);
2881         u8 status;
2882
2883         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2884         mgmt_set_discoverable_complete(hdev, status);
2885 }
2886
2887 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2888                       u8 reason)
2889 {
2890         switch (conn->state) {
2891         case BT_CONNECTED:
2892         case BT_CONFIG:
2893                 if (conn->type == AMP_LINK) {
2894                         struct hci_cp_disconn_phy_link cp;
2895
2896                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2897                         cp.reason = reason;
2898                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2899                                     &cp);
2900                 } else {
2901                         struct hci_cp_disconnect dc;
2902
2903                         dc.handle = cpu_to_le16(conn->handle);
2904                         dc.reason = reason;
2905                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2906                 }
2907
2908                 conn->state = BT_DISCONN;
2909
2910                 break;
2911         case BT_CONNECT:
2912 #ifdef TIZEN_BT
2913                 if (conn->type == LE_LINK && bacmp(&conn->dst, BDADDR_ANY)) {
2914 #else
2915                 if (conn->type == LE_LINK) {
2916 #endif
2917                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2918                                 break;
2919                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2920                                     0, NULL);
2921                 } else if (conn->type == ACL_LINK) {
2922                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2923                                 break;
2924                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2925                                     sizeof(conn->dst), &conn->dst);
2926                 }
2927                 break;
2928         case BT_CONNECT2:
2929                 if (conn->type == ACL_LINK) {
2930                         struct hci_cp_reject_conn_req rej;
2931
2932                         bacpy(&rej.bdaddr, &conn->dst);
2933                         rej.reason = reason;
2934
2935                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2936                                     sizeof(rej), &rej);
2937                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2938                         struct hci_cp_reject_sync_conn_req rej;
2939
2940                         bacpy(&rej.bdaddr, &conn->dst);
2941
2942                         /* SCO rejection has its own limited set of
2943                          * allowed error values (0x0D-0x0F), which isn't
2944                          * compatible with most values passed to this
2945                          * function. To be safe, hard-code one of the
2946                          * values that is suitable for SCO.
2947                          */
2948                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2949
2950                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2951                                     sizeof(rej), &rej);
2952                 }
2953                 break;
2954         default:
2955                 conn->state = BT_CLOSED;
2956                 break;
2957         }
2958 }
2959
2960 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2961 {
2962         if (status)
2963                 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2964 }
2965
2966 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2967 {
2968         struct hci_request req;
2969         int err;
2970
2971         hci_req_init(&req, conn->hdev);
2972
2973         __hci_abort_conn(&req, conn, reason);
2974
2975         err = hci_req_run(&req, abort_conn_complete);
2976         if (err && err != -ENODATA) {
2977                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
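        /* Note: -ENODATA from hci_req_run() means the request ended up
         * empty, i.e. __hci_abort_conn() queued no command (for instance
         * an LE link still in the scanning stage), so it is deliberately
         * not treated as a failure here.
         */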
2978                 return err;
2979         }
2980
2981         return 0;
2982 }
2983
2984 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2985 {
2986         hci_dev_lock(req->hdev);
2987         __hci_update_background_scan(req);
2988         hci_dev_unlock(req->hdev);
2989         return 0;
2990 }
2991
2992 static void bg_scan_update(struct work_struct *work)
2993 {
2994         struct hci_dev *hdev = container_of(work, struct hci_dev,
2995                                             bg_scan_update);
2996         struct hci_conn *conn;
2997         u8 status;
2998         int err;
2999
3000         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
3001         if (!err)
3002                 return;
3003
3004         hci_dev_lock(hdev);
3005
3006         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3007         if (conn)
3008                 hci_le_conn_failed(conn, status);
3009
3010         hci_dev_unlock(hdev);
3011 }
3012
3013 static int le_scan_disable(struct hci_request *req, unsigned long opt)
3014 {
3015         hci_req_add_le_scan_disable(req, false);
3016         return 0;
3017 }
3018
3019 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
3020 {
3021         u8 length = opt;
3022         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
3023         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
3024         struct hci_cp_inquiry cp;
3025
3026         if (test_bit(HCI_INQUIRY, &req->hdev->flags))
3027                 return 0;
3028
3029         bt_dev_dbg(req->hdev, "");
3030
3031         hci_dev_lock(req->hdev);
3032         hci_inquiry_cache_flush(req->hdev);
3033         hci_dev_unlock(req->hdev);
3034
3035         memset(&cp, 0, sizeof(cp));
3036
3037         if (req->hdev->discovery.limited)
3038                 memcpy(&cp.lap, liac, sizeof(cp.lap));
3039         else
3040                 memcpy(&cp.lap, giac, sizeof(cp.lap));
3041
3042         cp.length = length;
3043
3044         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3045
3046         return 0;
3047 }
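/* For reference (per the Bluetooth Core Specification): cp.length is the
 * standard HCI Inquiry Length parameter, expressed in units of 1.28
 * seconds, so for example a length of 0x08 keeps inquiry running for
 * about 10.24 seconds before the controller stops it on its own.
 */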
3048
3049 static void le_scan_disable_work(struct work_struct *work)
3050 {
3051         struct hci_dev *hdev = container_of(work, struct hci_dev,
3052                                             le_scan_disable.work);
3053         u8 status;
3054
3055         bt_dev_dbg(hdev, "");
3056
3057         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3058                 return;
3059
3060         cancel_delayed_work(&hdev->le_scan_restart);
3061
3062         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
3063         if (status) {
3064                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
3065                            status);
3066                 return;
3067         }
3068
3069         hdev->discovery.scan_start = 0;
3070
3071         /* If we were running an LE-only scan, change the discovery
3072          * state. If we were running LE and BR/EDR inquiry simultaneously
3073          * and BR/EDR inquiry has already finished, stop discovery;
3074          * otherwise BR/EDR inquiry will stop discovery once it finishes.
3075          * If a remote name resolution is still pending, leave the
3076          * discovery state unchanged.
3077          */
3078
3079         if (hdev->discovery.type == DISCOV_TYPE_LE)
3080                 goto discov_stopped;
3081
3082         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3083                 return;
3084
3085         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3086                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3087                     hdev->discovery.state != DISCOVERY_RESOLVING)
3088                         goto discov_stopped;
3089
3090                 return;
3091         }
3092
3093         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3094                      HCI_CMD_TIMEOUT, &status);
3095         if (status) {
3096                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3097                 goto discov_stopped;
3098         }
3099
3100         return;
3101
3102 discov_stopped:
3103         hci_dev_lock(hdev);
3104         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3105         hci_dev_unlock(hdev);
3106 }
3107
3108 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3109 {
3110         struct hci_dev *hdev = req->hdev;
3111
3112         /* If the controller is not scanning, we are done. */
3113         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3114                 return 0;
3115
3116         if (hdev->scanning_paused) {
3117                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3118                 return 0;
3119         }
3120
3121         hci_req_add_le_scan_disable(req, false);
3122
3123         if (use_ext_scan(hdev)) {
3124                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3125
3126                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3127                 ext_enable_cp.enable = LE_SCAN_ENABLE;
3128                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3129
3130                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3131                             sizeof(ext_enable_cp), &ext_enable_cp);
3132         } else {
3133                 struct hci_cp_le_set_scan_enable cp;
3134
3135                 memset(&cp, 0, sizeof(cp));
3136                 cp.enable = LE_SCAN_ENABLE;
3137                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3138                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3139         }
3140
3141         return 0;
3142 }
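/* Editorial note: the point of a full disable + enable cycle here, rather
 * than leaving the scan running, is that re-enabling scanning resets the
 * controller's duplicate filter, so devices that were already reported
 * once get reported again; le_scan_restart_work() below then re-arms the
 * scan-disable timer for controllers with the strict duplicate filter
 * quirk.
 */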
3143
3144 static void le_scan_restart_work(struct work_struct *work)
3145 {
3146         struct hci_dev *hdev = container_of(work, struct hci_dev,
3147                                             le_scan_restart.work);
3148         unsigned long timeout, duration, scan_start, now;
3149         u8 status;
3150
3151         bt_dev_dbg(hdev, "");
3152
3153         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3154         if (status) {
3155                 bt_dev_err(hdev, "failed to restart LE scan: status 0x%02x",
3156                            status);
3157                 return;
3158         }
3159
3160         hci_dev_lock(hdev);
3161
3162         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3163             !hdev->discovery.scan_start)
3164                 goto unlock;
3165
3166         /* When the scan was started, hdev->le_scan_disable was queued to
3167          * run 'duration' after scan_start. The restart above canceled
3168          * that job, so queue it again with the remaining timeout to
3169          * make sure the scan does not run indefinitely.
3170          */
3171         duration = hdev->discovery.scan_duration;
3172         scan_start = hdev->discovery.scan_start;
3173         now = jiffies;
3174         if (now - scan_start <= duration) {
3175                 int elapsed;
3176
3177                 if (now >= scan_start)
3178                         elapsed = now - scan_start;
3179                 else
3180                         elapsed = ULONG_MAX - scan_start + now;
3181
3182                 timeout = duration - elapsed;
3183         } else {
3184                 timeout = 0;
3185         }
3186
3187         queue_delayed_work(hdev->req_workqueue,
3188                            &hdev->le_scan_disable, timeout);
3189
3190 unlock:
3191         hci_dev_unlock(hdev);
3192 }
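/* Illustrative sketch (hypothetical helper, not part of the file): the
 * wraparound handling above is equivalent to plain modular unsigned
 * arithmetic on jiffies (the open-coded ULONG_MAX form is off by one
 * jiffy in the wrapped case), so the remaining timeout could also be
 * computed as:
 */
static inline unsigned long scan_timeout_left(unsigned long now,
                                              unsigned long start,
                                              unsigned long duration)
{
        unsigned long elapsed = now - start;    /* wraps modulo 2^BITS_PER_LONG */

        return elapsed < duration ? duration - elapsed : 0;
}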
3193
3194 static int active_scan(struct hci_request *req, unsigned long opt)
3195 {
3196         u16 interval = opt;
3197         struct hci_dev *hdev = req->hdev;
3198         u8 own_addr_type;
3199         /* Accept list is not used for discovery */
3200         u8 filter_policy = 0x00;
3201         /* Default is to enable duplicates filter */
3202         u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3203         /* Discovery doesn't require controller address resolution */
3204         bool addr_resolv = false;
3205         int err;
3206
3207         bt_dev_dbg(hdev, "");
3208
3209         /* If the controller is already scanning, background scanning is
3210          * running, so stop it temporarily in order to set the discovery
3211          * scan parameters.
3212          */
3213         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3214                 hci_req_add_le_scan_disable(req, false);
3215                 cancel_interleave_scan(hdev);
3216         }
3217
3218         /* All active scans will be done with either a resolvable private
3219          * address (when privacy feature has been enabled) or non-resolvable
3220          * private address.
3221          */
3222         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3223                                         &own_addr_type);
3224         if (err < 0)
3225                 own_addr_type = ADDR_LE_DEV_PUBLIC;
3226
3227         hci_dev_lock(hdev);
3228         if (hci_is_adv_monitoring(hdev)) {
3229                 /* The duplicate filter should be disabled while an
3230                  * advertisement monitor is active; otherwise AdvMon can
3231                  * receive only one advertisement per peer (*) during
3232                  * active scanning and might wrongly report those peers as lost.
3233                  *
3234                  * (*) Note that different controllers interpret |duplicate|
3235                  * differently. Some treat packets with the same address as
3236                  * duplicates, while others require both the same address and
3237                  * the same RSSI. Although in the latter case disabling the
3238                  * duplicate filter is unnecessary, active scanning typically
3239                  * runs only for a short period of time, so the power impact
3240                  * is negligible.
3241                  */
3242                 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
3243         }
3244         hci_dev_unlock(hdev);
3245
3246         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3247                            hdev->le_scan_window_discovery, own_addr_type,
3248                            filter_policy, filter_dup, addr_resolv);
3249         return 0;
3250 }
3251
3252 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3253 {
3254         int err;
3255
3256         bt_dev_dbg(req->hdev, "");
3257
3258         err = active_scan(req, opt);
3259         if (err)
3260                 return err;
3261
3262         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3263 }
3264
3265 static void start_discovery(struct hci_dev *hdev, u8 *status)
3266 {
3267         unsigned long timeout;
3268
3269         bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3270
3271         switch (hdev->discovery.type) {
3272         case DISCOV_TYPE_BREDR:
3273                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3274                         hci_req_sync(hdev, bredr_inquiry,
3275                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3276                                      status);
3277                 return;
3278         case DISCOV_TYPE_INTERLEAVED:
3279                 /* When running simultaneous discovery, the LE scanning time
3280                  * should occupy the whole discovery time since BR/EDR inquiry
3281                  * and LE scanning are scheduled by the controller.
3282                  *
3283                  * For interleaved discovery, in comparison, BR/EDR inquiry
3284                  * and LE scanning are done sequentially with separate
3285                  * timeouts.
3286                  */
3287                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3288                              &hdev->quirks)) {
3289                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3290                         /* During simultaneous discovery, double the LE
3291                          * scan interval to leave the controller enough
3292                          * time to perform BR/EDR inquiry.
3293                          */
3294                         hci_req_sync(hdev, interleaved_discov,
3295                                      hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3296                                      status);
3297                         break;
3298                 }
3299
3300                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3301                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3302                              HCI_CMD_TIMEOUT, status);
3303                 break;
3304         case DISCOV_TYPE_LE:
3305                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3306                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3307                              HCI_CMD_TIMEOUT, status);
3308                 break;
3309         default:
3310                 *status = HCI_ERROR_UNSPECIFIED;
3311                 return;
3312         }
3313
3314         if (*status)
3315                 return;
3316
3317         bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3318
3319         /* When service discovery is used and the controller has a
3320          * strict duplicate filter, it is important to remember the
3321          * start and duration of the scan. This is required for
3322          * restarting scanning during the discovery phase.
3323          */
3324         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3325             hdev->discovery.result_filtering) {
3326                 hdev->discovery.scan_start = jiffies;
3327                 hdev->discovery.scan_duration = timeout;
3328         }
3329
3330         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3331                            timeout);
3332 }
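/* Editorial worked example (the numbers are illustrative defaults, not
 * read from a specific controller): with a 60 ms LE scan interval
 * (0x0060 in 0.625 ms units) and a 30 ms scan window, doubling the
 * interval for simultaneous discovery gives a 120 ms interval with the
 * window unchanged, so the radio scans LE only 25% of the time and the
 * remaining airtime is left for BR/EDR inquiry trains.
 */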
3333
3334 bool hci_req_stop_discovery(struct hci_request *req)
3335 {
3336         struct hci_dev *hdev = req->hdev;
3337         struct discovery_state *d = &hdev->discovery;
3338         struct hci_cp_remote_name_req_cancel cp;
3339         struct inquiry_entry *e;
3340         bool ret = false;
3341
3342         bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3343
3344         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3345                 if (test_bit(HCI_INQUIRY, &hdev->flags))
3346                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3347
3348                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3349                         cancel_delayed_work(&hdev->le_scan_disable);
3350                         cancel_delayed_work(&hdev->le_scan_restart);
3351                         hci_req_add_le_scan_disable(req, false);
3352                 }
3353
3354                 ret = true;
3355         } else {
3356                 /* Passive scanning */
3357                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3358                         hci_req_add_le_scan_disable(req, false);
3359                         ret = true;
3360                 }
3361         }
3362
3363         /* No further actions needed for LE-only discovery */
3364         if (d->type == DISCOV_TYPE_LE)
3365                 return ret;
3366
3367         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3368                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3369                                                      NAME_PENDING);
3370                 if (!e)
3371                         return ret;
3372
3373                 bacpy(&cp.bdaddr, &e->data.bdaddr);
3374                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3375                             &cp);
3376                 ret = true;
3377         }
3378
3379         return ret;
3380 }
3381
3382 static int stop_discovery(struct hci_request *req, unsigned long opt)
3383 {
3384         hci_dev_lock(req->hdev);
3385         hci_req_stop_discovery(req);
3386         hci_dev_unlock(req->hdev);
3387
3388         return 0;
3389 }
3390
3391 static void discov_update(struct work_struct *work)
3392 {
3393         struct hci_dev *hdev = container_of(work, struct hci_dev,
3394                                             discov_update);
3395         u8 status = 0;
3396
3397         switch (hdev->discovery.state) {
3398         case DISCOVERY_STARTING:
3399                 start_discovery(hdev, &status);
3400                 mgmt_start_discovery_complete(hdev, status);
3401                 if (status)
3402                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3403                 else
3404                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3405                 break;
3406         case DISCOVERY_STOPPING:
3407                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3408                 mgmt_stop_discovery_complete(hdev, status);
3409                 if (!status)
3410                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3411                 break;
3412         case DISCOVERY_STOPPED:
3413         default:
3414                 return;
3415         }
3416 }
3417
3418 static void discov_off(struct work_struct *work)
3419 {
3420         struct hci_dev *hdev = container_of(work, struct hci_dev,
3421                                             discov_off.work);
3422
3423         bt_dev_dbg(hdev, "");
3424
3425         hci_dev_lock(hdev);
3426
3427         /* When the discoverable timeout triggers, make sure the limited
3428          * discoverable flag is cleared. Even when the timeout was
3429          * triggered from general discoverable mode, it is safe to
3430          * clear the flag unconditionally.
3431          */
3432         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3433         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3434         hdev->discov_timeout = 0;
3435
3436         hci_dev_unlock(hdev);
3437
3438         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3439         mgmt_new_settings(hdev);
3440 }
3441
3442 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3443 {
3444         struct hci_dev *hdev = req->hdev;
3445         u8 link_sec;
3446
3447         hci_dev_lock(hdev);
3448
3449         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3450             !lmp_host_ssp_capable(hdev)) {
3451                 u8 mode = 0x01;
3452
3453                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3454
3455                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3456                         u8 support = 0x01;
3457
3458                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3459                                     sizeof(support), &support);
3460                 }
3461         }
3462
3463         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3464             lmp_bredr_capable(hdev)) {
3465                 struct hci_cp_write_le_host_supported cp;
3466
3467                 cp.le = 0x01;
3468                 cp.simul = 0x00;
3469
3470                 /* Check first whether the host state is already
3471                  * correct (host features set).
3472                  */
3473                 if (cp.le != lmp_host_le_capable(hdev) ||
3474                     cp.simul != lmp_host_le_br_capable(hdev))
3475                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3476                                     sizeof(cp), &cp);
3477         }
3478
3479         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3480                 /* Make sure the controller has a good default for
3481                  * advertising data. This also applies to the case
3482                  * where BR/EDR was toggled during the AUTO_OFF phase.
3483                  */
3484                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3485                     list_empty(&hdev->adv_instances)) {
3486                         int err;
3487
3488                         if (ext_adv_capable(hdev)) {
3489                                 err = __hci_req_setup_ext_adv_instance(req,
3490                                                                        0x00);
3491                                 if (!err)
3492                                         __hci_req_update_scan_rsp_data(req,
3493                                                                        0x00);
3494                         } else {
3495                                 err = 0;
3496                                 __hci_req_update_adv_data(req, 0x00);
3497                                 __hci_req_update_scan_rsp_data(req, 0x00);
3498                         }
3499
3500                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3501                                 if (!ext_adv_capable(hdev))
3502                                         __hci_req_enable_advertising(req);
3503                                 else if (!err)
3504                                         __hci_req_enable_ext_advertising(req,
3505                                                                          0x00);
3506                         }
3507                 } else if (!list_empty(&hdev->adv_instances)) {
3508                         struct adv_info *adv_instance;
3509
3510                         adv_instance = list_first_entry(&hdev->adv_instances,
3511                                                         struct adv_info, list);
3512                         __hci_req_schedule_adv_instance(req,
3513                                                         adv_instance->instance,
3514                                                         true);
3515                 }
3516         }
3517
3518         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3519         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3520                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3521                             sizeof(link_sec), &link_sec);
3522
3523         if (lmp_bredr_capable(hdev)) {
3524                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3525                         __hci_req_write_fast_connectable(req, true);
3526                 else
3527                         __hci_req_write_fast_connectable(req, false);
3528                 __hci_req_update_scan(req);
3529                 __hci_req_update_class(req);
3530                 __hci_req_update_name(req);
3531                 __hci_req_update_eir(req);
3532         }
3533
3534         hci_dev_unlock(hdev);
3535         return 0;
3536 }
3537
3538 int __hci_req_hci_power_on(struct hci_dev *hdev)
3539 {
3540         /* Register the available SMP channels (BR/EDR and LE) only when
3541          * the controller has been powered on successfully. This late
3542          * registration is required so that LE SMP can clearly decide
3543          * whether the public or the static address is to be used.
3544          */
3545         smp_register(hdev);
3546
3547         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3548                               NULL);
3549 }
3550
3551 void hci_request_setup(struct hci_dev *hdev)
3552 {
3553         INIT_WORK(&hdev->discov_update, discov_update);
3554         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3555         INIT_WORK(&hdev->scan_update, scan_update_work);
3556         INIT_WORK(&hdev->connectable_update, connectable_update_work);
3557         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3558         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3559         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3560         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3561         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3562         INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3563 }
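/* Usage sketch (illustrative, based on how the mgmt layer typically
 * drives these handlers): the work items initialized above are kicked by
 * queueing them on the dedicated request workqueue, along the lines of:
 *
 *      hci_discovery_set_state(hdev, DISCOVERY_STARTING);
 *      queue_work(hdev->req_workqueue, &hdev->discov_update);
 *
 * which is why hci_request_cancel_all() below must cancel every item
 * initialized here.
 */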
3564
3565 void hci_request_cancel_all(struct hci_dev *hdev)
3566 {
3567         hci_req_sync_cancel(hdev, ENODEV);
3568
3569         cancel_work_sync(&hdev->discov_update);
3570         cancel_work_sync(&hdev->bg_scan_update);
3571         cancel_work_sync(&hdev->scan_update);
3572         cancel_work_sync(&hdev->connectable_update);
3573         cancel_work_sync(&hdev->discoverable_update);
3574         cancel_delayed_work_sync(&hdev->discov_off);
3575         cancel_delayed_work_sync(&hdev->le_scan_disable);
3576         cancel_delayed_work_sync(&hdev->le_scan_restart);
3577
3578         if (hdev->adv_instance_timeout) {
3579                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3580                 hdev->adv_instance_timeout = 0;
3581         }
3582
3583         cancel_interleave_scan(hdev);
3584 }