net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

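/* Transfer the request's queued commands to the controller: attach the
 * completion callback (plain or skb-returning) to the last command,
 * splice the batch onto the tail of hdev->cmd_q and kick the command
 * work.
 */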
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

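/* Completion handler for the synchronous request helpers below: record
 * the result (and response skb, if any) on hdev and wake up the waiter
 * blocked in __hci_req_sync() or __hci_cmd_sync_ev().
 */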
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

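/* Abort a pending synchronous request, waking up the waiter with the
 * given error code.
 */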
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

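/* Send a single HCI command and wait for the response (optionally a
 * specific event) or the timeout. Returns the response skb on success
 * or an ERR_PTR() value on failure.
 */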
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

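/* Allocate an skb carrying a single HCI command packet: the command
 * header (opcode and parameter length) followed by the parameters.
 */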
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

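/* Adjust page scan parameters for "fast connectable" mode: interlaced
 * page scan with a 160 msec interval when enabled, standard page scan
 * with the default 1.28 sec interval otherwise. Commands are only
 * queued for values that differ from the controller's current state.
 */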
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure that proper values are set for the RSSI threshold and
         * UUID filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

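/* Append a 16-bit Service Class UUID list to the EIR buffer at @data,
 * writing at most @len bytes. The companion helpers below do the same
 * for 32-bit and 128-bit UUIDs. When not all UUIDs fit, the list type
 * is downgraded from "complete" to "incomplete" (the SOME variants).
 */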
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

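/* Build the Extended Inquiry Response payload: local name, inquiry TX
 * power, Device ID record and the three UUID lists, in that order.
 */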
static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

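/* Regenerate the EIR data and queue a Write Extended Inquiry Response
 * command, but only if the controller is powered, supports extended
 * inquiry, has SSP enabled and the data has actually changed.
 */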
void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

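/* Queue the command that stops LE scanning, using the extended variant
 * when the controller supports extended scanning.
 */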
void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

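/* Synchronize the controller's white list with the pending connection
 * and report lists. Returns the scan filter policy to use: 0x01 when
 * the white list can be used, 0x00 (accept all advertising) when it
 * cannot, e.g. because an entry needs an RPA or the list is full.
 */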
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the whitelist.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return the filter policy value to not use
         * the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

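/* Queue the scan parameter and scan enable commands for the given
 * settings, using the extended command set (with one parameter block
 * per supported PHY) when the controller supports extended scanning.
 */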
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy)
{
        struct hci_dev *hdev = req->hdev;

        /* Use extended scanning if the Set Extended Scan Parameters and
         * Set Extended Scan Enable commands are supported.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * thus has LE privacy enabled, controllers with Extended
         * Scanner Filter Policies support can enable support for
         * handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
                           hdev->le_scan_window, own_addr_type, filter_policy);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                struct hci_cp_le_set_ext_adv_enable cp;

                cp.enable = 0x00;
                /* Disable all sets since we only support one set at the moment */
                cp.num_of_sets = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
        }
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

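/* Check the controller's supported LE state combinations (le_states)
 * to decide whether advertising of the requested kind is allowed while
 * LE connections exist in the slave or master role.
 */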
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in slave role. */
        if (hdev->conn_hash.le_num_slave > 0) {
                /* Slave connection state and non connectable mode bit 20. */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Slave connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in master role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
                /* Master connection state and non connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Master connection state and connectable mode bit 35 and
                 * scannable 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}

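/* Queue the legacy Set Advertising Parameters and Set Advertising
 * Enable commands, selecting ADV_IND, ADV_SCAN_IND or ADV_NONCONN_IND
 * based on the instance flags, the global connectable setting and the
 * presence of scan response data.
 */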
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

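/* Append the device name to an advertising data buffer, preferring the
 * complete name, then the configured short name, and finally a
 * truncated form of the complete name.
 */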
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t short_len;
        size_t complete_len;

        /* no space left for name (+ NULL + type + len) */
        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                return ad_len;

        /* use complete name if present and fits */
        complete_len = strlen(hdev->dev_name);
        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len + 1);

        /* use short name if present */
        short_len = strlen(hdev->short_name);
        if (short_len)
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len + 1);

        /* use shortened full name if present, we already know that name
         * is longer than HCI_MAX_SHORT_NAME_LENGTH
         */
        if (complete_len) {
                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
                                       sizeof(name));
        }

        return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct hci_cp_le_set_ext_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                if (instance)
                        len = create_instance_scan_rsp_data(hdev, instance,
                                                            cp.data);
                else
                        len = create_default_scan_rsp_data(hdev, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.handle = 0;
                cp.length = len;
                cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                if (instance)
                        len = create_instance_scan_rsp_data(hdev, instance,
                                                            cp.data);
                else
                        len = create_default_scan_rsp_data(hdev, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
        }
}

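/* Assemble the advertising data for @instance: the "Flags" AD field
 * (when applicable), the instance's advertising data and, if requested
 * and valid, the TX power field. Returns the number of bytes written
 * to @ptr.
 */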
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
                s8 adv_tx_power;

                if (ext_adv_capable(hdev)) {
                        if (adv_instance)
                                adv_tx_power = adv_instance->tx_power;
                        else
                                adv_tx_power = hdev->adv_tx_power;
                } else {
                        adv_tx_power = hdev->adv_tx_power;
                }

                /* Provide Tx Power only if we can provide a valid value for it */
                if (adv_tx_power != HCI_TX_POWER_INVALID) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_TX_POWER;
                        ptr[2] = (u8)adv_tx_power;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct hci_cp_le_set_ext_adv_data cp;

                memset(&cp, 0, sizeof(cp));

                len = create_instance_adv_data(hdev, instance, cp.data);

                /* There's nothing to do if the data hasn't changed */
                if (hdev->adv_data_len == len &&
                    memcmp(cp.data, hdev->adv_data, len) == 0)
                        return;

                memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
                hdev->adv_data_len = len;

                cp.length = len;
                cp.handle = 0;
                cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
        } else {
                struct hci_cp_le_set_adv_data cp;

                memset(&cp, 0, sizeof(cp));

                len = create_instance_adv_data(hdev, instance, cp.data);

                /* There's nothing to do if the data hasn't changed */
                if (hdev->adv_data_len == len &&
                    memcmp(cp.data, hdev->adv_data, len) == 0)
                        return;

                memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
                hdev->adv_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
        }
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                if (ext_adv_capable(hdev)) {
                        __hci_req_start_ext_adv(&req, 0x00);
                } else {
                        __hci_req_update_adv_data(&req, 0x00);
                        __hci_req_update_scan_rsp_data(&req, 0x00);
                        __hci_req_enable_advertising(&req);
                }
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

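/* Select the own address type, and random address if one is needed,
 * for advertising: a resolvable private address when privacy is in
 * use, a freshly generated non-resolvable private address when privacy
 * is required but no RPA may be used, and the public address otherwise.
 */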
1448 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1449                            bool use_rpa, struct adv_info *adv_instance,
1450                            u8 *own_addr_type, bdaddr_t *rand_addr)
1451 {
1452         int err;
1453
1454         bacpy(rand_addr, BDADDR_ANY);
1455
1456         /* If privacy is enabled use a resolvable private address. If
1457          * current RPA has expired then generate a new one.
1458          */
1459         if (use_rpa) {
1460                 int to;
1461
1462                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1463
1464                 if (adv_instance) {
1465                         if (!adv_instance->rpa_expired &&
1466                             !bacmp(&adv_instance->random_addr, &hdev->rpa))
1467                                 return 0;
1468
1469                         adv_instance->rpa_expired = false;
1470                 } else {
1471                         if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1472                             !bacmp(&hdev->random_addr, &hdev->rpa))
1473                                 return 0;
1474                 }
1475
1476                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1477                 if (err < 0) {
1478                         BT_ERR("%s failed to generate new RPA", hdev->name);
1479                         return err;
1480                 }
1481
1482                 bacpy(rand_addr, &hdev->rpa);
1483
1484                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1485                 if (adv_instance)
1486                         queue_delayed_work(hdev->workqueue,
1487                                            &adv_instance->rpa_expired_cb, to);
1488                 else
1489                         queue_delayed_work(hdev->workqueue,
1490                                            &hdev->rpa_expired, to);
1491
1492                 return 0;
1493         }
1494
1495         /* In case of required privacy without resolvable private address,
1496          * use an non-resolvable private address. This is useful for
1497          * non-connectable advertising.
1498          */
1499         if (require_privacy) {
1500                 bdaddr_t nrpa;
1501
1502                 while (true) {
1503                         /* The non-resolvable private address is generated
1504                          * from random six bytes with the two most significant
1505                          * bits cleared.
1506                          */
1507                         get_random_bytes(&nrpa, 6);
1508                         nrpa.b[5] &= 0x3f;
1509
1510                         /* The non-resolvable private address shall not be
1511                          * equal to the public address.
1512                          */
1513                         if (bacmp(&hdev->bdaddr, &nrpa))
1514                                 break;
1515                 }
1516
1517                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1518                 bacpy(rand_addr, &nrpa);
1519
1520                 return 0;
1521         }
1522
1523         /* No privacy so use a public address. */
1524         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1525
1526         return 0;
1527 }
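
/* Illustrative sketch (added for clarity, not part of the original file):
 * the NRPA generation loop above as a standalone helper. The helper name
 * is hypothetical; the real code open-codes this loop where needed.
 */
static void __maybe_unused example_generate_nrpa(struct hci_dev *hdev,
                                                 bdaddr_t *nrpa)
{
        do {
                /* Six random bytes with the two most significant bits
                 * of the address cleared.
                 */
                get_random_bytes(nrpa, 6);
                nrpa->b[5] &= 0x3f;
                /* Retry until it differs from the public address. */
        } while (!bacmp(&hdev->bdaddr, nrpa));
}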
1528
1529 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1530 {
1531         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1532 }
1533
1534 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1535 {
1536         struct hci_cp_le_set_ext_adv_params cp;
1537         struct hci_dev *hdev = req->hdev;
1538         bool connectable;
1539         u32 flags;
1540         bdaddr_t random_addr;
1541         u8 own_addr_type;
1542         int err;
1543         struct adv_info *adv_instance;
1544         bool secondary_adv;
1545         /* In the ext adv set param command the interval is 3 octets (0x000800 = 1.28 s) */
1546         const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1547
1548         if (instance > 0) {
1549                 adv_instance = hci_find_adv_instance(hdev, instance);
1550                 if (!adv_instance)
1551                         return -EINVAL;
1552         } else {
1553                 adv_instance = NULL;
1554         }
1555
1556         flags = get_adv_instance_flags(hdev, instance);
1557
1558         /* If the "connectable" instance flag was not set, then choose between
1559          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1560          */
1561         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1562                       mgmt_get_connectable(hdev);
1563
1564         if (!is_advertising_allowed(hdev, connectable))
1565                 return -EPERM;
1566
1567         /* Set require_privacy to true only when non-connectable
1568          * advertising is used. In that case it is fine to use a
1569          * non-resolvable private address.
1570          */
1571         err = hci_get_random_address(hdev, !connectable,
1572                                      adv_use_rpa(hdev, flags), adv_instance,
1573                                      &own_addr_type, &random_addr);
1574         if (err < 0)
1575                 return err;
1576
1577         memset(&cp, 0, sizeof(cp));
1578
1579         memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1580         memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1581
1582         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1583
1584         if (connectable) {
1585                 if (secondary_adv)
1586                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1587                 else
1588                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1589         } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1590                 if (secondary_adv)
1591                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1592                 else
1593                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1594         } else {
1595                 if (secondary_adv)
1596                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1597                 else
1598                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1599         }
1600
1601         cp.own_addr_type = own_addr_type;
1602         cp.channel_map = hdev->le_adv_channel_map;
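        /* 127 (0x7f) means the host has no TX power preference and lets
         * the controller pick a value.
         */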
1603         cp.tx_power = 127;
1604         cp.handle = 0;
1605
1606         if (flags & MGMT_ADV_FLAG_SEC_2M) {
1607                 cp.primary_phy = HCI_ADV_PHY_1M;
1608                 cp.secondary_phy = HCI_ADV_PHY_2M;
1609         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1610                 cp.primary_phy = HCI_ADV_PHY_CODED;
1611                 cp.secondary_phy = HCI_ADV_PHY_CODED;
1612         } else {
1613                 /* In all other cases use 1M */
1614                 cp.primary_phy = HCI_ADV_PHY_1M;
1615                 cp.secondary_phy = HCI_ADV_PHY_1M;
1616         }
1617
1618         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1619
1620         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1621             bacmp(&random_addr, BDADDR_ANY)) {
1622                 struct hci_cp_le_set_adv_set_rand_addr cp;
1623
1624                 /* Check if the random address needs to be updated */
1625                 if (adv_instance) {
1626                         if (!bacmp(&random_addr, &adv_instance->random_addr))
1627                                 return 0;
1628                 } else {
1629                         if (!bacmp(&random_addr, &hdev->random_addr))
1630                                 return 0;
1631                 }
1632
1633                 memset(&cp, 0, sizeof(cp));
1634
1635                 cp.handle = 0;
1636                 bacpy(&cp.bdaddr, &random_addr);
1637
1638                 hci_req_add(req,
1639                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1640                             sizeof(cp), &cp);
1641         }
1642
1643         return 0;
1644 }
1645
1646 void __hci_req_enable_ext_advertising(struct hci_request *req)
1647 {
1648         struct hci_cp_le_set_ext_adv_enable *cp;
1649         struct hci_cp_ext_adv_set *adv_set;
1650         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1651
1652         cp = (void *) data;
1653         adv_set = (void *) cp->data;
1654
1655         memset(cp, 0, sizeof(*cp));
1656
1657         cp->enable = 0x01;
1658         cp->num_of_sets = 0x01;
1659
1660         memset(adv_set, 0, sizeof(*adv_set));
1661
1662         adv_set->handle = 0;
1663
1664         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1665                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1666                     data);
1667 }
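
/* Illustrative sketch (added for clarity, not part of the original file):
 * the matching disable command. Per the specification, enable = 0x00 with
 * num_of_sets = 0x00 disables all advertising sets at once. The helper
 * name is hypothetical.
 */
static void __maybe_unused example_disable_ext_advertising(struct hci_request *req)
{
        struct hci_cp_le_set_ext_adv_enable cp;

        memset(&cp, 0, sizeof(cp));     /* enable = 0x00, num_of_sets = 0x00 */
        hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
}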
1668
1669 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1670 {
1671         struct hci_dev *hdev = req->hdev;
1672         int err;
1673
1674         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1675                 __hci_req_disable_advertising(req);
1676
1677         err = __hci_req_setup_ext_adv_instance(req, instance);
1678         if (err < 0)
1679                 return err;
1680
1681         __hci_req_update_scan_rsp_data(req, instance);
1682         __hci_req_enable_ext_advertising(req);
1683
1684         return 0;
1685 }
1686
1687 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1688                                     bool force)
1689 {
1690         struct hci_dev *hdev = req->hdev;
1691         struct adv_info *adv_instance = NULL;
1692         u16 timeout;
1693
1694         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1695             list_empty(&hdev->adv_instances))
1696                 return -EPERM;
1697
1698         if (hdev->adv_instance_timeout)
1699                 return -EBUSY;
1700
1701         adv_instance = hci_find_adv_instance(hdev, instance);
1702         if (!adv_instance)
1703                 return -ENOENT;
1704
1705         /* A zero timeout means unlimited advertising. As long as there is
1706          * only one instance, the duration should be ignored. We still set
1707          * a timeout in case further instances are added later on.
1708          *
1709          * If the remaining lifetime of the instance is more than the duration
1710          * then the timeout corresponds to the duration, otherwise it will be
1711          * reduced to the remaining instance lifetime.
1712          */
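        /* For example (added for clarity): a 10 s duration with 25 s of
         * lifetime remaining schedules a 10 s timeout, while only 7 s of
         * remaining lifetime reduces the timeout to 7 s.
         */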
1713         if (adv_instance->timeout == 0 ||
1714             adv_instance->duration <= adv_instance->remaining_time)
1715                 timeout = adv_instance->duration;
1716         else
1717                 timeout = adv_instance->remaining_time;
1718
1719         /* The remaining time is being reduced unless the instance is being
1720          * advertised without time limit.
1721          */
1722         if (adv_instance->timeout)
1723                 adv_instance->remaining_time =
1724                                 adv_instance->remaining_time - timeout;
1725
1726         hdev->adv_instance_timeout = timeout;
1727         queue_delayed_work(hdev->req_workqueue,
1728                            &hdev->adv_instance_expire,
1729                            msecs_to_jiffies(timeout * 1000));
1730
1731         /* If we're just re-scheduling the same instance again then do not
1732          * execute any HCI commands. This happens when a single instance is
1733          * being advertised.
1734          */
1735         if (!force && hdev->cur_adv_instance == instance &&
1736             hci_dev_test_flag(hdev, HCI_LE_ADV))
1737                 return 0;
1738
1739         hdev->cur_adv_instance = instance;
1740         if (ext_adv_capable(hdev)) {
1741                 __hci_req_start_ext_adv(req, instance);
1742         } else {
1743                 __hci_req_update_adv_data(req, instance);
1744                 __hci_req_update_scan_rsp_data(req, instance);
1745                 __hci_req_enable_advertising(req);
1746         }
1747
1748         return 0;
1749 }
1750
1751 static void cancel_adv_timeout(struct hci_dev *hdev)
1752 {
1753         if (hdev->adv_instance_timeout) {
1754                 hdev->adv_instance_timeout = 0;
1755                 cancel_delayed_work(&hdev->adv_instance_expire);
1756         }
1757 }
1758
1759 /* For a single instance:
1760  * - force == true: The instance will be removed even when its remaining
1761  *   lifetime is not zero.
1762  * - force == false: the instance will be deactivated but kept stored unless
1763  *   the remaining lifetime is zero.
1764  *
1765  * For instance == 0x00:
1766  * - force == true: All instances will be removed regardless of their timeout
1767  *   setting.
1768  * - force == false: Only instances that have a timeout will be removed.
1769  */
1770 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1771                                 struct hci_request *req, u8 instance,
1772                                 bool force)
1773 {
1774         struct adv_info *adv_instance, *n, *next_instance = NULL;
1775         int err;
1776         u8 rem_inst;
1777
1778         /* Cancel any timeout concerning the removed instance(s). */
1779         if (!instance || hdev->cur_adv_instance == instance)
1780                 cancel_adv_timeout(hdev);
1781
1782         /* Get the next instance to advertise BEFORE we remove
1783          * the current one. This can be the same instance again
1784          * if there is only one instance.
1785          */
1786         if (instance && hdev->cur_adv_instance == instance)
1787                 next_instance = hci_get_next_instance(hdev, instance);
1788
1789         if (instance == 0x00) {
1790                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1791                                          list) {
1792                         if (!(force || adv_instance->timeout))
1793                                 continue;
1794
1795                         rem_inst = adv_instance->instance;
1796                         err = hci_remove_adv_instance(hdev, rem_inst);
1797                         if (!err)
1798                                 mgmt_advertising_removed(sk, hdev, rem_inst);
1799                 }
1800         } else {
1801                 adv_instance = hci_find_adv_instance(hdev, instance);
1802
1803                 if (force || (adv_instance && adv_instance->timeout &&
1804                               !adv_instance->remaining_time)) {
1805                         /* Don't advertise a removed instance. */
1806                         if (next_instance &&
1807                             next_instance->instance == instance)
1808                                 next_instance = NULL;
1809
1810                         err = hci_remove_adv_instance(hdev, instance);
1811                         if (!err)
1812                                 mgmt_advertising_removed(sk, hdev, instance);
1813                 }
1814         }
1815
1816         if (!req || !hdev_is_powered(hdev) ||
1817             hci_dev_test_flag(hdev, HCI_ADVERTISING))
1818                 return;
1819
1820         if (next_instance)
1821                 __hci_req_schedule_adv_instance(req, next_instance->instance,
1822                                                 false);
1823 }
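
/* Illustrative sketch (added for clarity, not part of the original file):
 * how the rules documented above play out for two typical calls. The
 * helper name and the instance number 0x05 are hypothetical.
 */
static void __maybe_unused example_clear_instances(struct hci_dev *hdev,
                                                   struct hci_request *req)
{
        /* instance 0x00, force == false: remove only the instances that
         * have a timeout set; the rest stay stored.
         */
        hci_req_clear_adv_instance(hdev, NULL, req, 0x00, false);

        /* instance 0x05, force == true: remove instance 5 even if its
         * remaining lifetime is not zero.
         */
        hci_req_clear_adv_instance(hdev, NULL, req, 0x05, true);
}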
1824
1825 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1826 {
1827         struct hci_dev *hdev = req->hdev;
1828
1829         /* If we're advertising or initiating an LE connection we can't
1830          * go ahead and change the random address at this time. This is
1831          * because the eventual initiator address used for the
1832          * subsequently created connection will be undefined (some
1833          * controllers use the new address and others the one we had
1834          * when the operation started).
1835          *
1836          * In this kind of scenario skip the update and let the random
1837          * address be updated at the next cycle.
1838          */
1839         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1840             hci_lookup_le_connect(hdev)) {
1841                 BT_DBG("Deferring random address update");
1842                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1843                 return;
1844         }
1845
1846         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1847 }
1848
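/* Summary of the selection order below (comment added for clarity):
 * 1. use_rpa:         resolvable private address, regenerated on expiry
 * 2. require_privacy: non-resolvable private address
 * 3. forced static address, no public address, or BR/EDR disabled with a
 *    static address configured: static random address
 * 4. otherwise:       public address
 */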
1849 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1850                               bool use_rpa, u8 *own_addr_type)
1851 {
1852         struct hci_dev *hdev = req->hdev;
1853         int err;
1854
1855         /* If privacy is enabled, use a resolvable private address. If the
1856          * current RPA has expired or something other than the current RPA
1857          * is in use, generate a new one.
1858          */
1859         if (use_rpa) {
1860                 int to;
1861
1862                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1863
1864                 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1865                     !bacmp(&hdev->random_addr, &hdev->rpa))
1866                         return 0;
1867
1868                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1869                 if (err < 0) {
1870                         bt_dev_err(hdev, "failed to generate new RPA");
1871                         return err;
1872                 }
1873
1874                 set_random_addr(req, &hdev->rpa);
1875
1876                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1877                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1878
1879                 return 0;
1880         }
1881
1882         /* If privacy is required but a resolvable private address cannot
1883          * be used, fall back to a non-resolvable private address. This is
1884          * useful for active scanning and non-connectable advertising.
1885          */
1886         if (require_privacy) {
1887                 bdaddr_t nrpa;
1888
1889                 while (true) {
1890                         /* The non-resolvable private address is generated
1891                          * from six random bytes with the two most
1892                          * significant bits cleared.
1893                          */
1894                         get_random_bytes(&nrpa, 6);
1895                         nrpa.b[5] &= 0x3f;
1896
1897                         /* The non-resolvable private address shall not be
1898                          * equal to the public address.
1899                          */
1900                         if (bacmp(&hdev->bdaddr, &nrpa))
1901                                 break;
1902                 }
1903
1904                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1905                 set_random_addr(req, &nrpa);
1906                 return 0;
1907         }
1908
1909         /* If forcing the static address is in use or there is no public
1910          * address, use the static address as the random address (but skip
1911          * the HCI command if the current random address is already the
1912          * static one).
1913          *
1914          * In case BR/EDR has been disabled on a dual-mode controller
1915          * and a static address has been configured, then use that
1916          * address instead of the public BR/EDR address.
1917          */
1918         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1919             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1920             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1921              bacmp(&hdev->static_addr, BDADDR_ANY))) {
1922                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1923                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1924                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1925                                     &hdev->static_addr);
1926                 return 0;
1927         }
1928
1929         /* Neither privacy nor static address is being used so use a
1930          * public address.
1931          */
1932         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1933
1934         return 0;
1935 }
1936
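/* Returns true if any device on the BR/EDR whitelist lacks a fully
 * established connection (none at all, or one not yet in BT_CONNECTED or
 * BT_CONFIG). __hci_req_update_scan() uses this to keep page scan enabled
 * so that such devices can reconnect. (Comment added for clarity.)
 */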
1937 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1938 {
1939         struct bdaddr_list *b;
1940
1941         list_for_each_entry(b, &hdev->whitelist, list) {
1942                 struct hci_conn *conn;
1943
1944                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1945                 if (!conn)
1946                         return true;
1947
1948                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1949                         return true;
1950         }
1951
1952         return false;
1953 }
1954
1955 void __hci_req_update_scan(struct hci_request *req)
1956 {
1957         struct hci_dev *hdev = req->hdev;
1958         u8 scan;
1959
1960         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1961                 return;
1962
1963         if (!hdev_is_powered(hdev))
1964                 return;
1965
1966         if (mgmt_powering_down(hdev))
1967                 return;
1968
1969         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1970             disconnected_whitelist_entries(hdev))
1971                 scan = SCAN_PAGE;
1972         else
1973                 scan = SCAN_DISABLED;
1974
1975         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1976                 scan |= SCAN_INQUIRY;
1977
1978         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1979             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1980                 return;
1981
1982         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1983 }
1984
1985 static int update_scan(struct hci_request *req, unsigned long opt)
1986 {
1987         hci_dev_lock(req->hdev);
1988         __hci_req_update_scan(req);
1989         hci_dev_unlock(req->hdev);
1990         return 0;
1991 }
1992
1993 static void scan_update_work(struct work_struct *work)
1994 {
1995         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1996
1997         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1998 }
1999
2000 static int connectable_update(struct hci_request *req, unsigned long opt)
2001 {
2002         struct hci_dev *hdev = req->hdev;
2003
2004         hci_dev_lock(hdev);
2005
2006         __hci_req_update_scan(req);
2007
2008         /* If BR/EDR is not enabled and we disable advertising as a
2009          * by-product of disabling connectable, we need to update the
2010          * advertising flags.
2011          */
2012         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2013                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2014
2015         /* Update the advertising parameters if necessary */
2016         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2017             !list_empty(&hdev->adv_instances)) {
2018                 if (ext_adv_capable(hdev))
2019                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2020                 else
2021                         __hci_req_enable_advertising(req);
2022         }
2023
2024         __hci_update_background_scan(req);
2025
2026         hci_dev_unlock(hdev);
2027
2028         return 0;
2029 }
2030
2031 static void connectable_update_work(struct work_struct *work)
2032 {
2033         struct hci_dev *hdev = container_of(work, struct hci_dev,
2034                                             connectable_update);
2035         u8 status;
2036
2037         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2038         mgmt_set_connectable_complete(hdev, status);
2039 }
2040
2041 static u8 get_service_classes(struct hci_dev *hdev)
2042 {
2043         struct bt_uuid *uuid;
2044         u8 val = 0;
2045
2046         list_for_each_entry(uuid, &hdev->uuids, list)
2047                 val |= uuid->svc_hint;
2048
2049         return val;
2050 }
2051
2052 void __hci_req_update_class(struct hci_request *req)
2053 {
2054         struct hci_dev *hdev = req->hdev;
2055         u8 cod[3];
2056
2057         BT_DBG("%s", hdev->name);
2058
2059         if (!hdev_is_powered(hdev))
2060                 return;
2061
2062         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2063                 return;
2064
2065         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2066                 return;
2067
2068         cod[0] = hdev->minor_class;
2069         cod[1] = hdev->major_class;
2070         cod[2] = get_service_classes(hdev);
2071
2072         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2073                 cod[1] |= 0x20;
2074
2075         if (memcmp(cod, hdev->dev_class, 3) == 0)
2076                 return;
2077
2078         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2079 }
2080
2081 static void write_iac(struct hci_request *req)
2082 {
2083         struct hci_dev *hdev = req->hdev;
2084         struct hci_cp_write_current_iac_lap cp;
2085
2086         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2087                 return;
2088
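        /* Each IAC LAP is written little-endian: 0x00 0x8b 0x9e is the
         * LIAC (0x9e8b00) and 0x33 0x8b 0x9e is the GIAC (0x9e8b33).
         * (Comment added for clarity.)
         */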
2089         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2090                 /* Limited discoverable mode */
2091                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2092                 cp.iac_lap[0] = 0x00;   /* LIAC */
2093                 cp.iac_lap[1] = 0x8b;
2094                 cp.iac_lap[2] = 0x9e;
2095                 cp.iac_lap[3] = 0x33;   /* GIAC */
2096                 cp.iac_lap[4] = 0x8b;
2097                 cp.iac_lap[5] = 0x9e;
2098         } else {
2099                 /* General discoverable mode */
2100                 cp.num_iac = 1;
2101                 cp.iac_lap[0] = 0x33;   /* GIAC */
2102                 cp.iac_lap[1] = 0x8b;
2103                 cp.iac_lap[2] = 0x9e;
2104         }
2105
2106         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2107                     (cp.num_iac * 3) + 1, &cp);
2108 }
2109
2110 static int discoverable_update(struct hci_request *req, unsigned long opt)
2111 {
2112         struct hci_dev *hdev = req->hdev;
2113
2114         hci_dev_lock(hdev);
2115
2116         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2117                 write_iac(req);
2118                 __hci_req_update_scan(req);
2119                 __hci_req_update_class(req);
2120         }
2121
2122         /* Advertising instances don't use the global discoverable setting, so
2123          * only update AD if advertising was enabled using Set Advertising.
2124          */
2125         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2126                 __hci_req_update_adv_data(req, 0x00);
2127
2128                 /* Discoverable mode affects the local advertising
2129                  * address in limited privacy mode.
2130                  */
2131                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2132                         if (ext_adv_capable(hdev))
2133                                 __hci_req_start_ext_adv(req, 0x00);
2134                         else
2135                                 __hci_req_enable_advertising(req);
2136                 }
2137         }
2138
2139         hci_dev_unlock(hdev);
2140
2141         return 0;
2142 }
2143
2144 static void discoverable_update_work(struct work_struct *work)
2145 {
2146         struct hci_dev *hdev = container_of(work, struct hci_dev,
2147                                             discoverable_update);
2148         u8 status;
2149
2150         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2151         mgmt_set_discoverable_complete(hdev, status);
2152 }
2153
2154 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2155                       u8 reason)
2156 {
2157         switch (conn->state) {
2158         case BT_CONNECTED:
2159         case BT_CONFIG:
2160                 if (conn->type == AMP_LINK) {
2161                         struct hci_cp_disconn_phy_link cp;
2162
2163                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2164                         cp.reason = reason;
2165                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2166                                     &cp);
2167                 } else {
2168                         struct hci_cp_disconnect dc;
2169
2170                         dc.handle = cpu_to_le16(conn->handle);
2171                         dc.reason = reason;
2172                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2173                 }
2174
2175                 conn->state = BT_DISCONN;
2176
2177                 break;
2178         case BT_CONNECT:
2179                 if (conn->type == LE_LINK) {
2180                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2181                                 break;
2182                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2183                                     0, NULL);
2184                 } else if (conn->type == ACL_LINK) {
2185                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2186                                 break;
2187                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2188                                     6, &conn->dst);
2189                 }
2190                 break;
2191         case BT_CONNECT2:
2192                 if (conn->type == ACL_LINK) {
2193                         struct hci_cp_reject_conn_req rej;
2194
2195                         bacpy(&rej.bdaddr, &conn->dst);
2196                         rej.reason = reason;
2197
2198                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2199                                     sizeof(rej), &rej);
2200                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2201                         struct hci_cp_reject_sync_conn_req rej;
2202
2203                         bacpy(&rej.bdaddr, &conn->dst);
2204
2205                         /* SCO rejection has its own limited set of
2206                          * allowed error values (0x0D-0x0F) which isn't
2207                          * compatible with most values passed to this
2208                          * function. To be safe, hard-code one of the
2209                          * values that's suitable for SCO.
2210                          */
2211                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2212
2213                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2214                                     sizeof(rej), &rej);
2215                 }
2216                 break;
2217         default:
2218                 conn->state = BT_CLOSED;
2219                 break;
2220         }
2221 }
2222
2223 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2224 {
2225         if (status)
2226                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2227 }
2228
2229 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2230 {
2231         struct hci_request req;
2232         int err;
2233
2234         hci_req_init(&req, conn->hdev);
2235
2236         __hci_abort_conn(&req, conn, reason);
2237
2238         err = hci_req_run(&req, abort_conn_complete);
2239         if (err && err != -ENODATA) {
2240                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2241                 return err;
2242         }
2243
2244         return 0;
2245 }
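
/* Illustrative sketch (added for clarity, not part of the original file):
 * aborting a connection with the usual "remote user terminated" reason.
 * The helper name is hypothetical; HCI_ERROR_REMOTE_USER_TERM is the real
 * HCI error code.
 */
static int __maybe_unused example_disconnect(struct hci_conn *conn)
{
        return hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}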
2246
2247 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2248 {
2249         hci_dev_lock(req->hdev);
2250         __hci_update_background_scan(req);
2251         hci_dev_unlock(req->hdev);
2252         return 0;
2253 }
2254
2255 static void bg_scan_update(struct work_struct *work)
2256 {
2257         struct hci_dev *hdev = container_of(work, struct hci_dev,
2258                                             bg_scan_update);
2259         struct hci_conn *conn;
2260         u8 status;
2261         int err;
2262
2263         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2264         if (!err)
2265                 return;
2266
2267         hci_dev_lock(hdev);
2268
2269         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2270         if (conn)
2271                 hci_le_conn_failed(conn, status);
2272
2273         hci_dev_unlock(hdev);
2274 }
2275
2276 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2277 {
2278         hci_req_add_le_scan_disable(req);
2279         return 0;
2280 }
2281
2282 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2283 {
2284         u8 length = opt;
2285         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2286         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2287         struct hci_cp_inquiry cp;
2288
2289         BT_DBG("%s", req->hdev->name);
2290
2291         hci_dev_lock(req->hdev);
2292         hci_inquiry_cache_flush(req->hdev);
2293         hci_dev_unlock(req->hdev);
2294
2295         memset(&cp, 0, sizeof(cp));
2296
2297         if (req->hdev->discovery.limited)
2298                 memcpy(&cp.lap, liac, sizeof(cp.lap));
2299         else
2300                 memcpy(&cp.lap, giac, sizeof(cp.lap));
2301
2302         cp.length = length;
2303
2304         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2305
2306         return 0;
2307 }
2308
2309 static void le_scan_disable_work(struct work_struct *work)
2310 {
2311         struct hci_dev *hdev = container_of(work, struct hci_dev,
2312                                             le_scan_disable.work);
2313         u8 status;
2314
2315         BT_DBG("%s", hdev->name);
2316
2317         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2318                 return;
2319
2320         cancel_delayed_work(&hdev->le_scan_restart);
2321
2322         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2323         if (status) {
2324                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2325                            status);
2326                 return;
2327         }
2328
2329         hdev->discovery.scan_start = 0;
2330
2331         /* If we were running an LE-only scan, change the discovery state.
2332          * If we were running both LE and BR/EDR inquiry simultaneously,
2333          * and BR/EDR inquiry is already finished, stop discovery;
2334          * otherwise BR/EDR inquiry will stop discovery when it finishes.
2335          * If we are going to resolve a remote device name, do not change
2336          * the discovery state.
2337          */
2338
2339         if (hdev->discovery.type == DISCOV_TYPE_LE)
2340                 goto discov_stopped;
2341
2342         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2343                 return;
2344
2345         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2346                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2347                     hdev->discovery.state != DISCOVERY_RESOLVING)
2348                         goto discov_stopped;
2349
2350                 return;
2351         }
2352
2353         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2354                      HCI_CMD_TIMEOUT, &status);
2355         if (status) {
2356                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2357                 goto discov_stopped;
2358         }
2359
2360         return;
2361
2362 discov_stopped:
2363         hci_dev_lock(hdev);
2364         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2365         hci_dev_unlock(hdev);
2366 }
2367
2368 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2369 {
2370         struct hci_dev *hdev = req->hdev;
2371
2372         /* If the controller is not scanning, we are done. */
2373         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2374                 return 0;
2375
2376         hci_req_add_le_scan_disable(req);
2377
2378         if (use_ext_scan(hdev)) {
2379                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2380
2381                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2382                 ext_enable_cp.enable = LE_SCAN_ENABLE;
2383                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2384
2385                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2386                             sizeof(ext_enable_cp), &ext_enable_cp);
2387         } else {
2388                 struct hci_cp_le_set_scan_enable cp;
2389
2390                 memset(&cp, 0, sizeof(cp));
2391                 cp.enable = LE_SCAN_ENABLE;
2392                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2393                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2394         }
2395
2396         return 0;
2397 }
2398
2399 static void le_scan_restart_work(struct work_struct *work)
2400 {
2401         struct hci_dev *hdev = container_of(work, struct hci_dev,
2402                                             le_scan_restart.work);
2403         unsigned long timeout, duration, scan_start, now;
2404         u8 status;
2405
2406         BT_DBG("%s", hdev->name);
2407
2408         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2409         if (status) {
2410                 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2411                            status);
2412                 return;
2413         }
2414
2415         hci_dev_lock(hdev);
2416
2417         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2418             !hdev->discovery.scan_start)
2419                 goto unlock;
2420
2421         /* When the scan was started, the hdev->le_scan_disable work was
2422          * queued to run "duration" after scan_start. During scan restart
2423          * this work has been canceled, and we need to queue it again with
2424          * the proper timeout to make sure the scan does not run indefinitely.
2425          */
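        /* For example (added for clarity): with a scan_duration of
         * 10240 ms and 4000 ms already elapsed, the disable work is
         * re-queued to run after the remaining 6240 ms.
         */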
2426         duration = hdev->discovery.scan_duration;
2427         scan_start = hdev->discovery.scan_start;
2428         now = jiffies;
2429         if (now - scan_start <= duration) {
2430                 int elapsed;
2431
2432                 if (now >= scan_start)
2433                         elapsed = now - scan_start;
2434                 else
2435                         elapsed = ULONG_MAX - scan_start + now;
2436
2437                 timeout = duration - elapsed;
2438         } else {
2439                 timeout = 0;
2440         }
2441
2442         queue_delayed_work(hdev->req_workqueue,
2443                            &hdev->le_scan_disable, timeout);
2444
2445 unlock:
2446         hci_dev_unlock(hdev);
2447 }
2448
2449 static int active_scan(struct hci_request *req, unsigned long opt)
2450 {
2451         uint16_t interval = opt;
2452         struct hci_dev *hdev = req->hdev;
2453         u8 own_addr_type;
2454         int err;
2455
2456         BT_DBG("%s", hdev->name);
2457
2458         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2459                 hci_dev_lock(hdev);
2460
2461                 /* Don't let discovery abort an outgoing connection attempt
2462                  * that's using directed advertising.
2463                  */
2464                 if (hci_lookup_le_connect(hdev)) {
2465                         hci_dev_unlock(hdev);
2466                         return -EBUSY;
2467                 }
2468
2469                 cancel_adv_timeout(hdev);
2470                 hci_dev_unlock(hdev);
2471
2472                 __hci_req_disable_advertising(req);
2473         }
2474
2475         /* If the controller is scanning, it means background scanning is
2476          * running. Thus, we should temporarily stop it in order to set the
2477          * discovery scanning parameters.
2478          */
2479         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2480                 hci_req_add_le_scan_disable(req);
2481
2482         /* All active scans will be done with either a resolvable private
2483          * address (when the privacy feature has been enabled) or a
2484          * non-resolvable private address.
2485          */
2486         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2487                                         &own_addr_type);
2488         if (err < 0)
2489                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2490
2491         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2492                            own_addr_type, 0);
2493         return 0;
2494 }
2495
2496 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2497 {
2498         int err;
2499
2500         BT_DBG("%s", req->hdev->name);
2501
2502         err = active_scan(req, opt);
2503         if (err)
2504                 return err;
2505
2506         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2507 }
2508
2509 static void start_discovery(struct hci_dev *hdev, u8 *status)
2510 {
2511         unsigned long timeout;
2512
2513         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2514
2515         switch (hdev->discovery.type) {
2516         case DISCOV_TYPE_BREDR:
2517                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2518                         hci_req_sync(hdev, bredr_inquiry,
2519                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2520                                      status);
2521                 return;
2522         case DISCOV_TYPE_INTERLEAVED:
2523                 /* When running simultaneous discovery, the LE scanning time
2524                  * should occupy the whole discovery time since BR/EDR inquiry
2525                  * and LE scanning are scheduled by the controller.
2526                  *
2527                  * For interleaving discovery in comparison, BR/EDR inquiry
2528                  * and LE scanning are done sequentially with separate
2529                  * timeouts.
2530                  */
2531                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2532                              &hdev->quirks)) {
2533                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2534                         /* During simultaneous discovery, we double the LE
2535                          * scan interval. We must leave some time for the
2536                          * controller to do BR/EDR inquiry.
2537                          */
2538                         hci_req_sync(hdev, interleaved_discov,
2539                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2540                                      status);
2541                         break;
2542                 }
2543
2544                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2545                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2546                              HCI_CMD_TIMEOUT, status);
2547                 break;
2548         case DISCOV_TYPE_LE:
2549                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2550                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2551                              HCI_CMD_TIMEOUT, status);
2552                 break;
2553         default:
2554                 *status = HCI_ERROR_UNSPECIFIED;
2555                 return;
2556         }
2557
2558         if (*status)
2559                 return;
2560
2561         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2562
2563         /* When service discovery is used and the controller has a
2564          * strict duplicate filter, it is important to remember the
2565          * start and duration of the scan. This is required for
2566          * restarting scanning during the discovery phase.
2567          */
2568         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2569             hdev->discovery.result_filtering) {
2570                 hdev->discovery.scan_start = jiffies;
2571                 hdev->discovery.scan_duration = timeout;
2572         }
2573
2574         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2575                            timeout);
2576 }
2577
2578 bool hci_req_stop_discovery(struct hci_request *req)
2579 {
2580         struct hci_dev *hdev = req->hdev;
2581         struct discovery_state *d = &hdev->discovery;
2582         struct hci_cp_remote_name_req_cancel cp;
2583         struct inquiry_entry *e;
2584         bool ret = false;
2585
2586         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2587
2588         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2589                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2590                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2591
2592                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2593                         cancel_delayed_work(&hdev->le_scan_disable);
2594                         hci_req_add_le_scan_disable(req);
2595                 }
2596
2597                 ret = true;
2598         } else {
2599                 /* Passive scanning */
2600                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2601                         hci_req_add_le_scan_disable(req);
2602                         ret = true;
2603                 }
2604         }
2605
2606         /* No further actions needed for LE-only discovery */
2607         if (d->type == DISCOV_TYPE_LE)
2608                 return ret;
2609
2610         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2611                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2612                                                      NAME_PENDING);
2613                 if (!e)
2614                         return ret;
2615
2616                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2617                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2618                             &cp);
2619                 ret = true;
2620         }
2621
2622         return ret;
2623 }
2624
2625 static int stop_discovery(struct hci_request *req, unsigned long opt)
2626 {
2627         hci_dev_lock(req->hdev);
2628         hci_req_stop_discovery(req);
2629         hci_dev_unlock(req->hdev);
2630
2631         return 0;
2632 }
2633
2634 static void discov_update(struct work_struct *work)
2635 {
2636         struct hci_dev *hdev = container_of(work, struct hci_dev,
2637                                             discov_update);
2638         u8 status = 0;
2639
2640         switch (hdev->discovery.state) {
2641         case DISCOVERY_STARTING:
2642                 start_discovery(hdev, &status);
2643                 mgmt_start_discovery_complete(hdev, status);
2644                 if (status)
2645                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2646                 else
2647                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2648                 break;
2649         case DISCOVERY_STOPPING:
2650                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2651                 mgmt_stop_discovery_complete(hdev, status);
2652                 if (!status)
2653                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2654                 break;
2655         case DISCOVERY_STOPPED:
2656         default:
2657                 return;
2658         }
2659 }
2660
2661 static void discov_off(struct work_struct *work)
2662 {
2663         struct hci_dev *hdev = container_of(work, struct hci_dev,
2664                                             discov_off.work);
2665
2666         BT_DBG("%s", hdev->name);
2667
2668         hci_dev_lock(hdev);
2669
2670         /* When the discoverable timeout triggers, just make sure
2671          * the limited discoverable flag is cleared. Even in the case
2672          * of a timeout triggered from general discoverable, it is
2673          * safe to unconditionally clear the flag.
2674          */
2675         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2676         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2677         hdev->discov_timeout = 0;
2678
2679         hci_dev_unlock(hdev);
2680
2681         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2682         mgmt_new_settings(hdev);
2683 }
2684
2685 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2686 {
2687         struct hci_dev *hdev = req->hdev;
2688         u8 link_sec;
2689
2690         hci_dev_lock(hdev);
2691
2692         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2693             !lmp_host_ssp_capable(hdev)) {
2694                 u8 mode = 0x01;
2695
2696                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2697
2698                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2699                         u8 support = 0x01;
2700
2701                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2702                                     sizeof(support), &support);
2703                 }
2704         }
2705
2706         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2707             lmp_bredr_capable(hdev)) {
2708                 struct hci_cp_write_le_host_supported cp;
2709
2710                 cp.le = 0x01;
2711                 cp.simul = 0x00;
2712
2713                 /* Check first if we already have the right
2714                  * host state (host features set)
2715                  */
2716                 if (cp.le != lmp_host_le_capable(hdev) ||
2717                     cp.simul != lmp_host_le_br_capable(hdev))
2718                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2719                                     sizeof(cp), &cp);
2720         }
2721
2722         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2723                 /* Make sure the controller has a good default for
2724                  * advertising data. This also applies to the case
2725                  * where BR/EDR was toggled during the AUTO_OFF phase.
2726                  */
2727                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2728                     list_empty(&hdev->adv_instances)) {
2729                         int err;
2730
2731                         if (ext_adv_capable(hdev)) {
2732                                 err = __hci_req_setup_ext_adv_instance(req,
2733                                                                        0x00);
2734                                 if (!err)
2735                                         __hci_req_update_scan_rsp_data(req,
2736                                                                        0x00);
2737                         } else {
2738                                 err = 0;
2739                                 __hci_req_update_adv_data(req, 0x00);
2740                                 __hci_req_update_scan_rsp_data(req, 0x00);
2741                         }
2742
2743                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2744                                 if (!ext_adv_capable(hdev))
2745                                         __hci_req_enable_advertising(req);
2746                                 else if (!err)
2747                                         __hci_req_enable_ext_advertising(req);
2748                         }
2749                 } else if (!list_empty(&hdev->adv_instances)) {
2750                         struct adv_info *adv_instance;
2751
2752                         adv_instance = list_first_entry(&hdev->adv_instances,
2753                                                         struct adv_info, list);
2754                         __hci_req_schedule_adv_instance(req,
2755                                                         adv_instance->instance,
2756                                                         true);
2757                 }
2758         }
2759
2760         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2761         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2762                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2763                             sizeof(link_sec), &link_sec);
2764
2765         if (lmp_bredr_capable(hdev)) {
2766                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2767                         __hci_req_write_fast_connectable(req, true);
2768                 else
2769                         __hci_req_write_fast_connectable(req, false);
2770                 __hci_req_update_scan(req);
2771                 __hci_req_update_class(req);
2772                 __hci_req_update_name(req);
2773                 __hci_req_update_eir(req);
2774         }
2775
2776         hci_dev_unlock(hdev);
2777         return 0;
2778 }
2779
2780 int __hci_req_hci_power_on(struct hci_dev *hdev)
2781 {
2782         /* Register the available SMP channels (BR/EDR and LE) only when
2783          * successfully powering on the controller. This late
2784          * registration is required so that LE SMP can clearly decide if
2785          * the public address or static address is used.
2786          */
2787         smp_register(hdev);
2788
2789         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2790                               NULL);
2791 }
2792
2793 void hci_request_setup(struct hci_dev *hdev)
2794 {
2795         INIT_WORK(&hdev->discov_update, discov_update);
2796         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2797         INIT_WORK(&hdev->scan_update, scan_update_work);
2798         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2799         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2800         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2801         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2802         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2803         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2804 }
2805
2806 void hci_request_cancel_all(struct hci_dev *hdev)
2807 {
2808         hci_req_sync_cancel(hdev, ENODEV);
2809
2810         cancel_work_sync(&hdev->discov_update);
2811         cancel_work_sync(&hdev->bg_scan_update);
2812         cancel_work_sync(&hdev->scan_update);
2813         cancel_work_sync(&hdev->connectable_update);
2814         cancel_work_sync(&hdev->discoverable_update);
2815         cancel_delayed_work_sync(&hdev->discov_off);
2816         cancel_delayed_work_sync(&hdev->le_scan_disable);
2817         cancel_delayed_work_sync(&hdev->le_scan_restart);
2818
2819         if (hdev->adv_instance_timeout) {
2820                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2821                 hdev->adv_instance_timeout = 0;
2822         }
2823 }