Bluetooth: Add BT LE discovery feature
[platform/kernel/linux-starfive.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65
66 static int hci_scan_req(struct hci_request *req, unsigned long opt)
67 {
68         __u8 scan = opt;
69
70         BT_DBG("%s %x", req->hdev->name, scan);
71
72         /* Inquiry and Page scans */
73         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
74         return 0;
75 }
76
77 static int hci_auth_req(struct hci_request *req, unsigned long opt)
78 {
79         __u8 auth = opt;
80
81         BT_DBG("%s %x", req->hdev->name, auth);
82
83         /* Authentication */
84         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
85         return 0;
86 }
87
88 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
89 {
90         __u8 encrypt = opt;
91
92         BT_DBG("%s %x", req->hdev->name, encrypt);
93
94         /* Encryption */
95         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
96         return 0;
97 }
98
99 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
100 {
101         __le16 policy = cpu_to_le16(opt);
102
103         BT_DBG("%s %x", req->hdev->name, policy);
104
105         /* Default link policy */
106         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
107         return 0;
108 }
109
110 /* Get HCI device by index.
111  * Device is held on return. */
112 struct hci_dev *hci_dev_get(int index)
113 {
114         struct hci_dev *hdev = NULL, *d;
115
116         BT_DBG("%d", index);
117
118         if (index < 0)
119                 return NULL;
120
121         read_lock(&hci_dev_list_lock);
122         list_for_each_entry(d, &hci_dev_list, list) {
123                 if (d->id == index) {
124                         hdev = hci_dev_hold(d);
125                         break;
126                 }
127         }
128         read_unlock(&hci_dev_list_lock);
129         return hdev;
130 }
131
132 /* ---- Inquiry support ---- */
133
134 bool hci_discovery_active(struct hci_dev *hdev)
135 {
136         struct discovery_state *discov = &hdev->discovery;
137
138         switch (discov->state) {
139         case DISCOVERY_FINDING:
140         case DISCOVERY_RESOLVING:
141                 return true;
142
143         default:
144                 return false;
145         }
146 }
147
148 void hci_discovery_set_state(struct hci_dev *hdev, int state)
149 {
150         int old_state = hdev->discovery.state;
151
152         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
153
154         if (old_state == state)
155                 return;
156
157         hdev->discovery.state = state;
158
159         switch (state) {
160         case DISCOVERY_STOPPED:
161                 hci_update_passive_scan(hdev);
162
163                 if (old_state != DISCOVERY_STARTING)
164                         mgmt_discovering(hdev, 0);
165                 break;
166         case DISCOVERY_STARTING:
167                 break;
168         case DISCOVERY_FINDING:
169                 mgmt_discovering(hdev, 1);
170                 break;
171         case DISCOVERY_RESOLVING:
172                 break;
173         case DISCOVERY_STOPPING:
174                 break;
175         }
176 }
177
178 #ifdef TIZEN_BT
179 bool hci_le_discovery_active(struct hci_dev *hdev)
180 {
181         struct discovery_state *discov = &hdev->le_discovery;
182
183         switch (discov->state) {
184         case DISCOVERY_FINDING:
185         case DISCOVERY_RESOLVING:
186                 return true;
187
188         default:
189                 return false;
190         }
191 }
192
193 void hci_le_discovery_set_state(struct hci_dev *hdev, int state)
194 {
195         BT_DBG("%s state %u -> %u", hdev->name,
196                         hdev->le_discovery.state, state);
197
198         if (hdev->le_discovery.state == state)
199                 return;
200
201         switch (state) {
202         case DISCOVERY_STOPPED:
203                 hci_update_passive_scan(hdev);
204
205                 if (hdev->le_discovery.state != DISCOVERY_STARTING)
206                         mgmt_le_discovering(hdev, 0);
207                 break;
208         case DISCOVERY_STARTING:
209                 break;
210         case DISCOVERY_FINDING:
211                 mgmt_le_discovering(hdev, 1);
212                 break;
213         case DISCOVERY_RESOLVING:
214                 break;
215         case DISCOVERY_STOPPING:
216                 break;
217         }
218
219         hdev->le_discovery.state = state;
220 }
221 #endif
222
223 void hci_inquiry_cache_flush(struct hci_dev *hdev)
224 {
225         struct discovery_state *cache = &hdev->discovery;
226         struct inquiry_entry *p, *n;
227
228         list_for_each_entry_safe(p, n, &cache->all, all) {
229                 list_del(&p->all);
230                 kfree(p);
231         }
232
233         INIT_LIST_HEAD(&cache->unknown);
234         INIT_LIST_HEAD(&cache->resolve);
235 }
236
237 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
238                                                bdaddr_t *bdaddr)
239 {
240         struct discovery_state *cache = &hdev->discovery;
241         struct inquiry_entry *e;
242
243         BT_DBG("cache %p, %pMR", cache, bdaddr);
244
245         list_for_each_entry(e, &cache->all, all) {
246                 if (!bacmp(&e->data.bdaddr, bdaddr))
247                         return e;
248         }
249
250         return NULL;
251 }
252
253 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
254                                                        bdaddr_t *bdaddr)
255 {
256         struct discovery_state *cache = &hdev->discovery;
257         struct inquiry_entry *e;
258
259         BT_DBG("cache %p, %pMR", cache, bdaddr);
260
261         list_for_each_entry(e, &cache->unknown, list) {
262                 if (!bacmp(&e->data.bdaddr, bdaddr))
263                         return e;
264         }
265
266         return NULL;
267 }
268
269 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
270                                                        bdaddr_t *bdaddr,
271                                                        int state)
272 {
273         struct discovery_state *cache = &hdev->discovery;
274         struct inquiry_entry *e;
275
276         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
277
278         list_for_each_entry(e, &cache->resolve, list) {
279                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
280                         return e;
281                 if (!bacmp(&e->data.bdaddr, bdaddr))
282                         return e;
283         }
284
285         return NULL;
286 }
287
288 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
289                                       struct inquiry_entry *ie)
290 {
291         struct discovery_state *cache = &hdev->discovery;
292         struct list_head *pos = &cache->resolve;
293         struct inquiry_entry *p;
294
295         list_del(&ie->list);
296
297         list_for_each_entry(p, &cache->resolve, list) {
298                 if (p->name_state != NAME_PENDING &&
299                     abs(p->data.rssi) >= abs(ie->data.rssi))
300                         break;
301                 pos = &p->list;
302         }
303
304         list_add(&ie->list, pos);
305 }
306
307 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
308                              bool name_known)
309 {
310         struct discovery_state *cache = &hdev->discovery;
311         struct inquiry_entry *ie;
312         u32 flags = 0;
313
314         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
315
316         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
317
318         if (!data->ssp_mode)
319                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
320
321         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
322         if (ie) {
323                 if (!ie->data.ssp_mode)
324                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
325
326                 if (ie->name_state == NAME_NEEDED &&
327                     data->rssi != ie->data.rssi) {
328                         ie->data.rssi = data->rssi;
329                         hci_inquiry_cache_update_resolve(hdev, ie);
330                 }
331
332                 goto update;
333         }
334
335         /* Entry not in the cache. Add new one. */
336         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
337         if (!ie) {
338                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
339                 goto done;
340         }
341
342         list_add(&ie->all, &cache->all);
343
344         if (name_known) {
345                 ie->name_state = NAME_KNOWN;
346         } else {
347                 ie->name_state = NAME_NOT_KNOWN;
348                 list_add(&ie->list, &cache->unknown);
349         }
350
351 update:
352         if (name_known && ie->name_state != NAME_KNOWN &&
353             ie->name_state != NAME_PENDING) {
354                 ie->name_state = NAME_KNOWN;
355                 list_del(&ie->list);
356         }
357
358         memcpy(&ie->data, data, sizeof(*data));
359         ie->timestamp = jiffies;
360         cache->timestamp = jiffies;
361
362         if (ie->name_state == NAME_NOT_KNOWN)
363                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
364
365 done:
366         return flags;
367 }
368
369 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
370 {
371         struct discovery_state *cache = &hdev->discovery;
372         struct inquiry_info *info = (struct inquiry_info *) buf;
373         struct inquiry_entry *e;
374         int copied = 0;
375
376         list_for_each_entry(e, &cache->all, all) {
377                 struct inquiry_data *data = &e->data;
378
379                 if (copied >= num)
380                         break;
381
382                 bacpy(&info->bdaddr, &data->bdaddr);
383                 info->pscan_rep_mode    = data->pscan_rep_mode;
384                 info->pscan_period_mode = data->pscan_period_mode;
385                 info->pscan_mode        = data->pscan_mode;
386                 memcpy(info->dev_class, data->dev_class, 3);
387                 info->clock_offset      = data->clock_offset;
388
389                 info++;
390                 copied++;
391         }
392
393         BT_DBG("cache %p, copied %d", cache, copied);
394         return copied;
395 }
396
397 static int hci_inq_req(struct hci_request *req, unsigned long opt)
398 {
399         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
400         struct hci_dev *hdev = req->hdev;
401         struct hci_cp_inquiry cp;
402
403         BT_DBG("%s", hdev->name);
404
405         if (test_bit(HCI_INQUIRY, &hdev->flags))
406                 return 0;
407
408         /* Start Inquiry */
409         memcpy(&cp.lap, &ir->lap, 3);
410         cp.length  = ir->length;
411         cp.num_rsp = ir->num_rsp;
412         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
413
414         return 0;
415 }
416
417 int hci_inquiry(void __user *arg)
418 {
419         __u8 __user *ptr = arg;
420         struct hci_inquiry_req ir;
421         struct hci_dev *hdev;
422         int err = 0, do_inquiry = 0, max_rsp;
423         long timeo;
424         __u8 *buf;
425
426         if (copy_from_user(&ir, ptr, sizeof(ir)))
427                 return -EFAULT;
428
429         hdev = hci_dev_get(ir.dev_id);
430         if (!hdev)
431                 return -ENODEV;
432
433         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
434                 err = -EBUSY;
435                 goto done;
436         }
437
438         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
439                 err = -EOPNOTSUPP;
440                 goto done;
441         }
442
443         if (hdev->dev_type != HCI_PRIMARY) {
444                 err = -EOPNOTSUPP;
445                 goto done;
446         }
447
448         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
449                 err = -EOPNOTSUPP;
450                 goto done;
451         }
452
453         /* Restrict maximum inquiry length to 60 seconds */
454         if (ir.length > 60) {
455                 err = -EINVAL;
456                 goto done;
457         }
458
459         hci_dev_lock(hdev);
460         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
461             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
462                 hci_inquiry_cache_flush(hdev);
463                 do_inquiry = 1;
464         }
465         hci_dev_unlock(hdev);
466
467         timeo = ir.length * msecs_to_jiffies(2000);
468
469         if (do_inquiry) {
470                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
471                                    timeo, NULL);
472                 if (err < 0)
473                         goto done;
474
475                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
476                  * cleared). If it is interrupted by a signal, return -EINTR.
477                  */
478                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
479                                 TASK_INTERRUPTIBLE)) {
480                         err = -EINTR;
481                         goto done;
482                 }
483         }
484
485         /* for unlimited number of responses we will use buffer with
486          * 255 entries
487          */
488         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
489
490         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
491          * copy it to the user space.
492          */
493         buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
494         if (!buf) {
495                 err = -ENOMEM;
496                 goto done;
497         }
498
499         hci_dev_lock(hdev);
500         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
501         hci_dev_unlock(hdev);
502
503         BT_DBG("num_rsp %d", ir.num_rsp);
504
505         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
506                 ptr += sizeof(ir);
507                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
508                                  ir.num_rsp))
509                         err = -EFAULT;
510         } else
511                 err = -EFAULT;
512
513         kfree(buf);
514
515 done:
516         hci_dev_put(hdev);
517         return err;
518 }
519
520 static int hci_dev_do_open(struct hci_dev *hdev)
521 {
522         int ret = 0;
523
524         BT_DBG("%s %p", hdev->name, hdev);
525
526         hci_req_sync_lock(hdev);
527
528         ret = hci_dev_open_sync(hdev);
529
530         hci_req_sync_unlock(hdev);
531         return ret;
532 }
533
534 /* ---- HCI ioctl helpers ---- */
535
536 int hci_dev_open(__u16 dev)
537 {
538         struct hci_dev *hdev;
539         int err;
540
541         hdev = hci_dev_get(dev);
542         if (!hdev)
543                 return -ENODEV;
544
545         /* Devices that are marked as unconfigured can only be powered
546          * up as user channel. Trying to bring them up as normal devices
547          * will result into a failure. Only user channel operation is
548          * possible.
549          *
550          * When this function is called for a user channel, the flag
551          * HCI_USER_CHANNEL will be set first before attempting to
552          * open the device.
553          */
554         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
555             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
556                 err = -EOPNOTSUPP;
557                 goto done;
558         }
559
560         /* We need to ensure that no other power on/off work is pending
561          * before proceeding to call hci_dev_do_open. This is
562          * particularly important if the setup procedure has not yet
563          * completed.
564          */
565         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
566                 cancel_delayed_work(&hdev->power_off);
567
568         /* After this call it is guaranteed that the setup procedure
569          * has finished. This means that error conditions like RFKILL
570          * or no valid public or static random address apply.
571          */
572         flush_workqueue(hdev->req_workqueue);
573
574         /* For controllers not using the management interface and that
575          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
576          * so that pairing works for them. Once the management interface
577          * is in use this bit will be cleared again and userspace has
578          * to explicitly enable it.
579          */
580         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
581             !hci_dev_test_flag(hdev, HCI_MGMT))
582                 hci_dev_set_flag(hdev, HCI_BONDABLE);
583
584         err = hci_dev_do_open(hdev);
585
586 done:
587         hci_dev_put(hdev);
588         return err;
589 }
590
591 int hci_dev_do_close(struct hci_dev *hdev)
592 {
593         int err;
594
595         BT_DBG("%s %p", hdev->name, hdev);
596
597         hci_req_sync_lock(hdev);
598
599         err = hci_dev_close_sync(hdev);
600
601         hci_req_sync_unlock(hdev);
602
603         return err;
604 }
605
606 int hci_dev_close(__u16 dev)
607 {
608         struct hci_dev *hdev;
609         int err;
610
611         hdev = hci_dev_get(dev);
612         if (!hdev)
613                 return -ENODEV;
614
615         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
616                 err = -EBUSY;
617                 goto done;
618         }
619
620         cancel_work_sync(&hdev->power_on);
621         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
622                 cancel_delayed_work(&hdev->power_off);
623
624         err = hci_dev_do_close(hdev);
625
626 done:
627         hci_dev_put(hdev);
628         return err;
629 }
630
631 static int hci_dev_do_reset(struct hci_dev *hdev)
632 {
633         int ret;
634
635         BT_DBG("%s %p", hdev->name, hdev);
636
637         hci_req_sync_lock(hdev);
638
639         /* Drop queues */
640         skb_queue_purge(&hdev->rx_q);
641         skb_queue_purge(&hdev->cmd_q);
642
643         /* Cancel these to avoid queueing non-chained pending work */
644         hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
645         /* Wait for
646          *
647          *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
648          *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
649          *
650          * inside RCU section to see the flag or complete scheduling.
651          */
652         synchronize_rcu();
653         /* Explicitly cancel works in case scheduled after setting the flag. */
654         cancel_delayed_work(&hdev->cmd_timer);
655         cancel_delayed_work(&hdev->ncmd_timer);
656
657         /* Avoid potential lockdep warnings from the *_flush() calls by
658          * ensuring the workqueue is empty up front.
659          */
660         drain_workqueue(hdev->workqueue);
661
662         hci_dev_lock(hdev);
663         hci_inquiry_cache_flush(hdev);
664         hci_conn_hash_flush(hdev);
665         hci_dev_unlock(hdev);
666
667         if (hdev->flush)
668                 hdev->flush(hdev);
669
670         hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
671
672         atomic_set(&hdev->cmd_cnt, 1);
673         hdev->acl_cnt = 0;
674         hdev->sco_cnt = 0;
675         hdev->le_cnt = 0;
676         hdev->iso_cnt = 0;
677
678         ret = hci_reset_sync(hdev);
679
680         hci_req_sync_unlock(hdev);
681         return ret;
682 }
683
684 int hci_dev_reset(__u16 dev)
685 {
686         struct hci_dev *hdev;
687         int err;
688
689         hdev = hci_dev_get(dev);
690         if (!hdev)
691                 return -ENODEV;
692
693         if (!test_bit(HCI_UP, &hdev->flags)) {
694                 err = -ENETDOWN;
695                 goto done;
696         }
697
698         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
699                 err = -EBUSY;
700                 goto done;
701         }
702
703         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
704                 err = -EOPNOTSUPP;
705                 goto done;
706         }
707
708         err = hci_dev_do_reset(hdev);
709
710 done:
711         hci_dev_put(hdev);
712         return err;
713 }
714
715 int hci_dev_reset_stat(__u16 dev)
716 {
717         struct hci_dev *hdev;
718         int ret = 0;
719
720         hdev = hci_dev_get(dev);
721         if (!hdev)
722                 return -ENODEV;
723
724         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
725                 ret = -EBUSY;
726                 goto done;
727         }
728
729         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
730                 ret = -EOPNOTSUPP;
731                 goto done;
732         }
733
734         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
735
736 done:
737         hci_dev_put(hdev);
738         return ret;
739 }
740
741 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
742 {
743         bool conn_changed, discov_changed;
744
745         BT_DBG("%s scan 0x%02x", hdev->name, scan);
746
747         if ((scan & SCAN_PAGE))
748                 conn_changed = !hci_dev_test_and_set_flag(hdev,
749                                                           HCI_CONNECTABLE);
750         else
751                 conn_changed = hci_dev_test_and_clear_flag(hdev,
752                                                            HCI_CONNECTABLE);
753
754         if ((scan & SCAN_INQUIRY)) {
755                 discov_changed = !hci_dev_test_and_set_flag(hdev,
756                                                             HCI_DISCOVERABLE);
757         } else {
758                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
759                 discov_changed = hci_dev_test_and_clear_flag(hdev,
760                                                              HCI_DISCOVERABLE);
761         }
762
763         if (!hci_dev_test_flag(hdev, HCI_MGMT))
764                 return;
765
766         if (conn_changed || discov_changed) {
767                 /* In case this was disabled through mgmt */
768                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
769
770                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
771                         hci_update_adv_data(hdev, hdev->cur_adv_instance);
772
773                 mgmt_new_settings(hdev);
774         }
775 }
776
777 int hci_dev_cmd(unsigned int cmd, void __user *arg)
778 {
779         struct hci_dev *hdev;
780         struct hci_dev_req dr;
781         int err = 0;
782
783         if (copy_from_user(&dr, arg, sizeof(dr)))
784                 return -EFAULT;
785
786         hdev = hci_dev_get(dr.dev_id);
787         if (!hdev)
788                 return -ENODEV;
789
790         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
791                 err = -EBUSY;
792                 goto done;
793         }
794
795         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
796                 err = -EOPNOTSUPP;
797                 goto done;
798         }
799
800         if (hdev->dev_type != HCI_PRIMARY) {
801                 err = -EOPNOTSUPP;
802                 goto done;
803         }
804
805         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
806                 err = -EOPNOTSUPP;
807                 goto done;
808         }
809
810         switch (cmd) {
811         case HCISETAUTH:
812                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
813                                    HCI_INIT_TIMEOUT, NULL);
814                 break;
815
816         case HCISETENCRYPT:
817                 if (!lmp_encrypt_capable(hdev)) {
818                         err = -EOPNOTSUPP;
819                         break;
820                 }
821
822                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
823                         /* Auth must be enabled first */
824                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
825                                            HCI_INIT_TIMEOUT, NULL);
826                         if (err)
827                                 break;
828                 }
829
830                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
831                                    HCI_INIT_TIMEOUT, NULL);
832                 break;
833
834         case HCISETSCAN:
835                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
836                                    HCI_INIT_TIMEOUT, NULL);
837
838                 /* Ensure that the connectable and discoverable states
839                  * get correctly modified as this was a non-mgmt change.
840                  */
841                 if (!err)
842                         hci_update_passive_scan_state(hdev, dr.dev_opt);
843                 break;
844
845         case HCISETLINKPOL:
846                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
847                                    HCI_INIT_TIMEOUT, NULL);
848                 break;
849
850         case HCISETLINKMODE:
851                 hdev->link_mode = ((__u16) dr.dev_opt) &
852                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
853                 break;
854
855         case HCISETPTYPE:
856                 if (hdev->pkt_type == (__u16) dr.dev_opt)
857                         break;
858
859                 hdev->pkt_type = (__u16) dr.dev_opt;
860                 mgmt_phy_configuration_changed(hdev, NULL);
861                 break;
862
863         case HCISETACLMTU:
864                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
865                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
866                 break;
867
868         case HCISETSCOMTU:
869                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
870                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
871                 break;
872
873         default:
874                 err = -EINVAL;
875                 break;
876         }
877
878 done:
879         hci_dev_put(hdev);
880         return err;
881 }
882
883 int hci_get_dev_list(void __user *arg)
884 {
885         struct hci_dev *hdev;
886         struct hci_dev_list_req *dl;
887         struct hci_dev_req *dr;
888         int n = 0, size, err;
889         __u16 dev_num;
890
891         if (get_user(dev_num, (__u16 __user *) arg))
892                 return -EFAULT;
893
894         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
895                 return -EINVAL;
896
897         size = sizeof(*dl) + dev_num * sizeof(*dr);
898
899         dl = kzalloc(size, GFP_KERNEL);
900         if (!dl)
901                 return -ENOMEM;
902
903         dr = dl->dev_req;
904
905         read_lock(&hci_dev_list_lock);
906         list_for_each_entry(hdev, &hci_dev_list, list) {
907                 unsigned long flags = hdev->flags;
908
909                 /* When the auto-off is configured it means the transport
910                  * is running, but in that case still indicate that the
911                  * device is actually down.
912                  */
913                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
914                         flags &= ~BIT(HCI_UP);
915
916                 (dr + n)->dev_id  = hdev->id;
917                 (dr + n)->dev_opt = flags;
918
919                 if (++n >= dev_num)
920                         break;
921         }
922         read_unlock(&hci_dev_list_lock);
923
924         dl->dev_num = n;
925         size = sizeof(*dl) + n * sizeof(*dr);
926
927         err = copy_to_user(arg, dl, size);
928         kfree(dl);
929
930         return err ? -EFAULT : 0;
931 }
932
933 int hci_get_dev_info(void __user *arg)
934 {
935         struct hci_dev *hdev;
936         struct hci_dev_info di;
937         unsigned long flags;
938         int err = 0;
939
940         if (copy_from_user(&di, arg, sizeof(di)))
941                 return -EFAULT;
942
943         hdev = hci_dev_get(di.dev_id);
944         if (!hdev)
945                 return -ENODEV;
946
947         /* When the auto-off is configured it means the transport
948          * is running, but in that case still indicate that the
949          * device is actually down.
950          */
951         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
952                 flags = hdev->flags & ~BIT(HCI_UP);
953         else
954                 flags = hdev->flags;
955
956         strcpy(di.name, hdev->name);
957         di.bdaddr   = hdev->bdaddr;
958         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
959         di.flags    = flags;
960         di.pkt_type = hdev->pkt_type;
961         if (lmp_bredr_capable(hdev)) {
962                 di.acl_mtu  = hdev->acl_mtu;
963                 di.acl_pkts = hdev->acl_pkts;
964                 di.sco_mtu  = hdev->sco_mtu;
965                 di.sco_pkts = hdev->sco_pkts;
966         } else {
967                 di.acl_mtu  = hdev->le_mtu;
968                 di.acl_pkts = hdev->le_pkts;
969                 di.sco_mtu  = 0;
970                 di.sco_pkts = 0;
971         }
972         di.link_policy = hdev->link_policy;
973         di.link_mode   = hdev->link_mode;
974
975         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
976         memcpy(&di.features, &hdev->features, sizeof(di.features));
977
978         if (copy_to_user(arg, &di, sizeof(di)))
979                 err = -EFAULT;
980
981         hci_dev_put(hdev);
982
983         return err;
984 }
985
986 /* ---- Interface to HCI drivers ---- */
987
988 static int hci_rfkill_set_block(void *data, bool blocked)
989 {
990         struct hci_dev *hdev = data;
991
992         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
993
994         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
995                 return -EBUSY;
996
997         if (blocked) {
998                 hci_dev_set_flag(hdev, HCI_RFKILLED);
999                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1000                     !hci_dev_test_flag(hdev, HCI_CONFIG))
1001                         hci_dev_do_close(hdev);
1002         } else {
1003                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1004         }
1005
1006         return 0;
1007 }
1008
1009 static const struct rfkill_ops hci_rfkill_ops = {
1010         .set_block = hci_rfkill_set_block,
1011 };
1012
1013 static void hci_power_on(struct work_struct *work)
1014 {
1015         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1016         int err;
1017
1018         BT_DBG("%s", hdev->name);
1019
1020         if (test_bit(HCI_UP, &hdev->flags) &&
1021             hci_dev_test_flag(hdev, HCI_MGMT) &&
1022             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1023                 cancel_delayed_work(&hdev->power_off);
1024                 err = hci_powered_update_sync(hdev);
1025                 mgmt_power_on(hdev, err);
1026                 return;
1027         }
1028
1029         err = hci_dev_do_open(hdev);
1030         if (err < 0) {
1031                 hci_dev_lock(hdev);
1032                 mgmt_set_powered_failed(hdev, err);
1033                 hci_dev_unlock(hdev);
1034                 return;
1035         }
1036
1037         /* During the HCI setup phase, a few error conditions are
1038          * ignored and they need to be checked now. If they are still
1039          * valid, it is important to turn the device back off.
1040          */
1041         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
1042             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
1043             (hdev->dev_type == HCI_PRIMARY &&
1044              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1045              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1046                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
1047                 hci_dev_do_close(hdev);
1048         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
1049                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1050                                    HCI_AUTO_OFF_TIMEOUT);
1051         }
1052
1053         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
1054                 /* For unconfigured devices, set the HCI_RAW flag
1055                  * so that userspace can easily identify them.
1056                  */
1057                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1058                         set_bit(HCI_RAW, &hdev->flags);
1059
1060                 /* For fully configured devices, this will send
1061                  * the Index Added event. For unconfigured devices,
1062                  * it will send Unconfigued Index Added event.
1063                  *
1064                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1065                  * and no event will be send.
1066                  */
1067                 mgmt_index_added(hdev);
1068         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1069                 /* When the controller is now configured, then it
1070                  * is important to clear the HCI_RAW flag.
1071                  */
1072                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1073                         clear_bit(HCI_RAW, &hdev->flags);
1074
1075                 /* Powering on the controller with HCI_CONFIG set only
1076                  * happens with the transition from unconfigured to
1077                  * configured. This will send the Index Added event.
1078                  */
1079                 mgmt_index_added(hdev);
1080         }
1081 }
1082
1083 static void hci_power_off(struct work_struct *work)
1084 {
1085         struct hci_dev *hdev = container_of(work, struct hci_dev,
1086                                             power_off.work);
1087
1088         BT_DBG("%s", hdev->name);
1089
1090         hci_dev_do_close(hdev);
1091 }
1092
1093 static void hci_error_reset(struct work_struct *work)
1094 {
1095         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1096
1097         BT_DBG("%s", hdev->name);
1098
1099         if (hdev->hw_error)
1100                 hdev->hw_error(hdev, hdev->hw_error_code);
1101         else
1102                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1103
1104         if (hci_dev_do_close(hdev))
1105                 return;
1106
1107         hci_dev_do_open(hdev);
1108 }
1109
1110 void hci_uuids_clear(struct hci_dev *hdev)
1111 {
1112         struct bt_uuid *uuid, *tmp;
1113
1114         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1115                 list_del(&uuid->list);
1116                 kfree(uuid);
1117         }
1118 }
1119
1120 void hci_link_keys_clear(struct hci_dev *hdev)
1121 {
1122         struct link_key *key;
1123
1124         list_for_each_entry(key, &hdev->link_keys, list) {
1125                 list_del_rcu(&key->list);
1126                 kfree_rcu(key, rcu);
1127         }
1128 }
1129
1130 void hci_smp_ltks_clear(struct hci_dev *hdev)
1131 {
1132         struct smp_ltk *k;
1133
1134         list_for_each_entry(k, &hdev->long_term_keys, list) {
1135                 list_del_rcu(&k->list);
1136                 kfree_rcu(k, rcu);
1137         }
1138 }
1139
1140 void hci_smp_irks_clear(struct hci_dev *hdev)
1141 {
1142         struct smp_irk *k;
1143
1144         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
1145                 list_del_rcu(&k->list);
1146                 kfree_rcu(k, rcu);
1147         }
1148 }
1149
1150 void hci_blocked_keys_clear(struct hci_dev *hdev)
1151 {
1152         struct blocked_key *b;
1153
1154         list_for_each_entry(b, &hdev->blocked_keys, list) {
1155                 list_del_rcu(&b->list);
1156                 kfree_rcu(b, rcu);
1157         }
1158 }
1159
1160 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1161 {
1162         bool blocked = false;
1163         struct blocked_key *b;
1164
1165         rcu_read_lock();
1166         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1167                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1168                         blocked = true;
1169                         break;
1170                 }
1171         }
1172
1173         rcu_read_unlock();
1174         return blocked;
1175 }
1176
1177 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1178 {
1179         struct link_key *k;
1180
1181         rcu_read_lock();
1182         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1183                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1184                         rcu_read_unlock();
1185
1186                         if (hci_is_blocked_key(hdev,
1187                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
1188                                                k->val)) {
1189                                 bt_dev_warn_ratelimited(hdev,
1190                                                         "Link key blocked for %pMR",
1191                                                         &k->bdaddr);
1192                                 return NULL;
1193                         }
1194
1195                         return k;
1196                 }
1197         }
1198         rcu_read_unlock();
1199
1200         return NULL;
1201 }
1202
1203 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1204                                u8 key_type, u8 old_key_type)
1205 {
1206         /* Legacy key */
1207         if (key_type < 0x03)
1208                 return true;
1209
1210         /* Debug keys are insecure so don't store them persistently */
1211         if (key_type == HCI_LK_DEBUG_COMBINATION)
1212                 return false;
1213
1214         /* Changed combination key and there's no previous one */
1215         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1216                 return false;
1217
1218         /* Security mode 3 case */
1219         if (!conn)
1220                 return true;
1221
1222         /* BR/EDR key derived using SC from an LE link */
1223         if (conn->type == LE_LINK)
1224                 return true;
1225
1226         /* Neither local nor remote side had no-bonding as requirement */
1227         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1228                 return true;
1229
1230         /* Local side had dedicated bonding as requirement */
1231         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1232                 return true;
1233
1234         /* Remote side had dedicated bonding as requirement */
1235         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1236                 return true;
1237
1238         /* If none of the above criteria match, then don't store the key
1239          * persistently */
1240         return false;
1241 }
1242
1243 static u8 ltk_role(u8 type)
1244 {
1245         if (type == SMP_LTK)
1246                 return HCI_ROLE_MASTER;
1247
1248         return HCI_ROLE_SLAVE;
1249 }
1250
1251 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1252                              u8 addr_type, u8 role)
1253 {
1254         struct smp_ltk *k;
1255
1256         rcu_read_lock();
1257         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1258                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1259                         continue;
1260
1261                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1262                         rcu_read_unlock();
1263
1264                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1265                                                k->val)) {
1266                                 bt_dev_warn_ratelimited(hdev,
1267                                                         "LTK blocked for %pMR",
1268                                                         &k->bdaddr);
1269                                 return NULL;
1270                         }
1271
1272                         return k;
1273                 }
1274         }
1275         rcu_read_unlock();
1276
1277         return NULL;
1278 }
1279
1280 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1281 {
1282         struct smp_irk *irk_to_return = NULL;
1283         struct smp_irk *irk;
1284
1285         rcu_read_lock();
1286         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1287                 if (!bacmp(&irk->rpa, rpa)) {
1288                         irk_to_return = irk;
1289                         goto done;
1290                 }
1291         }
1292
1293         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1294                 if (smp_irk_matches(hdev, irk->val, rpa)) {
1295                         bacpy(&irk->rpa, rpa);
1296                         irk_to_return = irk;
1297                         goto done;
1298                 }
1299         }
1300
1301 done:
1302         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1303                                                 irk_to_return->val)) {
1304                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1305                                         &irk_to_return->bdaddr);
1306                 irk_to_return = NULL;
1307         }
1308
1309         rcu_read_unlock();
1310
1311         return irk_to_return;
1312 }
1313
1314 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1315                                      u8 addr_type)
1316 {
1317         struct smp_irk *irk_to_return = NULL;
1318         struct smp_irk *irk;
1319
1320         /* Identity Address must be public or static random */
1321         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1322                 return NULL;
1323
1324         rcu_read_lock();
1325         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1326                 if (addr_type == irk->addr_type &&
1327                     bacmp(bdaddr, &irk->bdaddr) == 0) {
1328                         irk_to_return = irk;
1329                         goto done;
1330                 }
1331         }
1332
1333 done:
1334
1335         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1336                                                 irk_to_return->val)) {
1337                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1338                                         &irk_to_return->bdaddr);
1339                 irk_to_return = NULL;
1340         }
1341
1342         rcu_read_unlock();
1343
1344         return irk_to_return;
1345 }
1346
1347 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1348                                   bdaddr_t *bdaddr, u8 *val, u8 type,
1349                                   u8 pin_len, bool *persistent)
1350 {
1351         struct link_key *key, *old_key;
1352         u8 old_key_type;
1353
1354         old_key = hci_find_link_key(hdev, bdaddr);
1355         if (old_key) {
1356                 old_key_type = old_key->type;
1357                 key = old_key;
1358         } else {
1359                 old_key_type = conn ? conn->key_type : 0xff;
1360                 key = kzalloc(sizeof(*key), GFP_KERNEL);
1361                 if (!key)
1362                         return NULL;
1363                 list_add_rcu(&key->list, &hdev->link_keys);
1364         }
1365
1366         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1367
1368         /* Some buggy controller combinations generate a changed
1369          * combination key for legacy pairing even when there's no
1370          * previous key */
1371         if (type == HCI_LK_CHANGED_COMBINATION &&
1372             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1373                 type = HCI_LK_COMBINATION;
1374                 if (conn)
1375                         conn->key_type = type;
1376         }
1377
1378         bacpy(&key->bdaddr, bdaddr);
1379         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1380         key->pin_len = pin_len;
1381
1382         if (type == HCI_LK_CHANGED_COMBINATION)
1383                 key->type = old_key_type;
1384         else
1385                 key->type = type;
1386
1387         if (persistent)
1388                 *persistent = hci_persistent_key(hdev, conn, type,
1389                                                  old_key_type);
1390
1391         return key;
1392 }
1393
1394 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1395                             u8 addr_type, u8 type, u8 authenticated,
1396                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1397 {
1398         struct smp_ltk *key, *old_key;
1399         u8 role = ltk_role(type);
1400
1401         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1402         if (old_key)
1403                 key = old_key;
1404         else {
1405                 key = kzalloc(sizeof(*key), GFP_KERNEL);
1406                 if (!key)
1407                         return NULL;
1408                 list_add_rcu(&key->list, &hdev->long_term_keys);
1409         }
1410
1411         bacpy(&key->bdaddr, bdaddr);
1412         key->bdaddr_type = addr_type;
1413         memcpy(key->val, tk, sizeof(key->val));
1414         key->authenticated = authenticated;
1415         key->ediv = ediv;
1416         key->rand = rand;
1417         key->enc_size = enc_size;
1418         key->type = type;
1419
1420         return key;
1421 }
1422
1423 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1424                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
1425 {
1426         struct smp_irk *irk;
1427
1428         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1429         if (!irk) {
1430                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1431                 if (!irk)
1432                         return NULL;
1433
1434                 bacpy(&irk->bdaddr, bdaddr);
1435                 irk->addr_type = addr_type;
1436
1437                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1438         }
1439
1440         memcpy(irk->val, val, 16);
1441         bacpy(&irk->rpa, rpa);
1442
1443         return irk;
1444 }
1445
1446 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1447 {
1448         struct link_key *key;
1449
1450         key = hci_find_link_key(hdev, bdaddr);
1451         if (!key)
1452                 return -ENOENT;
1453
1454         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1455
1456         list_del_rcu(&key->list);
1457         kfree_rcu(key, rcu);
1458
1459         return 0;
1460 }
1461
1462 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1463 {
1464         struct smp_ltk *k, *tmp;
1465         int removed = 0;
1466
1467         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1468                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1469                         continue;
1470
1471                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1472
1473                 list_del_rcu(&k->list);
1474                 kfree_rcu(k, rcu);
1475                 removed++;
1476         }
1477
1478         return removed ? 0 : -ENOENT;
1479 }
1480
1481 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1482 {
1483         struct smp_irk *k, *tmp;
1484
1485         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1486                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1487                         continue;
1488
1489                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1490
1491                 list_del_rcu(&k->list);
1492                 kfree_rcu(k, rcu);
1493         }
1494 }
1495
1496 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1497 {
1498         struct smp_ltk *k;
1499         struct smp_irk *irk;
1500         u8 addr_type;
1501
1502         if (type == BDADDR_BREDR) {
1503                 if (hci_find_link_key(hdev, bdaddr))
1504                         return true;
1505                 return false;
1506         }
1507
1508         /* Convert to HCI addr type which struct smp_ltk uses */
1509         if (type == BDADDR_LE_PUBLIC)
1510                 addr_type = ADDR_LE_DEV_PUBLIC;
1511         else
1512                 addr_type = ADDR_LE_DEV_RANDOM;
1513
1514         irk = hci_get_irk(hdev, bdaddr, addr_type);
1515         if (irk) {
1516                 bdaddr = &irk->bdaddr;
1517                 addr_type = irk->addr_type;
1518         }
1519
1520         rcu_read_lock();
1521         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1522                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1523                         rcu_read_unlock();
1524                         return true;
1525                 }
1526         }
1527         rcu_read_unlock();
1528
1529         return false;
1530 }
1531
1532 /* HCI command timer function */
1533 static void hci_cmd_timeout(struct work_struct *work)
1534 {
1535         struct hci_dev *hdev = container_of(work, struct hci_dev,
1536                                             cmd_timer.work);
1537
1538         if (hdev->sent_cmd) {
1539                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1540                 u16 opcode = __le16_to_cpu(sent->opcode);
1541
1542                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1543         } else {
1544                 bt_dev_err(hdev, "command tx timeout");
1545         }
1546
1547         if (hdev->cmd_timeout)
1548                 hdev->cmd_timeout(hdev);
1549
1550         atomic_set(&hdev->cmd_cnt, 1);
1551         queue_work(hdev->workqueue, &hdev->cmd_work);
1552 }
1553
1554 /* HCI ncmd timer function */
1555 static void hci_ncmd_timeout(struct work_struct *work)
1556 {
1557         struct hci_dev *hdev = container_of(work, struct hci_dev,
1558                                             ncmd_timer.work);
1559
1560         bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1561
1562         /* During HCI_INIT phase no events can be injected if the ncmd timer
1563          * triggers since the procedure has its own timeout handling.
1564          */
1565         if (test_bit(HCI_INIT, &hdev->flags))
1566                 return;
1567
1568         /* This is an irrecoverable state, inject hardware error event */
1569         hci_reset_dev(hdev);
1570 }
1571
1572 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1573                                           bdaddr_t *bdaddr, u8 bdaddr_type)
1574 {
1575         struct oob_data *data;
1576
1577         list_for_each_entry(data, &hdev->remote_oob_data, list) {
1578                 if (bacmp(bdaddr, &data->bdaddr) != 0)
1579                         continue;
1580                 if (data->bdaddr_type != bdaddr_type)
1581                         continue;
1582                 return data;
1583         }
1584
1585         return NULL;
1586 }
1587
1588 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1589                                u8 bdaddr_type)
1590 {
1591         struct oob_data *data;
1592
1593         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1594         if (!data)
1595                 return -ENOENT;
1596
1597         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1598
1599         list_del(&data->list);
1600         kfree(data);
1601
1602         return 0;
1603 }
1604
1605 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1606 {
1607         struct oob_data *data, *n;
1608
1609         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1610                 list_del(&data->list);
1611                 kfree(data);
1612         }
1613 }
1614
1615 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1616                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
1617                             u8 *hash256, u8 *rand256)
1618 {
1619         struct oob_data *data;
1620
1621         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1622         if (!data) {
1623                 data = kmalloc(sizeof(*data), GFP_KERNEL);
1624                 if (!data)
1625                         return -ENOMEM;
1626
1627                 bacpy(&data->bdaddr, bdaddr);
1628                 data->bdaddr_type = bdaddr_type;
1629                 list_add(&data->list, &hdev->remote_oob_data);
1630         }
1631
1632         if (hash192 && rand192) {
1633                 memcpy(data->hash192, hash192, sizeof(data->hash192));
1634                 memcpy(data->rand192, rand192, sizeof(data->rand192));
1635                 if (hash256 && rand256)
1636                         data->present = 0x03;
1637         } else {
1638                 memset(data->hash192, 0, sizeof(data->hash192));
1639                 memset(data->rand192, 0, sizeof(data->rand192));
1640                 if (hash256 && rand256)
1641                         data->present = 0x02;
1642                 else
1643                         data->present = 0x00;
1644         }
1645
1646         if (hash256 && rand256) {
1647                 memcpy(data->hash256, hash256, sizeof(data->hash256));
1648                 memcpy(data->rand256, rand256, sizeof(data->rand256));
1649         } else {
1650                 memset(data->hash256, 0, sizeof(data->hash256));
1651                 memset(data->rand256, 0, sizeof(data->rand256));
1652                 if (hash192 && rand192)
1653                         data->present = 0x01;
1654         }
1655
1656         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1657
1658         return 0;
1659 }
1660
1661 /* This function requires the caller holds hdev->lock */
1662 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1663 {
1664         struct adv_info *adv_instance;
1665
1666         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1667                 if (adv_instance->instance == instance)
1668                         return adv_instance;
1669         }
1670
1671         return NULL;
1672 }
1673
1674 /* This function requires the caller holds hdev->lock */
1675 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1676 {
1677         struct adv_info *cur_instance;
1678
1679         cur_instance = hci_find_adv_instance(hdev, instance);
1680         if (!cur_instance)
1681                 return NULL;
1682
1683         if (cur_instance == list_last_entry(&hdev->adv_instances,
1684                                             struct adv_info, list))
1685                 return list_first_entry(&hdev->adv_instances,
1686                                         struct adv_info, list);
1687         else
1688                 return list_next_entry(cur_instance, list);
1689 }
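
/* Example: an advertising rotation step could use the helper above to cycle
 * through all registered instances, wrapping from the last list entry back
 * to the first (illustrative sketch; assumes hdev->lock is held):
 *
 *	struct adv_info *next;
 *
 *	next = hci_get_next_instance(hdev, hdev->cur_adv_instance);
 *	if (next)
 *		hdev->cur_adv_instance = next->instance;
 */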
1690
1691 /* This function requires the caller holds hdev->lock */
1692 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1693 {
1694         struct adv_info *adv_instance;
1695
1696         adv_instance = hci_find_adv_instance(hdev, instance);
1697         if (!adv_instance)
1698                 return -ENOENT;
1699
1700         BT_DBG("%s removing instance %d", hdev->name, instance);
1701
1702         if (hdev->cur_adv_instance == instance) {
1703                 if (hdev->adv_instance_timeout) {
1704                         cancel_delayed_work(&hdev->adv_instance_expire);
1705                         hdev->adv_instance_timeout = 0;
1706                 }
1707                 hdev->cur_adv_instance = 0x00;
1708         }
1709
1710         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1711
1712         list_del(&adv_instance->list);
1713         kfree(adv_instance);
1714
1715         hdev->adv_instance_cnt--;
1716
1717         return 0;
1718 }
1719
1720 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1721 {
1722         struct adv_info *adv_instance, *n;
1723
1724         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1725                 adv_instance->rpa_expired = rpa_expired;
1726 }
1727
1728 /* This function requires the caller holds hdev->lock */
1729 void hci_adv_instances_clear(struct hci_dev *hdev)
1730 {
1731         struct adv_info *adv_instance, *n;
1732
1733         if (hdev->adv_instance_timeout) {
1734                 cancel_delayed_work(&hdev->adv_instance_expire);
1735                 hdev->adv_instance_timeout = 0;
1736         }
1737
1738         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1739                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1740                 list_del(&adv_instance->list);
1741                 kfree(adv_instance);
1742         }
1743
1744         hdev->adv_instance_cnt = 0;
1745         hdev->cur_adv_instance = 0x00;
1746 }
1747
1748 static void adv_instance_rpa_expired(struct work_struct *work)
1749 {
1750         struct adv_info *adv_instance = container_of(work, struct adv_info,
1751                                                      rpa_expired_cb.work);
1752
1753         BT_DBG("");
1754
1755         adv_instance->rpa_expired = true;
1756 }
1757
1758 /* This function requires the caller holds hdev->lock */
1759 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1760                                       u32 flags, u16 adv_data_len, u8 *adv_data,
1761                                       u16 scan_rsp_len, u8 *scan_rsp_data,
1762                                       u16 timeout, u16 duration, s8 tx_power,
1763                                       u32 min_interval, u32 max_interval,
1764                                       u8 mesh_handle)
1765 {
1766         struct adv_info *adv;
1767
1768         adv = hci_find_adv_instance(hdev, instance);
1769         if (adv) {
1770                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1771                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1772                 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1773         } else {
1774                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1775                     instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1776                         return ERR_PTR(-EOVERFLOW);
1777
1778                 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1779                 if (!adv)
1780                         return ERR_PTR(-ENOMEM);
1781
1782                 adv->pending = true;
1783                 adv->instance = instance;
1784                 list_add(&adv->list, &hdev->adv_instances);
1785                 hdev->adv_instance_cnt++;
1786         }
1787
1788         adv->flags = flags;
1789         adv->min_interval = min_interval;
1790         adv->max_interval = max_interval;
1791         adv->tx_power = tx_power;
1792         /* Defining a mesh_handle changes the timing units to ms,
1793          * rather than seconds, and ties the instance to the requested
1794          * mesh_tx queue.
1795          */
1796         adv->mesh = mesh_handle;
1797
1798         hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1799                                   scan_rsp_len, scan_rsp_data);
1800
1801         adv->timeout = timeout;
1802         adv->remaining_time = timeout;
1803
1804         if (duration == 0)
1805                 adv->duration = hdev->def_multi_adv_rotation_duration;
1806         else
1807                 adv->duration = duration;
1808
1809         INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1810
1811         BT_DBG("%s added instance %d", hdev->name, instance);
1812
1813         return adv;
1814 }
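
/* Example: registering a simple advertising instance (illustrative sketch;
 * the payload is a hypothetical Flags AD element and instance 0x01 is
 * arbitrary). Interval bounds reuse the controller defaults and a zero
 * mesh_handle keeps the timing units in seconds:
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };	// AD: Flags = LE General Discoverable
 *	struct adv_info *adv;
 *
 *	adv = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad, 0, NULL,
 *				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
 *				   hdev->le_adv_min_interval,
 *				   hdev->le_adv_max_interval, 0);
 *	if (IS_ERR(adv))
 *		return PTR_ERR(adv);
 */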
1815
1816 /* This function requires the caller holds hdev->lock */
1817 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1818                                       u32 flags, u8 data_len, u8 *data,
1819                                       u32 min_interval, u32 max_interval)
1820 {
1821         struct adv_info *adv;
1822
1823         adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1824                                    0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1825                                    min_interval, max_interval, 0);
1826         if (IS_ERR(adv))
1827                 return adv;
1828
1829         adv->periodic = true;
1830         adv->per_adv_data_len = data_len;
1831
1832         if (data)
1833                 memcpy(adv->per_adv_data, data, data_len);
1834
1835         return adv;
1836 }
1837
1838 /* This function requires the caller holds hdev->lock */
1839 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1840                               u16 adv_data_len, u8 *adv_data,
1841                               u16 scan_rsp_len, u8 *scan_rsp_data)
1842 {
1843         struct adv_info *adv;
1844
1845         adv = hci_find_adv_instance(hdev, instance);
1846
1847         /* If advertisement doesn't exist, we can't modify its data */
1848         if (!adv)
1849                 return -ENOENT;
1850
1851         if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1852                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1853                 memcpy(adv->adv_data, adv_data, adv_data_len);
1854                 adv->adv_data_len = adv_data_len;
1855                 adv->adv_data_changed = true;
1856         }
1857
1858         if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1859                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1860                 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1861                 adv->scan_rsp_len = scan_rsp_len;
1862                 adv->scan_rsp_changed = true;
1863         }
1864
1865         /* Mark as changed if there are flags which would affect it */
1866         if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1867             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1868                 adv->scan_rsp_changed = true;
1869
1870         return 0;
1871 }
1872
1873 /* This function requires the caller holds hdev->lock */
1874 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1875 {
1876         u32 flags;
1877         struct adv_info *adv;
1878
1879         if (instance == 0x00) {
1880                 /* Instance 0 always manages the "Tx Power" and "Flags"
1881                  * fields
1882                  */
1883                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1884
1885                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1886                  * corresponds to the "connectable" instance flag.
1887                  */
1888                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1889                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1890
1891                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1892                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1893                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1894                         flags |= MGMT_ADV_FLAG_DISCOV;
1895
1896                 return flags;
1897         }
1898
1899         adv = hci_find_adv_instance(hdev, instance);
1900
1901         /* Return 0 when given an invalid instance identifier. */
1902         if (!adv)
1903                 return 0;
1904
1905         return adv->flags;
1906 }
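
/* Example: a caller deciding whether an instance should advertise as
 * connectable could test the returned flags (illustrative sketch):
 *
 *	u32 flags = hci_adv_instance_flags(hdev, instance);
 *	bool connectable = !!(flags & MGMT_ADV_FLAG_CONNECTABLE);
 */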
1907
1908 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1909 {
1910         struct adv_info *adv;
1911
1912         /* Instance 0x00 always sets the local name */
1913         if (instance == 0x00)
1914                 return true;
1915
1916         adv = hci_find_adv_instance(hdev, instance);
1917         if (!adv)
1918                 return false;
1919
1920         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1921             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1922                 return true;
1923
1924         return adv->scan_rsp_len ? true : false;
1925 }
1926
1927 /* This function requires the caller holds hdev->lock */
1928 void hci_adv_monitors_clear(struct hci_dev *hdev)
1929 {
1930         struct adv_monitor *monitor;
1931         int handle;
1932
1933         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1934                 hci_free_adv_monitor(hdev, monitor);
1935
1936         idr_destroy(&hdev->adv_monitors_idr);
1937 }
1938
1939 /* Frees the monitor structure and does some bookkeeping.
1940  * This function requires the caller holds hdev->lock.
1941  */
1942 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1943 {
1944         struct adv_pattern *pattern;
1945         struct adv_pattern *tmp;
1946
1947         if (!monitor)
1948                 return;
1949
1950         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1951                 list_del(&pattern->list);
1952                 kfree(pattern);
1953         }
1954
1955         if (monitor->handle)
1956                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1957
1958         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1959                 hdev->adv_monitors_cnt--;
1960                 mgmt_adv_monitor_removed(hdev, monitor->handle);
1961         }
1962
1963         kfree(monitor);
1964 }
1965
1966 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1967  * also attempts to forward the request to the controller.
1968  * This function requires the caller holds hci_req_sync_lock.
1969  */
1970 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1971 {
1972         int min, max, handle;
1973         int status = 0;
1974
1975         if (!monitor)
1976                 return -EINVAL;
1977
1978         hci_dev_lock(hdev);
1979
1980         min = HCI_MIN_ADV_MONITOR_HANDLE;
1981         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1982         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1983                            GFP_KERNEL);
1984
1985         hci_dev_unlock(hdev);
1986
1987         if (handle < 0)
1988                 return handle;
1989
1990         monitor->handle = handle;
1991
1992         if (!hdev_is_powered(hdev))
1993                 return status;
1994
1995         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1996         case HCI_ADV_MONITOR_EXT_NONE:
1997                 bt_dev_dbg(hdev, "add monitor %d status %d",
1998                            monitor->handle, status);
1999                 /* Message was not forwarded to controller - not an error */
2000                 break;
2001
2002         case HCI_ADV_MONITOR_EXT_MSFT:
2003                 status = msft_add_monitor_pattern(hdev, monitor);
2004                 bt_dev_dbg(hdev, "add monitor %d msft status %d",
2005                            monitor->handle, status);
2006                 break;
2007         }
2008
2009         return status;
2010 }
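
/* Example: a minimal sketch of registering an (empty) monitor while holding
 * hci_req_sync_lock; a real caller such as the mgmt layer would also fill
 * in patterns and RSSI thresholds first ("m" and "err" are illustrative):
 *
 *	struct adv_monitor *m = kzalloc(sizeof(*m), GFP_KERNEL);
 *
 *	if (!m)
 *		return -ENOMEM;
 *	INIT_LIST_HEAD(&m->patterns);
 *	err = hci_add_adv_monitor(hdev, m);	// assigns m->handle on success
 */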
2011
2012 /* Attempts to tell the controller to remove the monitor and then frees it.
2013  * If the controller has no corresponding handle, remove the monitor anyway.
2014  * This function requires the caller holds hci_req_sync_lock.
2015  */
2016 static int hci_remove_adv_monitor(struct hci_dev *hdev,
2017                                   struct adv_monitor *monitor)
2018 {
2019         int status = 0;
2020
2021         switch (hci_get_adv_monitor_offload_ext(hdev)) {
2022         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
2023                 bt_dev_dbg(hdev, "remove monitor %d status %d",
2024                            monitor->handle, status);
2025                 goto free_monitor;
2026
2027         case HCI_ADV_MONITOR_EXT_MSFT:
2028                 status = msft_remove_monitor(hdev, monitor);
2029                 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
2030                            monitor->handle, status);
2031                 break;
2032         }
2033
2034         /* If no matching handle is registered, just free the monitor */
2035         if (status == -ENOENT)
2036                 goto free_monitor;
2037
2038         return status;
2039
2040 free_monitor:
2041         if (status == -ENOENT)
2042                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2043                             monitor->handle);
2044         hci_free_adv_monitor(hdev, monitor);
2045
2046         return status;
2047 }
2048
2049 /* This function requires the caller holds hci_req_sync_lock */
2050 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2051 {
2052         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2053
2054         if (!monitor)
2055                 return -EINVAL;
2056
2057         return hci_remove_adv_monitor(hdev, monitor);
2058 }
2059
2060 /* This function requires the caller holds hci_req_sync_lock */
2061 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2062 {
2063         struct adv_monitor *monitor;
2064         int idr_next_id = 0;
2065         int status = 0;
2066
2067         while (1) {
2068                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2069                 if (!monitor)
2070                         break;
2071
2072                 status = hci_remove_adv_monitor(hdev, monitor);
2073                 if (status)
2074                         return status;
2075
2076                 idr_next_id++;
2077         }
2078
2079         return status;
2080 }
2081
2082 /* This function requires the caller holds hdev->lock */
2083 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2084 {
2085         return !idr_is_empty(&hdev->adv_monitors_idr);
2086 }
2087
2088 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2089 {
2090         if (msft_monitor_supported(hdev))
2091                 return HCI_ADV_MONITOR_EXT_MSFT;
2092
2093         return HCI_ADV_MONITOR_EXT_NONE;
2094 }
2095
2096 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2097                                            bdaddr_t *bdaddr, u8 type)
2098 {
2099         struct bdaddr_list *b;
2100
2101         list_for_each_entry(b, bdaddr_list, list) {
2102                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2103                         return b;
2104         }
2105
2106         return NULL;
2107 }
2108
2109 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2110                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2111                                 u8 type)
2112 {
2113         struct bdaddr_list_with_irk *b;
2114
2115         list_for_each_entry(b, bdaddr_list, list) {
2116                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2117                         return b;
2118         }
2119
2120         return NULL;
2121 }
2122
2123 struct bdaddr_list_with_flags *
2124 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2125                                   bdaddr_t *bdaddr, u8 type)
2126 {
2127         struct bdaddr_list_with_flags *b;
2128
2129         list_for_each_entry(b, bdaddr_list, list) {
2130                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2131                         return b;
2132         }
2133
2134         return NULL;
2135 }
2136
2137 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2138 {
2139         struct bdaddr_list *b, *n;
2140
2141         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2142                 list_del(&b->list);
2143                 kfree(b);
2144         }
2145 }
2146
2147 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2148 {
2149         struct bdaddr_list *entry;
2150
2151         if (!bacmp(bdaddr, BDADDR_ANY))
2152                 return -EBADF;
2153
2154         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2155                 return -EEXIST;
2156
2157         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2158         if (!entry)
2159                 return -ENOMEM;
2160
2161         bacpy(&entry->bdaddr, bdaddr);
2162         entry->bdaddr_type = type;
2163
2164         list_add(&entry->list, list);
2165
2166         return 0;
2167 }
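
/* Example: adding a public LE address to an acceptance list and tolerating
 * duplicates (illustrative sketch; "addr" is a hypothetical bdaddr_t):
 *
 *	err = hci_bdaddr_list_add(&hdev->le_accept_list, &addr,
 *				  ADDR_LE_DEV_PUBLIC);
 *	if (err && err != -EEXIST)
 *		return err;
 */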
2168
2169 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2170                                         u8 type, u8 *peer_irk, u8 *local_irk)
2171 {
2172         struct bdaddr_list_with_irk *entry;
2173
2174         if (!bacmp(bdaddr, BDADDR_ANY))
2175                 return -EBADF;
2176
2177         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2178                 return -EEXIST;
2179
2180         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2181         if (!entry)
2182                 return -ENOMEM;
2183
2184         bacpy(&entry->bdaddr, bdaddr);
2185         entry->bdaddr_type = type;
2186
2187         if (peer_irk)
2188                 memcpy(entry->peer_irk, peer_irk, 16);
2189
2190         if (local_irk)
2191                 memcpy(entry->local_irk, local_irk, 16);
2192
2193         list_add(&entry->list, list);
2194
2195         return 0;
2196 }
2197
2198 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2199                                    u8 type, u32 flags)
2200 {
2201         struct bdaddr_list_with_flags *entry;
2202
2203         if (!bacmp(bdaddr, BDADDR_ANY))
2204                 return -EBADF;
2205
2206         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2207                 return -EEXIST;
2208
2209         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2210         if (!entry)
2211                 return -ENOMEM;
2212
2213         bacpy(&entry->bdaddr, bdaddr);
2214         entry->bdaddr_type = type;
2215         entry->flags = flags;
2216
2217         list_add(&entry->list, list);
2218
2219         return 0;
2220 }
2221
2222 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2223 {
2224         struct bdaddr_list *entry;
2225
2226         if (!bacmp(bdaddr, BDADDR_ANY)) {
2227                 hci_bdaddr_list_clear(list);
2228                 return 0;
2229         }
2230
2231         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2232         if (!entry)
2233                 return -ENOENT;
2234
2235         list_del(&entry->list);
2236         kfree(entry);
2237
2238         return 0;
2239 }
2240
2241 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2242                                                         u8 type)
2243 {
2244         struct bdaddr_list_with_irk *entry;
2245
2246         if (!bacmp(bdaddr, BDADDR_ANY)) {
2247                 hci_bdaddr_list_clear(list);
2248                 return 0;
2249         }
2250
2251         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2252         if (!entry)
2253                 return -ENOENT;
2254
2255         list_del(&entry->list);
2256         kfree(entry);
2257
2258         return 0;
2259 }
2260
2261 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2262                                    u8 type)
2263 {
2264         struct bdaddr_list_with_flags *entry;
2265
2266         if (!bacmp(bdaddr, BDADDR_ANY)) {
2267                 hci_bdaddr_list_clear(list);
2268                 return 0;
2269         }
2270
2271         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2272         if (!entry)
2273                 return -ENOENT;
2274
2275         list_del(&entry->list);
2276         kfree(entry);
2277
2278         return 0;
2279 }
2280
2281 /* This function requires the caller holds hdev->lock */
2282 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2283                                                bdaddr_t *addr, u8 addr_type)
2284 {
2285         struct hci_conn_params *params;
2286
2287         list_for_each_entry(params, &hdev->le_conn_params, list) {
2288                 if (bacmp(&params->addr, addr) == 0 &&
2289                     params->addr_type == addr_type) {
2290                         return params;
2291                 }
2292         }
2293
2294         return NULL;
2295 }
2296
2297 /* This function requires the caller holds hdev->lock */
2298 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2299                                                   bdaddr_t *addr, u8 addr_type)
2300 {
2301         struct hci_conn_params *param;
2302
2303         list_for_each_entry(param, list, action) {
2304                 if (bacmp(&param->addr, addr) == 0 &&
2305                     param->addr_type == addr_type)
2306                         return param;
2307         }
2308
2309         return NULL;
2310 }
2311
2312 /* This function requires the caller holds hdev->lock */
2313 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2314                                             bdaddr_t *addr, u8 addr_type)
2315 {
2316         struct hci_conn_params *params;
2317
2318         params = hci_conn_params_lookup(hdev, addr, addr_type);
2319         if (params)
2320                 return params;
2321
2322         params = kzalloc(sizeof(*params), GFP_KERNEL);
2323         if (!params) {
2324                 bt_dev_err(hdev, "out of memory");
2325                 return NULL;
2326         }
2327
2328         bacpy(&params->addr, addr);
2329         params->addr_type = addr_type;
2330
2331         list_add(&params->list, &hdev->le_conn_params);
2332         INIT_LIST_HEAD(&params->action);
2333
2334         params->conn_min_interval = hdev->le_conn_min_interval;
2335         params->conn_max_interval = hdev->le_conn_max_interval;
2336         params->conn_latency = hdev->le_conn_latency;
2337         params->supervision_timeout = hdev->le_supv_timeout;
2338         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2339
2340         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2341
2342         return params;
2343 }
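
/* Example: asking the core to reconnect to an LE peer whenever it is seen
 * advertising (illustrative sketch; assumes hdev->lock is held and "addr"
 * is a hypothetical peer address):
 *
 *	struct hci_conn_params *p;
 *
 *	p = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (!p)
 *		return -ENOMEM;
 *	p->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */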
2344
2345 static void hci_conn_params_free(struct hci_conn_params *params)
2346 {
2347         if (params->conn) {
2348                 hci_conn_drop(params->conn);
2349                 hci_conn_put(params->conn);
2350         }
2351
2352         list_del(&params->action);
2353         list_del(&params->list);
2354         kfree(params);
2355 }
2356
2357 /* This function requires the caller holds hdev->lock */
2358 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2359 {
2360         struct hci_conn_params *params;
2361
2362         params = hci_conn_params_lookup(hdev, addr, addr_type);
2363         if (!params)
2364                 return;
2365
2366         hci_conn_params_free(params);
2367
2368         hci_update_passive_scan(hdev);
2369
2370         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2371 }
2372
2373 /* This function requires the caller holds hdev->lock */
2374 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2375 {
2376         struct hci_conn_params *params, *tmp;
2377
2378         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2379                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2380                         continue;
2381
2382                 /* If we are trying to establish a one-time connection to a
2383                  * disabled device, keep the params but mark them explicit-connect only.
2384                  */
2385                 if (params->explicit_connect) {
2386                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2387                         continue;
2388                 }
2389
2390                 list_del(&params->list);
2391                 kfree(params);
2392         }
2393
2394         BT_DBG("All LE disabled connection parameters were removed");
2395 }
2396
2397 /* This function requires the caller holds hdev->lock */
2398 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2399 {
2400         struct hci_conn_params *params, *tmp;
2401
2402         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2403                 hci_conn_params_free(params);
2404
2405         BT_DBG("All LE connection parameters were removed");
2406 }
2407
2408 /* Copy the Identity Address of the controller.
2409  *
2410  * If the controller has a public BD_ADDR, then by default use that one.
2411  * If this is an LE-only controller without a public address, default to
2412  * the static random address.
2413  *
2414  * For debugging purposes it is possible to force controllers with a
2415  * public address to use the static random address instead.
2416  *
2417  * In case BR/EDR has been disabled on a dual-mode controller and
2418  * userspace has configured a static address, then that address
2419  * becomes the identity address instead of the public BR/EDR address.
2420  */
2421 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2422                                u8 *bdaddr_type)
2423 {
2424         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2425             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2426             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2427              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2428                 bacpy(bdaddr, &hdev->static_addr);
2429                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2430         } else {
2431                 bacpy(bdaddr, &hdev->bdaddr);
2432                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2433         }
2434 }
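
/* Example: retrieving the identity address, e.g. to fill an own-address
 * field in a management reply (illustrative sketch):
 *
 *	bdaddr_t id_addr;
 *	u8 id_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_type);
 *	// id_type is now ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM
 */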
2435
2436 static void hci_clear_wake_reason(struct hci_dev *hdev)
2437 {
2438         hci_dev_lock(hdev);
2439
2440         hdev->wake_reason = 0;
2441         bacpy(&hdev->wake_addr, BDADDR_ANY);
2442         hdev->wake_addr_type = 0;
2443
2444         hci_dev_unlock(hdev);
2445 }
2446
2447 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2448                                 void *data)
2449 {
2450         struct hci_dev *hdev =
2451                 container_of(nb, struct hci_dev, suspend_notifier);
2452         int ret = 0;
2453
2454         /* Userspace has full control of this device. Do nothing. */
2455         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2456                 return NOTIFY_DONE;
2457
2458         if (action == PM_SUSPEND_PREPARE)
2459                 ret = hci_suspend_dev(hdev);
2460         else if (action == PM_POST_SUSPEND)
2461                 ret = hci_resume_dev(hdev);
2462
2463         if (ret)
2464                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2465                            action, ret);
2466
2467         return NOTIFY_DONE;
2468 }
2469
2470 /* Alloc HCI device */
2471 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2472 {
2473         struct hci_dev *hdev;
2474         unsigned int alloc_size;
2475
2476         alloc_size = sizeof(*hdev);
2477         if (sizeof_priv) {
2478                 /* FIXME: may need ALIGN-ment? */
2479                 alloc_size += sizeof_priv;
2480         }
2481
2482         hdev = kzalloc(alloc_size, GFP_KERNEL);
2483         if (!hdev)
2484                 return NULL;
2485
2486         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2487         hdev->esco_type = (ESCO_HV1);
2488         hdev->link_mode = (HCI_LM_ACCEPT);
2489         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2490         hdev->io_capability = 0x03;     /* No Input No Output */
2491         hdev->manufacturer = 0xffff;    /* Default to internal use */
2492         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2493         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2494         hdev->adv_instance_cnt = 0;
2495         hdev->cur_adv_instance = 0x00;
2496         hdev->adv_instance_timeout = 0;
2497
2498         hdev->advmon_allowlist_duration = 300;
2499         hdev->advmon_no_filter_duration = 500;
2500         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
2501
2502         hdev->sniff_max_interval = 800;
2503         hdev->sniff_min_interval = 80;
2504
2505         hdev->le_adv_channel_map = 0x07;
2506         hdev->le_adv_min_interval = 0x0800;
2507         hdev->le_adv_max_interval = 0x0800;
2508 #ifdef TIZEN_BT
2509         hdev->adv_filter_policy = 0x00;
2510         hdev->adv_type = 0x00;
2511 #endif
2512         hdev->le_scan_interval = 0x0060;
2513         hdev->le_scan_window = 0x0030;
2514         hdev->le_scan_int_suspend = 0x0400;
2515         hdev->le_scan_window_suspend = 0x0012;
2516         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2517         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2518         hdev->le_scan_int_adv_monitor = 0x0060;
2519         hdev->le_scan_window_adv_monitor = 0x0030;
2520         hdev->le_scan_int_connect = 0x0060;
2521         hdev->le_scan_window_connect = 0x0060;
2522         hdev->le_conn_min_interval = 0x0018;
2523         hdev->le_conn_max_interval = 0x0028;
2524         hdev->le_conn_latency = 0x0000;
2525         hdev->le_supv_timeout = 0x002a;
2526         hdev->le_def_tx_len = 0x001b;
2527         hdev->le_def_tx_time = 0x0148;
2528         hdev->le_max_tx_len = 0x001b;
2529         hdev->le_max_tx_time = 0x0148;
2530         hdev->le_max_rx_len = 0x001b;
2531         hdev->le_max_rx_time = 0x0148;
2532         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2533         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2534         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2535         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2536         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2537         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2538         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2539         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2540         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2541
2542         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2543         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2544         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2545         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2546         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2547         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2548
2549         /* default 1.28 sec page scan */
2550         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2551         hdev->def_page_scan_int = 0x0800;
2552         hdev->def_page_scan_window = 0x0012;
2553
2554         mutex_init(&hdev->lock);
2555         mutex_init(&hdev->req_lock);
2556
2557         INIT_LIST_HEAD(&hdev->mesh_pending);
2558         INIT_LIST_HEAD(&hdev->mgmt_pending);
2559         INIT_LIST_HEAD(&hdev->reject_list);
2560         INIT_LIST_HEAD(&hdev->accept_list);
2561         INIT_LIST_HEAD(&hdev->uuids);
2562         INIT_LIST_HEAD(&hdev->link_keys);
2563         INIT_LIST_HEAD(&hdev->long_term_keys);
2564         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2565         INIT_LIST_HEAD(&hdev->remote_oob_data);
2566         INIT_LIST_HEAD(&hdev->le_accept_list);
2567         INIT_LIST_HEAD(&hdev->le_resolv_list);
2568         INIT_LIST_HEAD(&hdev->le_conn_params);
2569         INIT_LIST_HEAD(&hdev->pend_le_conns);
2570         INIT_LIST_HEAD(&hdev->pend_le_reports);
2571         INIT_LIST_HEAD(&hdev->conn_hash.list);
2572         INIT_LIST_HEAD(&hdev->adv_instances);
2573         INIT_LIST_HEAD(&hdev->blocked_keys);
2574         INIT_LIST_HEAD(&hdev->monitored_devices);
2575
2576         INIT_LIST_HEAD(&hdev->local_codecs);
2577         INIT_WORK(&hdev->rx_work, hci_rx_work);
2578         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2579         INIT_WORK(&hdev->tx_work, hci_tx_work);
2580         INIT_WORK(&hdev->power_on, hci_power_on);
2581         INIT_WORK(&hdev->error_reset, hci_error_reset);
2582
2583         hci_cmd_sync_init(hdev);
2584
2585         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2586
2587         skb_queue_head_init(&hdev->rx_q);
2588         skb_queue_head_init(&hdev->cmd_q);
2589         skb_queue_head_init(&hdev->raw_q);
2590
2591         init_waitqueue_head(&hdev->req_wait_q);
2592
2593         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2594         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2595
2596         hci_request_setup(hdev);
2597
2598         hci_init_sysfs(hdev);
2599         discovery_init(hdev);
2600
2601         return hdev;
2602 }
2603 EXPORT_SYMBOL(hci_alloc_dev_priv);
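
/* Example: a driver bundling its own state with the hci_dev allocation
 * (illustrative sketch; "struct my_drv_data" is hypothetical and the private
 * area is reached via hci_get_priv()):
 *
 *	struct hci_dev *hdev = hci_alloc_dev_priv(sizeof(struct my_drv_data));
 *	struct my_drv_data *priv;
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	priv = hci_get_priv(hdev);
 */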
2604
2605 /* Free HCI device */
2606 void hci_free_dev(struct hci_dev *hdev)
2607 {
2608         /* Will be freed via the device release callback */
2609         put_device(&hdev->dev);
2610 }
2611 EXPORT_SYMBOL(hci_free_dev);
2612
2613 /* Register HCI device */
2614 int hci_register_dev(struct hci_dev *hdev)
2615 {
2616         int id, error;
2617
2618         if (!hdev->open || !hdev->close || !hdev->send)
2619                 return -EINVAL;
2620
2621         /* Do not allow HCI_AMP devices to register at index 0,
2622          * so the index can be used as the AMP controller ID.
2623          */
2624         switch (hdev->dev_type) {
2625         case HCI_PRIMARY:
2626                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2627                 break;
2628         case HCI_AMP:
2629                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2630                 break;
2631         default:
2632                 return -EINVAL;
2633         }
2634
2635         if (id < 0)
2636                 return id;
2637
2638         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2639         hdev->id = id;
2640
2641         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2642
2643         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2644         if (!hdev->workqueue) {
2645                 error = -ENOMEM;
2646                 goto err;
2647         }
2648
2649         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2650                                                       hdev->name);
2651         if (!hdev->req_workqueue) {
2652                 destroy_workqueue(hdev->workqueue);
2653                 error = -ENOMEM;
2654                 goto err;
2655         }
2656
2657         if (!IS_ERR_OR_NULL(bt_debugfs))
2658                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2659
2660         dev_set_name(&hdev->dev, "%s", hdev->name);
2661
2662         error = device_add(&hdev->dev);
2663         if (error < 0)
2664                 goto err_wqueue;
2665
2666         hci_leds_init(hdev);
2667
2668         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2669                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2670                                     hdev);
2671         if (hdev->rfkill) {
2672                 if (rfkill_register(hdev->rfkill) < 0) {
2673                         rfkill_destroy(hdev->rfkill);
2674                         hdev->rfkill = NULL;
2675                 }
2676         }
2677
2678         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2679                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2680
2681         hci_dev_set_flag(hdev, HCI_SETUP);
2682         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2683
2684         if (hdev->dev_type == HCI_PRIMARY) {
2685                 /* Assume BR/EDR support until proven otherwise (such as
2686                  * through reading supported features during init).
2687                  */
2688                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2689         }
2690
2691         write_lock(&hci_dev_list_lock);
2692         list_add(&hdev->list, &hci_dev_list);
2693         write_unlock(&hci_dev_list_lock);
2694
2695         /* Devices that are marked for raw-only usage are unconfigured
2696          * and should not be included in normal operation.
2697          */
2698         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2699                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2700
2701         /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2702          * callback.
2703          */
2704         if (hdev->wakeup)
2705                 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2706
2707         hci_sock_dev_event(hdev, HCI_DEV_REG);
2708         hci_dev_hold(hdev);
2709
2710         error = hci_register_suspend_notifier(hdev);
2711         if (error)
2712                 BT_WARN("register suspend notifier failed: %d", error);
2713
2714         queue_work(hdev->req_workqueue, &hdev->power_on);
2715
2716         idr_init(&hdev->adv_monitors_idr);
2717         msft_register(hdev);
2718
2719         return id;
2720
2721 err_wqueue:
2722         debugfs_remove_recursive(hdev->debugfs);
2723         destroy_workqueue(hdev->workqueue);
2724         destroy_workqueue(hdev->req_workqueue);
2725 err:
2726         ida_simple_remove(&hci_index_ida, hdev->id);
2727
2728         return error;
2729 }
2730 EXPORT_SYMBOL(hci_register_dev);
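
/* Example: typical driver-side registration (heavily abridged sketch; the
 * my_open/my_close/my_send callbacks are hypothetical driver functions):
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */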
2731
2732 /* Unregister HCI device */
2733 void hci_unregister_dev(struct hci_dev *hdev)
2734 {
2735         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2736
2737         mutex_lock(&hdev->unregister_lock);
2738         hci_dev_set_flag(hdev, HCI_UNREGISTER);
2739         mutex_unlock(&hdev->unregister_lock);
2740
2741         write_lock(&hci_dev_list_lock);
2742         list_del(&hdev->list);
2743         write_unlock(&hci_dev_list_lock);
2744
2745         cancel_work_sync(&hdev->power_on);
2746
2747         hci_cmd_sync_clear(hdev);
2748
2749         hci_unregister_suspend_notifier(hdev);
2750
2751         msft_unregister(hdev);
2752
2753         hci_dev_do_close(hdev);
2754
2755         if (!test_bit(HCI_INIT, &hdev->flags) &&
2756             !hci_dev_test_flag(hdev, HCI_SETUP) &&
2757             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2758                 hci_dev_lock(hdev);
2759                 mgmt_index_removed(hdev);
2760                 hci_dev_unlock(hdev);
2761         }
2762
2763         /* mgmt_index_removed should take care of emptying the
2764          * pending list */
2765         BUG_ON(!list_empty(&hdev->mgmt_pending));
2766
2767         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2768
2769         if (hdev->rfkill) {
2770                 rfkill_unregister(hdev->rfkill);
2771                 rfkill_destroy(hdev->rfkill);
2772         }
2773
2774         device_del(&hdev->dev);
2775         /* Actual cleanup is deferred until hci_release_dev(). */
2776         hci_dev_put(hdev);
2777 }
2778 EXPORT_SYMBOL(hci_unregister_dev);
2779
2780 /* Release HCI device */
2781 void hci_release_dev(struct hci_dev *hdev)
2782 {
2783         debugfs_remove_recursive(hdev->debugfs);
2784         kfree_const(hdev->hw_info);
2785         kfree_const(hdev->fw_info);
2786
2787         destroy_workqueue(hdev->workqueue);
2788         destroy_workqueue(hdev->req_workqueue);
2789
2790         hci_dev_lock(hdev);
2791         hci_bdaddr_list_clear(&hdev->reject_list);
2792         hci_bdaddr_list_clear(&hdev->accept_list);
2793         hci_uuids_clear(hdev);
2794         hci_link_keys_clear(hdev);
2795         hci_smp_ltks_clear(hdev);
2796         hci_smp_irks_clear(hdev);
2797         hci_remote_oob_data_clear(hdev);
2798         hci_adv_instances_clear(hdev);
2799         hci_adv_monitors_clear(hdev);
2800         hci_bdaddr_list_clear(&hdev->le_accept_list);
2801         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2802         hci_conn_params_clear_all(hdev);
2803         hci_discovery_filter_clear(hdev);
2804         hci_blocked_keys_clear(hdev);
2805         hci_dev_unlock(hdev);
2806
2807         ida_simple_remove(&hci_index_ida, hdev->id);
2808         kfree_skb(hdev->sent_cmd);
2809         kfree_skb(hdev->recv_event);
2810         kfree(hdev);
2811 }
2812 EXPORT_SYMBOL(hci_release_dev);
2813
2814 int hci_register_suspend_notifier(struct hci_dev *hdev)
2815 {
2816         int ret = 0;
2817
2818         if (!hdev->suspend_notifier.notifier_call &&
2819             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2820                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2821                 ret = register_pm_notifier(&hdev->suspend_notifier);
2822         }
2823
2824         return ret;
2825 }
2826
2827 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2828 {
2829         int ret = 0;
2830
2831         if (hdev->suspend_notifier.notifier_call) {
2832                 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2833                 if (!ret)
2834                         hdev->suspend_notifier.notifier_call = NULL;
2835         }
2836
2837         return ret;
2838 }
2839
2840 /* Suspend HCI device */
2841 int hci_suspend_dev(struct hci_dev *hdev)
2842 {
2843         int ret;
2844
2845         bt_dev_dbg(hdev, "");
2846
2847         /* Suspend should only be handled while the device is powered. */
2848         if (!hdev_is_powered(hdev) ||
2849             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2850                 return 0;
2851
2852         /* If powering down don't attempt to suspend */
2853         if (mgmt_powering_down(hdev))
2854                 return 0;
2855
2856         hci_req_sync_lock(hdev);
2857         ret = hci_suspend_sync(hdev);
2858         hci_req_sync_unlock(hdev);
2859
2860         hci_clear_wake_reason(hdev);
2861         mgmt_suspending(hdev, hdev->suspend_state);
2862
2863         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2864         return ret;
2865 }
2866 EXPORT_SYMBOL(hci_suspend_dev);
2867
2868 /* Resume HCI device */
2869 int hci_resume_dev(struct hci_dev *hdev)
2870 {
2871         int ret;
2872
2873         bt_dev_dbg(hdev, "");
2874
2875         /* Resume should only be handled while the device is powered. */
2876         if (!hdev_is_powered(hdev) ||
2877             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2878                 return 0;
2879
2880         /* If powering down don't attempt to resume */
2881         if (mgmt_powering_down(hdev))
2882                 return 0;
2883
2884         hci_req_sync_lock(hdev);
2885         ret = hci_resume_sync(hdev);
2886         hci_req_sync_unlock(hdev);
2887
2888         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2889                       hdev->wake_addr_type);
2890
2891         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2892         return ret;
2893 }
2894 EXPORT_SYMBOL(hci_resume_dev);
2895
2896 /* Reset HCI device */
2897 int hci_reset_dev(struct hci_dev *hdev)
2898 {
2899         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2900         struct sk_buff *skb;
2901
2902         skb = bt_skb_alloc(3, GFP_ATOMIC);
2903         if (!skb)
2904                 return -ENOMEM;
2905
2906         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2907         skb_put_data(skb, hw_err, 3);
2908
2909         bt_dev_err(hdev, "Injecting HCI hardware error event");
2910
2911         /* Send Hardware Error to upper stack */
2912         return hci_recv_frame(hdev, skb);
2913 }
2914 EXPORT_SYMBOL(hci_reset_dev);
2915
2916 /* Receive frame from HCI drivers */
2917 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2918 {
2919         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2920                       !test_bit(HCI_INIT, &hdev->flags))) {
2921                 kfree_skb(skb);
2922                 return -ENXIO;
2923         }
2924
2925         switch (hci_skb_pkt_type(skb)) {
2926         case HCI_EVENT_PKT:
2927                 break;
2928         case HCI_ACLDATA_PKT:
2929                 /* Detect if ISO packet has been sent as ACL */
2930                 if (hci_conn_num(hdev, ISO_LINK)) {
2931                         __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2932                         __u8 type;
2933
2934                         type = hci_conn_lookup_type(hdev, hci_handle(handle));
2935                         if (type == ISO_LINK)
2936                                 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2937                 }
2938                 break;
2939         case HCI_SCODATA_PKT:
2940                 break;
2941         case HCI_ISODATA_PKT:
2942                 break;
2943         default:
2944                 kfree_skb(skb);
2945                 return -EINVAL;
2946         }
2947
2948         /* Incoming skb */
2949         bt_cb(skb)->incoming = 1;
2950
2951         /* Time stamp */
2952         __net_timestamp(skb);
2953
2954         skb_queue_tail(&hdev->rx_q, skb);
2955         queue_work(hdev->workqueue, &hdev->rx_work);
2956
2957         return 0;
2958 }
2959 EXPORT_SYMBOL(hci_recv_frame);
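
/* Example: a transport driver handing a received HCI event up to the core
 * (illustrative sketch; "buf" and "len" would come from the transport):
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, len);
 *	return hci_recv_frame(hdev, skb);
 */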
2960
2961 /* Receive diagnostic message from HCI drivers */
2962 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2963 {
2964         /* Mark as diagnostic packet */
2965         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2966
2967         /* Time stamp */
2968         __net_timestamp(skb);
2969
2970         skb_queue_tail(&hdev->rx_q, skb);
2971         queue_work(hdev->workqueue, &hdev->rx_work);
2972
2973         return 0;
2974 }
2975 EXPORT_SYMBOL(hci_recv_diag);
2976
2977 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2978 {
2979         va_list vargs;
2980
2981         va_start(vargs, fmt);
2982         kfree_const(hdev->hw_info);
2983         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2984         va_end(vargs);
2985 }
2986 EXPORT_SYMBOL(hci_set_hw_info);
2987
2988 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2989 {
2990         va_list vargs;
2991
2992         va_start(vargs, fmt);
2993         kfree_const(hdev->fw_info);
2994         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2995         va_end(vargs);
2996 }
2997 EXPORT_SYMBOL(hci_set_fw_info);
2998
2999 /* ---- Interface to upper protocols ---- */
3000
3001 int hci_register_cb(struct hci_cb *cb)
3002 {
3003         BT_DBG("%p name %s", cb, cb->name);
3004
3005         mutex_lock(&hci_cb_list_lock);
3006         list_add_tail(&cb->list, &hci_cb_list);
3007         mutex_unlock(&hci_cb_list_lock);
3008
3009         return 0;
3010 }
3011 EXPORT_SYMBOL(hci_register_cb);
3012
3013 int hci_unregister_cb(struct hci_cb *cb)
3014 {
3015         BT_DBG("%p name %s", cb, cb->name);
3016
3017         mutex_lock(&hci_cb_list_lock);
3018         list_del(&cb->list);
3019         mutex_unlock(&hci_cb_list_lock);
3020
3021         return 0;
3022 }
3023 EXPORT_SYMBOL(hci_unregister_cb);
3024
3025 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3026 {
3027         int err;
3028
3029         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3030                skb->len);
3031
3032         /* Time stamp */
3033         __net_timestamp(skb);
3034
3035         /* Send copy to monitor */
3036         hci_send_to_monitor(hdev, skb);
3037
3038         if (atomic_read(&hdev->promisc)) {
3039                 /* Send copy to the sockets */
3040                 hci_send_to_sock(hdev, skb);
3041         }
3042
3043         /* Get rid of skb owner, prior to sending to the driver. */
3044         skb_orphan(skb);
3045
3046         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3047                 kfree_skb(skb);
3048                 return -EINVAL;
3049         }
3050
3051         err = hdev->send(hdev, skb);
3052         if (err < 0) {
3053                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3054                 kfree_skb(skb);
3055                 return err;
3056         }
3057
3058         return 0;
3059 }
3060
3061 /* Send HCI command */
3062 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3063                  const void *param)
3064 {
3065         struct sk_buff *skb;
3066
3067         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3068
3069         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3070         if (!skb) {
3071                 bt_dev_err(hdev, "no memory for command");
3072                 return -ENOMEM;
3073         }
3074
3075         /* Stand-alone HCI commands must be flagged as
3076          * single-command requests.
3077          */
3078         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3079
3080         skb_queue_tail(&hdev->cmd_q, skb);
3081         queue_work(hdev->workqueue, &hdev->cmd_work);
3082
3083         return 0;
3084 }
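
/* Example: queueing a parameterless command (illustrative sketch; the result
 * arrives asynchronously via the matching Command Complete event):
 *
 *	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 */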
3085
3086 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3087                    const void *param)
3088 {
3089         struct sk_buff *skb;
3090
3091         if (hci_opcode_ogf(opcode) != 0x3f) {
3092                 /* A controller receiving a command shall respond with either
3093                  * a Command Status Event or a Command Complete Event.
3094                  * Therefore, all standard HCI commands must be sent via the
3095                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3096                  * Some vendors do not comply with this rule for vendor-specific
3097                  * commands and do not return any event. We want to support
3098                  * unresponded commands for such cases only.
3099                  */
3100                 bt_dev_err(hdev, "unresponded command not supported");
3101                 return -EINVAL;
3102         }
3103
3104         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3105         if (!skb) {
3106                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3107                            opcode);
3108                 return -ENOMEM;
3109         }
3110
3111         hci_send_frame(hdev, skb);
3112
3113         return 0;
3114 }
3115 EXPORT_SYMBOL(__hci_cmd_send);
3116
3117 /* Get data from the previously sent command */
3118 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3119 {
3120         struct hci_command_hdr *hdr;
3121
3122         if (!hdev->sent_cmd)
3123                 return NULL;
3124
3125         hdr = (void *) hdev->sent_cmd->data;
3126
3127         if (hdr->opcode != cpu_to_le16(opcode))
3128                 return NULL;
3129
3130         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3131
3132         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3133 }
3134
3135 /* Get data from last received event */
3136 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3137 {
3138         struct hci_event_hdr *hdr;
3139         int offset;
3140
3141         if (!hdev->recv_event)
3142                 return NULL;
3143
3144         hdr = (void *)hdev->recv_event->data;
3145         offset = sizeof(*hdr);
3146
3147         if (hdr->evt != event) {
3148                 /* In case of an LE meta event, check whether the subevent matches */
3149                 if (hdr->evt == HCI_EV_LE_META) {
3150                         struct hci_ev_le_meta *ev;
3151
3152                         ev = (void *)hdev->recv_event->data + offset;
3153                         offset += sizeof(*ev);
3154                         if (ev->subevent == event)
3155                                 goto found;
3156                 }
3157                 return NULL;
3158         }
3159
3160 found:
3161         bt_dev_dbg(hdev, "event 0x%2.2x", event);
3162
3163         return hdev->recv_event->data + offset;
3164 }
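
/* Example: fetching the payload of the last LE Connection Complete subevent,
 * if that is indeed what was received (illustrative sketch):
 *
 *	struct hci_ev_le_conn_complete *ev;
 *
 *	ev = hci_recv_event_data(hdev, HCI_EV_LE_CONN_COMPLETE);
 *	if (ev)
 *		handle = __le16_to_cpu(ev->handle);
 */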
3165
3166 /* Send ACL data */
3167 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3168 {
3169         struct hci_acl_hdr *hdr;
3170         int len = skb->len;
3171
3172         skb_push(skb, HCI_ACL_HDR_SIZE);
3173         skb_reset_transport_header(skb);
3174         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3175         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3176         hdr->dlen   = cpu_to_le16(len);
3177 }
3178
3179 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3180                           struct sk_buff *skb, __u16 flags)
3181 {
3182         struct hci_conn *conn = chan->conn;
3183         struct hci_dev *hdev = conn->hdev;
3184         struct sk_buff *list;
3185
3186         skb->len = skb_headlen(skb);
3187         skb->data_len = 0;
3188
3189         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3190
3191         switch (hdev->dev_type) {
3192         case HCI_PRIMARY:
3193                 hci_add_acl_hdr(skb, conn->handle, flags);
3194                 break;
3195         case HCI_AMP:
3196                 hci_add_acl_hdr(skb, chan->handle, flags);
3197                 break;
3198         default:
3199                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3200                 return;
3201         }
3202
3203         list = skb_shinfo(skb)->frag_list;
3204         if (!list) {
3205                 /* Non fragmented */
3206                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3207
3208                 skb_queue_tail(queue, skb);
3209         } else {
3210                 /* Fragmented */
3211                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3212
3213                 skb_shinfo(skb)->frag_list = NULL;
3214
3215                 /* Queue all fragments atomically. We need to use spin_lock_bh
3216                  * here because of 6LoWPAN links, where this function can be
3217                  * called from softirq context; taking a plain spin lock there
3218                  * could cause deadlocks.
3219                  */
3220                 spin_lock_bh(&queue->lock);
3221
3222                 __skb_queue_tail(queue, skb);
3223
3224                 flags &= ~ACL_START;
3225                 flags |= ACL_CONT;
3226                 do {
3227                         skb = list; list = list->next;
3228
3229                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3230                         hci_add_acl_hdr(skb, conn->handle, flags);
3231
3232                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3233
3234                         __skb_queue_tail(queue, skb);
3235                 } while (list);
3236
3237                 spin_unlock_bh(&queue->lock);
3238         }
3239 }
3240
3241 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3242 {
3243         struct hci_dev *hdev = chan->conn->hdev;
3244
3245         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3246
3247         hci_queue_acl(chan, &chan->data_q, skb, flags);
3248
3249         queue_work(hdev->workqueue, &hdev->tx_work);
3250 }
3251
3252 /* Send SCO data */
3253 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3254 {
3255         struct hci_dev *hdev = conn->hdev;
3256         struct hci_sco_hdr hdr;
3257
3258         BT_DBG("%s len %d", hdev->name, skb->len);
3259
3260         hdr.handle = cpu_to_le16(conn->handle);
3261         hdr.dlen   = skb->len;
3262
3263         skb_push(skb, HCI_SCO_HDR_SIZE);
3264         skb_reset_transport_header(skb);
3265         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3266
3267         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3268
3269         skb_queue_tail(&conn->data_q, skb);
3270         queue_work(hdev->workqueue, &hdev->tx_work);
3271 }
3272
3273 /* Send ISO data */
3274 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3275 {
3276         struct hci_iso_hdr *hdr;
3277         int len = skb->len;
3278
3279         skb_push(skb, HCI_ISO_HDR_SIZE);
3280         skb_reset_transport_header(skb);
3281         hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3282         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3283         hdr->dlen   = cpu_to_le16(len);
3284 }
3285
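     /* Queue an outgoing ISO SDU, fragmenting when a frag_list is present.
      * The packet-boundary flag in each header tells the controller how the
      * fragments relate: ISO_SINGLE for a complete SDU, or ISO_START,
      * ISO_CONT and ISO_END for the first, middle and last fragments. The
      * second argument of hci_iso_flags_pack() is the timestamp flag,
      * which is not used here.
      */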
3286 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3287                           struct sk_buff *skb)
3288 {
3289         struct hci_dev *hdev = conn->hdev;
3290         struct sk_buff *list;
3291         __u16 flags;
3292
3293         skb->len = skb_headlen(skb);
3294         skb->data_len = 0;
3295
3296         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3297
3298         list = skb_shinfo(skb)->frag_list;
3299
3300         flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3301         hci_add_iso_hdr(skb, conn->handle, flags);
3302
3303         if (!list) {
3304                 /* Non-fragmented */
3305                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3306
3307                 skb_queue_tail(queue, skb);
3308         } else {
3309                 /* Fragmented */
3310                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3311
3312                 skb_shinfo(skb)->frag_list = NULL;
3313
3314                 __skb_queue_tail(queue, skb);
3315
3316                 do {
3317                         skb = list; list = list->next;
3318
3319                         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3320                         flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3321                                                    0x00);
3322                         hci_add_iso_hdr(skb, conn->handle, flags);
3323
3324                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3325
3326                         __skb_queue_tail(queue, skb);
3327                 } while (list);
3328         }
3329 }
3330
3331 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3332 {
3333         struct hci_dev *hdev = conn->hdev;
3334
3335         BT_DBG("%s len %d", hdev->name, skb->len);
3336
3337         hci_queue_iso(conn, &conn->data_q, skb);
3338
3339         queue_work(hdev->workqueue, &hdev->tx_work);
3340 }
3341
3342 /* ---- HCI TX task (outgoing data) ---- */
3343
3344 /* HCI Connection scheduler */
3345 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3346 {
3347         struct hci_dev *hdev;
3348         int cnt, q;
3349
3350         if (!conn) {
3351                 *quote = 0;
3352                 return;
3353         }
3354
3355         hdev = conn->hdev;
3356
3357         switch (conn->type) {
3358         case ACL_LINK:
3359                 cnt = hdev->acl_cnt;
3360                 break;
3361         case AMP_LINK:
3362                 cnt = hdev->block_cnt;
3363                 break;
3364         case SCO_LINK:
3365         case ESCO_LINK:
3366                 cnt = hdev->sco_cnt;
3367                 break;
3368         case LE_LINK:
3369                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3370                 break;
3371         case ISO_LINK:
3372                 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3373                         hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3374                 break;
3375         default:
3376                 cnt = 0;
3377                 bt_dev_err(hdev, "unknown link type %d", conn->type);
3378         }
3379
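             /* Fair share: split the free buffer count evenly across the
              * active connections, but always grant at least one packet so
              * no connection is starved completely.
              */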
3380         q = cnt / num;
3381         *quote = q ? q : 1;
3382 }
3383
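     /* Find the connection of the given type that has queued data and the
      * lowest number of packets in flight, approximating round-robin
      * service across connections.
      */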
3384 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3385                                      int *quote)
3386 {
3387         struct hci_conn_hash *h = &hdev->conn_hash;
3388         struct hci_conn *conn = NULL, *c;
3389         unsigned int num = 0, min = ~0;
3390
3391         /* We don't have to lock the device here. Connections are always
3392          * added and removed with the TX task disabled. */
3393
3394         rcu_read_lock();
3395
3396         list_for_each_entry_rcu(c, &h->list, list) {
3397                 if (c->type != type || skb_queue_empty(&c->data_q))
3398                         continue;
3399
3400                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3401                         continue;
3402
3403                 num++;
3404
3405                 if (c->sent < min) {
3406                         min  = c->sent;
3407                         conn = c;
3408                 }
3409
3410                 if (hci_conn_num(hdev, type) == num)
3411                         break;
3412         }
3413
3414         rcu_read_unlock();
3415
3416         hci_quote_sent(conn, num, quote);
3417
3418         BT_DBG("conn %p quote %d", conn, *quote);
3419         return conn;
3420 }
3421
3422 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3423 {
3424         struct hci_conn_hash *h = &hdev->conn_hash;
3425         struct hci_conn *c;
3426
3427         bt_dev_err(hdev, "link tx timeout");
3428
3429         rcu_read_lock();
3430
3431         /* Kill stalled connections */
3432         list_for_each_entry_rcu(c, &h->list, list) {
3433                 if (c->type == type && c->sent) {
3434                         bt_dev_err(hdev, "killing stalled connection %pMR",
3435                                    &c->dst);
3436                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3437                 }
3438         }
3439
3440         rcu_read_unlock();
3441 }
3442
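     /* Channel-level variant of hci_low_sent(): among all channels of the
      * given link type with queued data, prefer the highest skb priority
      * and break ties by picking the connection with the fewest packets
      * in flight.
      */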
3443 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3444                                       int *quote)
3445 {
3446         struct hci_conn_hash *h = &hdev->conn_hash;
3447         struct hci_chan *chan = NULL;
3448         unsigned int num = 0, min = ~0, cur_prio = 0;
3449         struct hci_conn *conn;
3450         int conn_num = 0;
3451
3452         BT_DBG("%s", hdev->name);
3453
3454         rcu_read_lock();
3455
3456         list_for_each_entry_rcu(conn, &h->list, list) {
3457                 struct hci_chan *tmp;
3458
3459                 if (conn->type != type)
3460                         continue;
3461
3462                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3463                         continue;
3464
3465                 conn_num++;
3466
3467                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3468                         struct sk_buff *skb;
3469
3470                         if (skb_queue_empty(&tmp->data_q))
3471                                 continue;
3472
3473                         skb = skb_peek(&tmp->data_q);
3474                         if (skb->priority < cur_prio)
3475                                 continue;
3476
3477                         if (skb->priority > cur_prio) {
3478                                 num = 0;
3479                                 min = ~0;
3480                                 cur_prio = skb->priority;
3481                         }
3482
3483                         num++;
3484
3485                         if (conn->sent < min) {
3486                                 min  = conn->sent;
3487                                 chan = tmp;
3488                         }
3489                 }
3490
3491                 if (hci_conn_num(hdev, type) == conn_num)
3492                         break;
3493         }
3494
3495         rcu_read_unlock();
3496
3497         if (!chan)
3498                 return NULL;
3499
3500         hci_quote_sent(chan->conn, num, quote);
3501
3502         BT_DBG("chan %p quote %d", chan, *quote);
3503         return chan;
3504 }
3505
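     /* Counterpart to the priority handling in hci_chan_sent(): after a
      * scheduling round, promote the head skb of any channel that sent
      * nothing to (HCI_PRIO_MAX - 1), so low-priority traffic cannot be
      * starved indefinitely.
      */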
3506 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3507 {
3508         struct hci_conn_hash *h = &hdev->conn_hash;
3509         struct hci_conn *conn;
3510         int num = 0;
3511
3512         BT_DBG("%s", hdev->name);
3513
3514         rcu_read_lock();
3515
3516         list_for_each_entry_rcu(conn, &h->list, list) {
3517                 struct hci_chan *chan;
3518
3519                 if (conn->type != type)
3520                         continue;
3521
3522                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3523                         continue;
3524
3525                 num++;
3526
3527                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3528                         struct sk_buff *skb;
3529
3530                         if (chan->sent) {
3531                                 chan->sent = 0;
3532                                 continue;
3533                         }
3534
3535                         if (skb_queue_empty(&chan->data_q))
3536                                 continue;
3537
3538                         skb = skb_peek(&chan->data_q);
3539                         if (skb->priority >= HCI_PRIO_MAX - 1)
3540                                 continue;
3541
3542                         skb->priority = HCI_PRIO_MAX - 1;
3543
3544                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3545                                skb->priority);
3546                 }
3547
3548                 if (hci_conn_num(hdev, type) == num)
3549                         break;
3550         }
3551
3552         rcu_read_unlock();
3554 }
3555
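     /* For block-based flow control the controller accounts in fixed-size
      * buffer blocks rather than whole packets. For example, with a
      * 64-byte block_len, a 340-byte payload needs
      * DIV_ROUND_UP(340, 64) = 6 blocks.
      */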
3556 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3557 {
3558         /* Calculate count of blocks used by this packet */
3559         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3560 }
3561
3562 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3563 {
3564         unsigned long last_tx;
3565
3566         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3567                 return;
3568
3569         switch (type) {
3570         case LE_LINK:
3571                 last_tx = hdev->le_last_tx;
3572                 break;
3573         default:
3574                 last_tx = hdev->acl_last_tx;
3575                 break;
3576         }
3577
3578         /* The TX timeout must be longer than the maximum link supervision
3579          * timeout (40.9 seconds).
3580          */
3581         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3582                 hci_link_tx_to(hdev, type);
3583 }
3584
3585 /* Schedule SCO */
3586 static void hci_sched_sco(struct hci_dev *hdev)
3587 {
3588         struct hci_conn *conn;
3589         struct sk_buff *skb;
3590         int quote;
3591
3592         BT_DBG("%s", hdev->name);
3593
3594         if (!hci_conn_num(hdev, SCO_LINK))
3595                 return;
3596
3597         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3598                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3599                         BT_DBG("skb %p len %d", skb, skb->len);
3600                         hci_send_frame(hdev, skb);
3601
3602                         conn->sent++;
3603                         if (conn->sent == ~0)
3604                                 conn->sent = 0;
3605                 }
3606         }
3607 }
3608
3609 static void hci_sched_esco(struct hci_dev *hdev)
3610 {
3611         struct hci_conn *conn;
3612         struct sk_buff *skb;
3613         int quote;
3614
3615         BT_DBG("%s", hdev->name);
3616
3617         if (!hci_conn_num(hdev, ESCO_LINK))
3618                 return;
3619
3620         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3621                                                      &quote))) {
3622                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3623                         BT_DBG("skb %p len %d", skb, skb->len);
3624                         hci_send_frame(hdev, skb);
3625
3626                         conn->sent++;
3627                         if (conn->sent == ~0)
3628                                 conn->sent = 0;
3629                 }
3630         }
3631 }
3632
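     /* Packet-based ACL scheduling: every transmitted frame consumes one
      * slot from hdev->acl_cnt, the controller's free packet buffers as
      * replenished by Number Of Completed Packets events. SCO/eSCO is
      * re-scheduled after each frame so audio is not delayed behind bulk
      * ACL data.
      */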
3633 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3634 {
3635         unsigned int cnt = hdev->acl_cnt;
3636         struct hci_chan *chan;
3637         struct sk_buff *skb;
3638         int quote;
3639
3640         __check_timeout(hdev, cnt, ACL_LINK);
3641
3642         while (hdev->acl_cnt &&
3643                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3644                 u32 priority = (skb_peek(&chan->data_q))->priority;
3645                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3646                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3647                                skb->len, skb->priority);
3648
3649                         /* Stop if priority has changed */
3650                         if (skb->priority < priority)
3651                                 break;
3652
3653                         skb = skb_dequeue(&chan->data_q);
3654
3655                         hci_conn_enter_active_mode(chan->conn,
3656                                                    bt_cb(skb)->force_active);
3657
3658                         hci_send_frame(hdev, skb);
3659                         hdev->acl_last_tx = jiffies;
3660
3661                         hdev->acl_cnt--;
3662                         chan->sent++;
3663                         chan->conn->sent++;
3664
3665                         /* Send pending SCO packets right away */
3666                         hci_sched_sco(hdev);
3667                         hci_sched_esco(hdev);
3668                 }
3669         }
3670
3671         if (cnt != hdev->acl_cnt)
3672                 hci_prio_recalculate(hdev, ACL_LINK);
3673 }
3674
3675 static void hci_sched_acl_blk(struct hci_dev *hdev)
3676 {
3677         unsigned int cnt = hdev->block_cnt;
3678         struct hci_chan *chan;
3679         struct sk_buff *skb;
3680         int quote;
3681         u8 type;
3682
3683         BT_DBG("%s", hdev->name);
3684
3685         if (hdev->dev_type == HCI_AMP)
3686                 type = AMP_LINK;
3687         else
3688                 type = ACL_LINK;
3689
3690         __check_timeout(hdev, cnt, type);
3691
3692         while (hdev->block_cnt > 0 &&
3693                (chan = hci_chan_sent(hdev, type, &quote))) {
3694                 u32 priority = (skb_peek(&chan->data_q))->priority;
3695                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3696                         int blocks;
3697
3698                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3699                                skb->len, skb->priority);
3700
3701                         /* Stop if priority has changed */
3702                         if (skb->priority < priority)
3703                                 break;
3704
3705                         skb = skb_dequeue(&chan->data_q);
3706
3707                         blocks = __get_blocks(hdev, skb);
3708                         if (blocks > hdev->block_cnt)
3709                                 return;
3710
3711                         hci_conn_enter_active_mode(chan->conn,
3712                                                    bt_cb(skb)->force_active);
3713
3714                         hci_send_frame(hdev, skb);
3715                         hdev->acl_last_tx = jiffies;
3716
3717                         hdev->block_cnt -= blocks;
3718                         quote -= blocks;
3719
3720                         chan->sent += blocks;
3721                         chan->conn->sent += blocks;
3722                 }
3723         }
3724
3725         if (cnt != hdev->block_cnt)
3726                 hci_prio_recalculate(hdev, type);
3727 }
3728
3729 static void hci_sched_acl(struct hci_dev *hdev)
3730 {
3731         BT_DBG("%s", hdev->name);
3732
3733         /* A BR/EDR controller with no ACL links has nothing to schedule */
3734         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3735                 return;
3736
3737         /* No AMP link over AMP controller */
3738         /* An AMP controller with no AMP links has nothing to schedule */
3739                 return;
3740
3741         switch (hdev->flow_ctl_mode) {
3742         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3743                 hci_sched_acl_pkt(hdev);
3744                 break;
3745
3746         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3747                 hci_sched_acl_blk(hdev);
3748                 break;
3749         }
3750 }
3751
3752 static void hci_sched_le(struct hci_dev *hdev)
3753 {
3754         struct hci_chan *chan;
3755         struct sk_buff *skb;
3756         int quote, cnt, tmp;
3757
3758         BT_DBG("%s", hdev->name);
3759
3760         if (!hci_conn_num(hdev, LE_LINK))
3761                 return;
3762
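             /* Controllers without a dedicated LE buffer pool (le_pkts == 0)
              * share the BR/EDR ACL buffers, so fall back to acl_cnt there.
              */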
3763         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3764
3765         __check_timeout(hdev, cnt, LE_LINK);
3766
3767         tmp = cnt;
3768         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3769                 u32 priority = (skb_peek(&chan->data_q))->priority;
3770                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3771                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3772                                skb->len, skb->priority);
3773
3774                         /* Stop if priority has changed */
3775                         if (skb->priority < priority)
3776                                 break;
3777
3778                         skb = skb_dequeue(&chan->data_q);
3779
3780                         hci_send_frame(hdev, skb);
3781                         hdev->le_last_tx = jiffies;
3782
3783                         cnt--;
3784                         chan->sent++;
3785                         chan->conn->sent++;
3786
3787                         /* Send pending SCO packets right away */
3788                         hci_sched_sco(hdev);
3789                         hci_sched_esco(hdev);
3790                 }
3791         }
3792
3793         if (hdev->le_pkts)
3794                 hdev->le_cnt = cnt;
3795         else
3796                 hdev->acl_cnt = cnt;
3797
3798         if (cnt != tmp)
3799                 hci_prio_recalculate(hdev, LE_LINK);
3800 }
3801
3802 /* Schedule ISO */
3803 static void hci_sched_iso(struct hci_dev *hdev)
3804 {
3805         struct hci_conn *conn;
3806         struct sk_buff *skb;
3807         int quote, *cnt;
3808
3809         BT_DBG("%s", hdev->name);
3810
3811         if (!hci_conn_num(hdev, ISO_LINK))
3812                 return;
3813
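             /* Pick the buffer pool that ISO data is accounted against: a
              * dedicated ISO pool if the controller has one, otherwise the
              * LE pool, and as a last resort the shared ACL pool.
              */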
3814         cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3815                 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3816         while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3817                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3818                         BT_DBG("skb %p len %d", skb, skb->len);
3819                         hci_send_frame(hdev, skb);
3820
3821                         conn->sent++;
3822                         if (conn->sent == ~0)
3823                                 conn->sent = 0;
3824                         (*cnt)--;
3825                 }
3826         }
3827 }
3828
3829 static void hci_tx_work(struct work_struct *work)
3830 {
3831         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3832         struct sk_buff *skb;
3833
3834         BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3835                hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3836
3837         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3838                 /* Schedule queues and send data to the HCI driver */
3839                 hci_sched_sco(hdev);
3840                 hci_sched_esco(hdev);
3841                 hci_sched_iso(hdev);
3842                 hci_sched_acl(hdev);
3843                 hci_sched_le(hdev);
3844         }
3845
3846         /* Send next queued raw (unknown type) packet */
3847         while ((skb = skb_dequeue(&hdev->raw_q)))
3848                 hci_send_frame(hdev, skb);
3849 }
3850
3851 /* ----- HCI RX task (incoming data processing) ----- */
3852
3853 /* ACL data packet */
3854 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3855 {
3856         struct hci_acl_hdr *hdr = (void *) skb->data;
3857         struct hci_conn *conn;
3858         __u16 handle, flags;
3859
3860         skb_pull(skb, HCI_ACL_HDR_SIZE);
3861
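             /* The 16-bit handle field carries both the 12-bit connection
              * handle and the flag bits; hci_flags()/hci_handle() undo the
              * packing done by hci_add_acl_hdr() on the TX side.
              */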
3862         handle = __le16_to_cpu(hdr->handle);
3863         flags  = hci_flags(handle);
3864         handle = hci_handle(handle);
3865
3866         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3867                handle, flags);
3868
3869         hdev->stat.acl_rx++;
3870
3871         hci_dev_lock(hdev);
3872         conn = hci_conn_hash_lookup_handle(hdev, handle);
3873         hci_dev_unlock(hdev);
3874
3875         if (conn) {
3876                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3877
3878                 /* Send to upper protocol */
3879                 l2cap_recv_acldata(conn, skb, flags);
3880                 return;
3881         } else {
3882                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3883                            handle);
3884         }
3885
3886         kfree_skb(skb);
3887 }
3888
3889 /* SCO data packet */
3890 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3891 {
3892         struct hci_sco_hdr *hdr = (void *) skb->data;
3893         struct hci_conn *conn;
3894         __u16 handle, flags;
3895
3896         skb_pull(skb, HCI_SCO_HDR_SIZE);
3897
3898         handle = __le16_to_cpu(hdr->handle);
3899         flags  = hci_flags(handle);
3900         handle = hci_handle(handle);
3901
3902         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3903                handle, flags);
3904
3905         hdev->stat.sco_rx++;
3906
3907         hci_dev_lock(hdev);
3908         conn = hci_conn_hash_lookup_handle(hdev, handle);
3909         hci_dev_unlock(hdev);
3910
3911         if (conn) {
3912                 /* Send to upper protocol */
3913                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3914                 sco_recv_scodata(conn, skb);
3915                 return;
3916         } else {
3917                 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3918                                        handle);
3919         }
3920
3921         kfree_skb(skb);
3922 }
3923
3924 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3925 {
3926         struct hci_iso_hdr *hdr;
3927         struct hci_conn *conn;
3928         __u16 handle, flags;
3929
3930         hdr = skb_pull_data(skb, sizeof(*hdr));
3931         if (!hdr) {
3932                 bt_dev_err(hdev, "ISO packet too small");
3933                 goto drop;
3934         }
3935
3936         handle = __le16_to_cpu(hdr->handle);
3937         flags  = hci_flags(handle);
3938         handle = hci_handle(handle);
3939
3940         bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3941                    handle, flags);
3942
3943         hci_dev_lock(hdev);
3944         conn = hci_conn_hash_lookup_handle(hdev, handle);
3945         hci_dev_unlock(hdev);
3946
3947         if (!conn) {
3948                 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3949                            handle);
3950                 goto drop;
3951         }
3952
3953         /* Send to upper protocol */
3954         iso_recv(conn, skb, flags);
3955         return;
3956
3957 drop:
3958         kfree_skb(skb);
3959 }
3960
3961 static bool hci_req_is_complete(struct hci_dev *hdev)
3962 {
3963         struct sk_buff *skb;
3964
3965         skb = skb_peek(&hdev->cmd_q);
3966         if (!skb)
3967                 return true;
3968
3969         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3970 }
3971
3972 static void hci_resend_last(struct hci_dev *hdev)
3973 {
3974         struct hci_command_hdr *sent;
3975         struct sk_buff *skb;
3976         u16 opcode;
3977
3978         if (!hdev->sent_cmd)
3979                 return;
3980
3981         sent = (void *) hdev->sent_cmd->data;
3982         opcode = __le16_to_cpu(sent->opcode);
3983         if (opcode == HCI_OP_RESET)
3984                 return;
3985
3986         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3987         if (!skb)
3988                 return;
3989
3990         skb_queue_head(&hdev->cmd_q, skb);
3991         queue_work(hdev->workqueue, &hdev->cmd_work);
3992 }
3993
3994 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3995                           hci_req_complete_t *req_complete,
3996                           hci_req_complete_skb_t *req_complete_skb)
3997 {
3998         struct sk_buff *skb;
3999         unsigned long flags;
4000
4001         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4002
4003         /* If the completed command doesn't match the last one that was
4004          * sent, we need to do special handling of it.
4005          */
4006         if (!hci_sent_cmd_data(hdev, opcode)) {
4007                 /* Some CSR based controllers generate a spontaneous
4008                  * reset complete event during init and any pending
4009                  * command will never be completed. In such a case we
4010                  * need to resend whatever was the last sent
4011                  * command.
4012                  */
4013                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4014                         hci_resend_last(hdev);
4015
4016                 return;
4017         }
4018
4019         /* If we reach this point this event matches the last command sent */
4020         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4021
4022         /* If the command succeeded and there are still more commands in
4023          * this request, the request is not yet complete.
4024          */
4025         if (!status && !hci_req_is_complete(hdev))
4026                 return;
4027
4028         /* If this was the last command in a request the complete
4029          * callback would be found in hdev->sent_cmd instead of the
4030          * command queue (hdev->cmd_q).
4031          */
4032         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4033                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4034                 return;
4035         }
4036
4037         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4038                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4039                 return;
4040         }
4041
4042         /* Remove all pending commands belonging to this request */
4043         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4044         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4045                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4046                         __skb_queue_head(&hdev->cmd_q, skb);
4047                         break;
4048                 }
4049
4050                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4051                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4052                 else
4053                         *req_complete = bt_cb(skb)->hci.req_complete;
4054                 dev_kfree_skb_irq(skb);
4055         }
4056         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4057 }
4058
4059 static void hci_rx_work(struct work_struct *work)
4060 {
4061         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4062         struct sk_buff *skb;
4063
4064         BT_DBG("%s", hdev->name);
4065
4066         /* The kcov_remote functions are used to collect packet-parsing
4067          * coverage from this background thread and to associate that
4068          * coverage with the syscall thread which originally injected
4069          * the packet. This helps with fuzzing the kernel.
4070          */
4071         for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4072                 kcov_remote_start_common(skb_get_kcov_handle(skb));
4073
4074                 /* Send copy to monitor */
4075                 hci_send_to_monitor(hdev, skb);
4076
4077                 if (atomic_read(&hdev->promisc)) {
4078                         /* Send copy to the sockets */
4079                         hci_send_to_sock(hdev, skb);
4080                 }
4081
4082                 /* If the device has been opened in HCI_USER_CHANNEL,
4083                  * userspace has exclusive access to the device.
4084                  * While the device is in HCI_INIT, we still need to
4085                  * process data packets coming from the driver so
4086                  * that it can complete its setup().
4087                  */
4088                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4089                     !test_bit(HCI_INIT, &hdev->flags)) {
4090                         kfree_skb(skb);
4091                         continue;
4092                 }
4093
4094                 if (test_bit(HCI_INIT, &hdev->flags)) {
4095                         /* Don't process data packets in this state. */
4096                         switch (hci_skb_pkt_type(skb)) {
4097                         case HCI_ACLDATA_PKT:
4098                         case HCI_SCODATA_PKT:
4099                         case HCI_ISODATA_PKT:
4100                                 kfree_skb(skb);
4101                                 continue;
4102                         }
4103                 }
4104
4105                 /* Process frame */
4106                 switch (hci_skb_pkt_type(skb)) {
4107                 case HCI_EVENT_PKT:
4108                         BT_DBG("%s Event packet", hdev->name);
4109                         hci_event_packet(hdev, skb);
4110                         break;
4111
4112                 case HCI_ACLDATA_PKT:
4113                         BT_DBG("%s ACL data packet", hdev->name);
4114                         hci_acldata_packet(hdev, skb);
4115                         break;
4116
4117                 case HCI_SCODATA_PKT:
4118                         BT_DBG("%s SCO data packet", hdev->name);
4119                         hci_scodata_packet(hdev, skb);
4120                         break;
4121
4122                 case HCI_ISODATA_PKT:
4123                         BT_DBG("%s ISO data packet", hdev->name);
4124                         hci_isodata_packet(hdev, skb);
4125                         break;
4126
4127                 default:
4128                         kfree_skb(skb);
4129                         break;
4130                 }
4131         }
4132 }
4133
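     /* Command scheduler: hdev->cmd_cnt reflects how many commands the
      * controller is currently willing to accept (usually one, replenished
      * via Command Complete/Command Status events). A clone of each sent
      * command is kept in hdev->sent_cmd so that its completion callback
      * can be matched up in hci_req_cmd_complete().
      */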
4134 static void hci_cmd_work(struct work_struct *work)
4135 {
4136         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4137         struct sk_buff *skb;
4138
4139         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4140                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4141
4142         /* Send queued commands */
4143         if (atomic_read(&hdev->cmd_cnt)) {
4144                 skb = skb_dequeue(&hdev->cmd_q);
4145                 if (!skb)
4146                         return;
4147
4148                 kfree_skb(hdev->sent_cmd);
4149
4150                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4151                 if (hdev->sent_cmd) {
4152                         int res;
4153                         if (hci_req_status_pend(hdev))
4154                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4155                         atomic_dec(&hdev->cmd_cnt);
4156
4157                         res = hci_send_frame(hdev, skb);
4158                         if (res < 0)
4159                                 __hci_cmd_sync_cancel(hdev, -res);
4160
4161                         rcu_read_lock();
4162                         if (test_bit(HCI_RESET, &hdev->flags) ||
4163                             hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4164                                 cancel_delayed_work(&hdev->cmd_timer);
4165                         else
4166                                 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4167                                                    HCI_CMD_TIMEOUT);
4168                         rcu_read_unlock();
4169                 } else {
4170                         skb_queue_head(&hdev->cmd_q, skb);
4171                         queue_work(hdev->workqueue, &hdev->cmd_work);
4172                 }
4173         }
4174 }