Bluetooth: hci_sync: add lock to protect HCI_UNREGISTER
[platform/kernel/linux-starfive.git] net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
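
/* Usage note (illustrative sketch, not part of this file): every
 * successful hci_dev_get() must be balanced by hci_dev_put(),
 * otherwise the device reference count never drops and the controller
 * cannot be unregistered. Assuming a valid index "id":
 *
 *        struct hci_dev *hdev = hci_dev_get(id);
 *
 *        if (!hdev)
 *                return -ENODEV;
 *        ...
 *        hci_dev_put(hdev);
 */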

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_passive_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary
         * buffer and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
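
/* Example (illustrative sketch, not part of this file): how userspace
 * typically reaches hci_inquiry() above, via the HCIINQUIRY ioctl on a
 * raw HCI socket. The buffer is a struct hci_inquiry_req header
 * followed by room for the inquiry_info replies, matching the
 * copy_from_user()/copy_to_user() layout above. The LAP here is the
 * General Inquiry Access Code 0x9e8b33 stored little-endian; device
 * index, length and error handling are assumptions.
 *
 *        int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *        char buf[sizeof(struct hci_inquiry_req) +
 *                 255 * sizeof(struct inquiry_info)];
 *        struct hci_inquiry_req *ir = (struct hci_inquiry_req *)buf;
 *
 *        ir->dev_id  = 0;
 *        ir->flags   = IREQ_CACHE_FLUSH;
 *        ir->lap[0]  = 0x33;
 *        ir->lap[1]  = 0x8b;
 *        ir->lap[2]  = 0x9e;
 *        ir->length  = 8;
 *        ir->num_rsp = 255;
 *        ioctl(dd, HCIINQUIRY, (unsigned long)buf);
 */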

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        ret = hci_dev_open_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
        int err;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        err = hci_dev_close_sync(hdev);

        hci_req_sync_unlock(hdev);

        return err;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        cancel_work_sync(&hdev->power_on);
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}
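
/* Example (illustrative sketch, not part of this file): hci_dev_open()
 * and hci_dev_close() back the HCIDEVUP and HCIDEVDOWN ioctls, which
 * is how tools like hciconfig power a controller up and down. Device
 * index 0 is an assumption; error handling is elided.
 *
 *        int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *        ioctl(ctl, HCIDEVUP, 0);
 *        ioctl(ctl, HCIDEVDOWN, 0);
 */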

static int hci_dev_do_reset(struct hci_dev *hdev)
{
        int ret;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Cancel these to avoid queueing non-chained pending work */
        hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
        /* Wait for
         *
         *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
         *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
         *
         * inside RCU section to see the flag or complete scheduling.
         */
        synchronize_rcu();
        /* Explicitly cancel works in case scheduled after setting the flag. */
        cancel_delayed_work(&hdev->cmd_timer);
        cancel_delayed_work(&hdev->ncmd_timer);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;
        hdev->le_cnt = 0;
        hdev->iso_cnt = 0;

        ret = hci_reset_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}
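
/* The flag-plus-synchronize_rcu() sequence above is a general
 * "close the gate, then sweep" pattern. A condensed sketch of the two
 * sides (illustrative only; the real reader is the event path that
 * re-arms the cmd/ncmd timers):
 *
 *        event path:
 *                rcu_read_lock();
 *                if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
 *                        queue_delayed_work(...);
 *                rcu_read_unlock();
 *
 *        reset path:
 *                hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
 *                synchronize_rcu();
 *                cancel_delayed_work(&hdev->cmd_timer);
 *                cancel_delayed_work(&hdev->ncmd_timer);
 *
 * After synchronize_rcu() returns, every reader either saw the flag
 * (and queued nothing) or finished queueing, so the explicit cancels
 * catch anything that slipped in before the flag was visible.
 */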

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        err = hci_dev_do_reset(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                ret = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                ret = -EOPNOTSUPP;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
        bool conn_changed, discov_changed;

        BT_DBG("%s scan 0x%02x", hdev->name, scan);

        if ((scan & SCAN_PAGE))
                conn_changed = !hci_dev_test_and_set_flag(hdev,
                                                          HCI_CONNECTABLE);
        else
                conn_changed = hci_dev_test_and_clear_flag(hdev,
                                                           HCI_CONNECTABLE);

        if ((scan & SCAN_INQUIRY)) {
                discov_changed = !hci_dev_test_and_set_flag(hdev,
                                                            HCI_DISCOVERABLE);
        } else {
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
                discov_changed = hci_dev_test_and_clear_flag(hdev,
                                                             HCI_DISCOVERABLE);
        }

        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        if (conn_changed || discov_changed) {
                /* In case this was disabled through mgmt */
                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                        hci_update_adv_data(hdev, hdev->cur_adv_instance);

                mgmt_new_settings(hdev);
        }
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT, NULL);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);

                /* Ensure that the connectable and discoverable states
                 * get correctly modified as this was a non-mgmt change.
                 */
                if (!err)
                        hci_update_passive_scan_state(hdev, dr.dev_opt);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                if (hdev->pkt_type == (__u16) dr.dev_opt)
                        break;

                hdev->pkt_type = (__u16) dr.dev_opt;
                mgmt_phy_configuration_changed(hdev, NULL);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}
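
/* Example (illustrative sketch, not part of this file): driving the
 * HCISETSCAN case above from userspace to enable page and inquiry
 * scan, the legacy equivalent of making the adapter connectable and
 * discoverable. Device index 0 is an assumption; error handling is
 * elided.
 *
 *        struct hci_dev_req dr = {
 *                .dev_id  = 0,
 *                .dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *        };
 *        int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *        ioctl(ctl, HCISETSCAN, (unsigned long)&dr);
 */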

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                unsigned long flags = hdev->flags;

                /* When the auto-off is configured it means the transport
                 * is running, but in that case still indicate that the
                 * device is actually down.
                 */
                if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                        flags &= ~BIT(HCI_UP);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}
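
/* Example (illustrative sketch, not part of this file): enumerating
 * controllers through hci_get_dev_list() above. dev_num is read first
 * and bounds how many hci_dev_req entries are written back; HCI_MAX_DEV
 * and the printf format are assumptions, error handling is elided.
 *
 *        struct hci_dev_list_req *dl;
 *        struct hci_dev_req *dr;
 *        int i, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *        dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *        dl->dev_num = HCI_MAX_DEV;
 *        dr = dl->dev_req;
 *
 *        ioctl(ctl, HCIGETDEVLIST, (unsigned long)dl);
 *        for (i = 0; i < dl->dev_num; i++)
 *                printf("hci%u flags 0x%x\n", dr[i].dev_id, dr[i].dev_opt);
 */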

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        unsigned long flags;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* When the auto-off is configured it means the transport
         * is running, but in that case still indicate that the
         * device is actually down.
         */
        if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                flags = hdev->flags & ~BIT(HCI_UP);
        else
                flags = hdev->flags;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags    = flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (blocked) {
                hci_dev_set_flag(hdev, HCI_RFKILLED);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG))
                        hci_dev_do_close(hdev);
        } else {
                hci_dev_clear_flag(hdev, HCI_RFKILLED);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};
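
/* Note (sketch, assuming the registration code later in this file
 * matches mainline): hdev->rfkill is created during hci_register_dev()
 * with these ops, roughly:
 *
 *        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *                                    RFKILL_TYPE_BLUETOOTH,
 *                                    &hci_rfkill_ops, hdev);
 *
 * so a block event from the rfkill core lands in hci_rfkill_set_block()
 * above.
 */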

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_UP, &hdev->flags) &&
            hci_dev_test_flag(hdev, HCI_MGMT) &&
            hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                cancel_delayed_work(&hdev->power_off);
                err = hci_powered_update_sync(hdev);
                mgmt_power_on(hdev, err);
                return;
        }

        err = hci_dev_do_open(hdev);
        if (err < 0) {
                hci_dev_lock(hdev);
                mgmt_set_powered_failed(hdev, err);
                hci_dev_unlock(hdev);
                return;
        }

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
            hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
            (hdev->dev_type == HCI_PRIMARY &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
                hci_dev_do_close(hdev);
        } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
                /* For unconfigured devices, set the HCI_RAW flag
                 * so that userspace can easily identify them.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        set_bit(HCI_RAW, &hdev->flags);

                /* For fully configured devices, this will send
                 * the Index Added event. For unconfigured devices,
                 * it will send an Unconfigured Index Added event.
                 *
                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
                 * and no event will be sent.
                 */
                mgmt_index_added(hdev);
        } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
                /* When the controller is now configured, then it
                 * is important to clear the HCI_RAW flag.
                 */
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        clear_bit(HCI_RAW, &hdev->flags);

                /* Powering on the controller with HCI_CONFIG set only
                 * happens with the transition from unconfigured to
                 * configured. This will send the Index Added event.
                 */
                mgmt_index_added(hdev);
        }
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

        BT_DBG("%s", hdev->name);

        if (hdev->hw_error)
                hdev->hw_error(hdev, hdev->hw_error_code);
        else
                bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

        if (hci_dev_do_close(hdev))
                return;

        hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
        struct link_key *key;

        list_for_each_entry(key, &hdev->link_keys, list) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
        }
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
        struct smp_irk *k;

        list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
        struct blocked_key *b;

        list_for_each_entry(b, &hdev->blocked_keys, list) {
                list_del_rcu(&b->list);
                kfree_rcu(b, rcu);
        }
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
        bool blocked = false;
        struct blocked_key *b;

        rcu_read_lock();
        list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
                if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
                        blocked = true;
                        break;
                }
        }

        rcu_read_unlock();
        return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) == 0) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev,
                                               HCI_BLOCKED_KEY_TYPE_LINKKEY,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "Link key blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* BR/EDR key derived using SC from an LE link */
        if (conn->type == LE_LINK)
                return true;

        /* Neither the local nor the remote side requested no-bonding */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

static u8 ltk_role(u8 type)
{
        if (type == SMP_LTK)
                return HCI_ROLE_MASTER;

        return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 addr_type, u8 role)
{
        struct smp_ltk *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
                        continue;

                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "LTK blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (!bacmp(&irk->rpa, rpa)) {
                        irk_to_return = irk;
                        goto done;
                }
        }

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (smp_irk_matches(hdev, irk->val, rpa)) {
                        bacpy(&irk->rpa, rpa);
                        irk_to_return = irk;
                        goto done;
                }
        }

done:
        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        /* Identity Address must be public or static random */
        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
                return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (addr_type == irk->addr_type &&
                    bacmp(bdaddr, &irk->bdaddr) == 0) {
                        irk_to_return = irk;
                        goto done;
                }
        }

done:

        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
                                  bdaddr_t *bdaddr, u8 *val, u8 type,
                                  u8 pin_len, bool *persistent)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (persistent)
                *persistent = hci_persistent_key(hdev, conn, type,
                                                 old_key_type);

        return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 type, u8 authenticated,
                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
        struct smp_ltk *key, *old_key;
        u8 role = ltk_role(type);

        old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->rand = rand;
        key->enc_size = enc_size;
        key->type = type;

        return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
        struct smp_irk *irk;

        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
        if (!irk) {
                irk = kzalloc(sizeof(*irk), GFP_KERNEL);
                if (!irk)
                        return NULL;

                bacpy(&irk->bdaddr, bdaddr);
                irk->addr_type = addr_type;

                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
        }

        memcpy(irk->val, val, 16);
        bacpy(&irk->rpa, rpa);

        return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del_rcu(&key->list);
        kfree_rcu(key, rcu);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct smp_ltk *k, *tmp;
        int removed = 0;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
                removed++;
        }

        return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct smp_ltk *k;
        struct smp_irk *irk;
        u8 addr_type;

        if (type == BDADDR_BREDR) {
                if (hci_find_link_key(hdev, bdaddr))
                        return true;
                return false;
        }

        /* Convert to HCI addr type which struct smp_ltk uses */
        if (type == BDADDR_LE_PUBLIC)
                addr_type = ADDR_LE_DEV_PUBLIC;
        else
                addr_type = ADDR_LE_DEV_RANDOM;

        irk = hci_get_irk(hdev, bdaddr, addr_type);
        if (irk) {
                bdaddr = &irk->bdaddr;
                addr_type = irk->addr_type;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            cmd_timer.work);

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
        } else {
                bt_dev_err(hdev, "command tx timeout");
        }

        if (hdev->cmd_timeout)
                hdev->cmd_timeout(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            ncmd_timer.work);

        bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

        /* During HCI_INIT phase no events can be injected if the ncmd timer
         * triggers since the procedure has its own timeout handling.
         */
        if (test_bit(HCI_INIT, &hdev->flags))
                return;

        /* This is an irrecoverable state, inject hardware error event */
        hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list) {
                if (bacmp(bdaddr, &data->bdaddr) != 0)
                        continue;
                if (data->bdaddr_type != bdaddr_type)
                        continue;
                return data;
        }

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 bdaddr_type)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

        list_del(&data->list);
        kfree(data);

        return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 bdaddr_type, u8 *hash192, u8 *rand192,
                            u8 *hash256, u8 *rand256)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data) {
                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                data->bdaddr_type = bdaddr_type;
                list_add(&data->list, &hdev->remote_oob_data);
        }

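        /* data->present is a bitmask of which OOB blocks below carry
         * valid values: bit 0 for the P-192 hash/rand pair and bit 1
         * for the P-256 pair (0x01 = P-192 only, 0x02 = P-256 only,
         * 0x03 = both).
         */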
        if (hash192 && rand192) {
                memcpy(data->hash192, hash192, sizeof(data->hash192));
                memcpy(data->rand192, rand192, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x03;
        } else {
                memset(data->hash192, 0, sizeof(data->hash192));
                memset(data->rand192, 0, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x02;
                else
                        data->present = 0x00;
        }

        if (hash256 && rand256) {
                memcpy(data->hash256, hash256, sizeof(data->hash256));
                memcpy(data->rand256, rand256, sizeof(data->rand256));
        } else {
                memset(data->hash256, 0, sizeof(data->hash256));
                memset(data->rand256, 0, sizeof(data->rand256));
                if (hash192 && rand192)
                        data->present = 0x01;
        }

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
                if (adv_instance->instance == instance)
                        return adv_instance;
        }

        return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *cur_instance;

        cur_instance = hci_find_adv_instance(hdev, instance);
        if (!cur_instance)
                return NULL;

        if (cur_instance == list_last_entry(&hdev->adv_instances,
                                            struct adv_info, list))
                return list_first_entry(&hdev->adv_instances,
                                        struct adv_info, list);
        else
                return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;
        BT_DBG("%s removing %d", hdev->name, instance);

        if (hdev->cur_adv_instance == instance) {
                if (hdev->adv_instance_timeout) {
                        cancel_delayed_work(&hdev->adv_instance_expire);
                        hdev->adv_instance_timeout = 0;
                }
                hdev->cur_adv_instance = 0x00;
        }

        cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

        list_del(&adv_instance->list);
        kfree(adv_instance);

        hdev->adv_instance_cnt--;

        return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
        struct adv_info *adv_instance, *n;

        list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
                adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
        struct adv_info *adv_instance, *n;

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }

        list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
                cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
                list_del(&adv_instance->list);
                kfree(adv_instance);
        }

        hdev->adv_instance_cnt = 0;
        hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
        struct adv_info *adv_instance = container_of(work, struct adv_info,
                                                     rpa_expired_cb.work);

        BT_DBG("");

        adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
                                      u32 flags, u16 adv_data_len, u8 *adv_data,
                                      u16 scan_rsp_len, u8 *scan_rsp_data,
                                      u16 timeout, u16 duration, s8 tx_power,
                                      u32 min_interval, u32 max_interval,
                                      u8 mesh_handle)
{
        struct adv_info *adv;

        adv = hci_find_adv_instance(hdev, instance);
        if (adv) {
                memset(adv->adv_data, 0, sizeof(adv->adv_data));
                memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
                memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
        } else {
                if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
                    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
                        return ERR_PTR(-EOVERFLOW);

                adv = kzalloc(sizeof(*adv), GFP_KERNEL);
                if (!adv)
                        return ERR_PTR(-ENOMEM);

                adv->pending = true;
                adv->instance = instance;
                list_add(&adv->list, &hdev->adv_instances);
                hdev->adv_instance_cnt++;
        }

        adv->flags = flags;
        adv->min_interval = min_interval;
        adv->max_interval = max_interval;
        adv->tx_power = tx_power;
        /* Defining a mesh_handle changes the timing units to ms,
         * rather than seconds, and ties the instance to the requested
         * mesh_tx queue.
         */
        adv->mesh = mesh_handle;

        hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
                                  scan_rsp_len, scan_rsp_data);
1755
1756         adv->timeout = timeout;
1757         adv->remaining_time = timeout;
1758
1759         if (duration == 0)
1760                 adv->duration = hdev->def_multi_adv_rotation_duration;
1761         else
1762                 adv->duration = duration;
1763
1764         INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1765
1766         BT_DBG("%s for instance %d", hdev->name, instance);
1767
1768         return adv;
1769 }
1770
1771 /* This function requires the caller holds hdev->lock */
1772 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1773                                       u32 flags, u8 data_len, u8 *data,
1774                                       u32 min_interval, u32 max_interval)
1775 {
1776         struct adv_info *adv;
1777
1778         adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1779                                    0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1780                                    min_interval, max_interval, 0);
1781         if (IS_ERR(adv))
1782                 return adv;
1783
1784         adv->periodic = true;
1785         adv->per_adv_data_len = data_len;
1786
1787         if (data)
1788                 memcpy(adv->per_adv_data, data, data_len);
1789
1790         return adv;
1791 }
1792
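/* Illustrative caller sketch for the two helpers above (flags and the
 * data buffers are hypothetical; a timeout of 0 means no timeout and a
 * duration of 0 selects the default rotation duration); hdev->lock must
 * be held as noted:
 *
 *      adv = hci_add_adv_instance(hdev, 0x01, flags,
 *                                 adv_data_len, adv_data,
 *                                 scan_rsp_len, scan_rsp_data, 0, 0,
 *                                 HCI_ADV_TX_POWER_NO_PREFERENCE,
 *                                 hdev->le_adv_min_interval,
 *                                 hdev->le_adv_max_interval, 0);
 *      if (IS_ERR(adv))
 *              return PTR_ERR(adv);
 */
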
1793 /* This function requires the caller holds hdev->lock */
1794 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1795                               u16 adv_data_len, u8 *adv_data,
1796                               u16 scan_rsp_len, u8 *scan_rsp_data)
1797 {
1798         struct adv_info *adv;
1799
1800         adv = hci_find_adv_instance(hdev, instance);
1801
1802         /* If advertisement doesn't exist, we can't modify its data */
1803         if (!adv)
1804                 return -ENOENT;
1805
1806         if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1807                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1808                 memcpy(adv->adv_data, adv_data, adv_data_len);
1809                 adv->adv_data_len = adv_data_len;
1810                 adv->adv_data_changed = true;
1811         }
1812
1813         if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1814                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1815                 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1816                 adv->scan_rsp_len = scan_rsp_len;
1817                 adv->scan_rsp_changed = true;
1818         }
1819
1820         /* Mark as changed if there are flags which would affect it */
1821         if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1822             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1823                 adv->scan_rsp_changed = true;
1824
1825         return 0;
1826 }
1827
1828 /* This function requires the caller holds hdev->lock */
1829 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1830 {
1831         u32 flags;
1832         struct adv_info *adv;
1833
1834         if (instance == 0x00) {
1835                 /* Instance 0 always manages the "Tx Power" and "Flags"
1836                  * fields
1837                  */
1838                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1839
1840                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1841                  * corresponds to the "connectable" instance flag.
1842                  */
1843                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1844                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1845
1846                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1847                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1848                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1849                         flags |= MGMT_ADV_FLAG_DISCOV;
1850
1851                 return flags;
1852         }
1853
1854         adv = hci_find_adv_instance(hdev, instance);
1855
1856         /* Return 0 for an invalid instance identifier. */
1857         if (!adv)
1858                 return 0;
1859
1860         return adv->flags;
1861 }
1862
1863 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1864 {
1865         struct adv_info *adv;
1866
1867         /* Instance 0x00 always sets the local name */
1868         if (instance == 0x00)
1869                 return true;
1870
1871         adv = hci_find_adv_instance(hdev, instance);
1872         if (!adv)
1873                 return false;
1874
1875         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1876             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1877                 return true;
1878
1879         return adv->scan_rsp_len ? true : false;
1880 }
1881
1882 /* This function requires the caller holds hdev->lock */
1883 void hci_adv_monitors_clear(struct hci_dev *hdev)
1884 {
1885         struct adv_monitor *monitor;
1886         int handle;
1887
1888         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1889                 hci_free_adv_monitor(hdev, monitor);
1890
1891         idr_destroy(&hdev->adv_monitors_idr);
1892 }
1893
1894 /* Frees the monitor structure and does some bookkeeping.
1895  * This function requires the caller holds hdev->lock.
1896  */
1897 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1898 {
1899         struct adv_pattern *pattern;
1900         struct adv_pattern *tmp;
1901
1902         if (!monitor)
1903                 return;
1904
1905         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1906                 list_del(&pattern->list);
1907                 kfree(pattern);
1908         }
1909
1910         if (monitor->handle)
1911                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1912
1913         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1914                 hdev->adv_monitors_cnt--;
1915                 mgmt_adv_monitor_removed(hdev, monitor->handle);
1916         }
1917
1918         kfree(monitor);
1919 }
1920
1921 /* Assigns a handle to the monitor and, if offloading is supported and
1922  * power is on, also attempts to forward the request to the controller.
1923  * This function requires the caller holds hci_req_sync_lock.
1924  */
1925 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1926 {
1927         int min, max, handle;
1928         int status = 0;
1929
1930         if (!monitor)
1931                 return -EINVAL;
1932
1933         hci_dev_lock(hdev);
1934
1935         min = HCI_MIN_ADV_MONITOR_HANDLE;
1936         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1937         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1938                            GFP_KERNEL);
1939
1940         hci_dev_unlock(hdev);
1941
1942         if (handle < 0)
1943                 return handle;
1944
1945         monitor->handle = handle;
1946
1947         if (!hdev_is_powered(hdev))
1948                 return status;
1949
1950         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1951         case HCI_ADV_MONITOR_EXT_NONE:
1952                 bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
1953                            monitor->handle, status);
1954                 /* Message was not forwarded to controller - not an error */
1955                 break;
1956
1957         case HCI_ADV_MONITOR_EXT_MSFT:
1958                 status = msft_add_monitor_pattern(hdev, monitor);
1959                 bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
1960                            monitor->handle, status);
1961                 break;
1962         }
1963
1964         return status;
1965 }
1966
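/* The idr_alloc() call above draws from the half-open range [min, max),
 * so with a hypothetical HCI_MIN_ADV_MONITOR_HANDLE of 1 and
 * HCI_MAX_ADV_MONITOR_NUM_HANDLES of 32 the valid monitor handles are
 * 1..32. Handle 0 is never handed out, which is why
 * hci_free_adv_monitor() can use a non-zero ->handle as the "was
 * registered in the IDR" test.
 */
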
1967 /* Attempts to tell the controller to remove the monitor and then frees it.
1968  * If the controller doesn't have a corresponding handle, remove it anyway.
1969  * This function requires the caller holds hci_req_sync_lock.
1970  */
1971 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1972                                   struct adv_monitor *monitor)
1973 {
1974         int status = 0;
1975
1976         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1977         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1978                 bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
1979                            monitor->handle, status);
1980                 goto free_monitor;
1981
1982         case HCI_ADV_MONITOR_EXT_MSFT:
1983                 status = msft_remove_monitor(hdev, monitor);
1984                 bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
1985                            hdev->name, monitor->handle, status);
1986                 break;
1987         }
1988
1989         /* If no matching handle is registered, just free the monitor */
1990         if (status == -ENOENT)
1991                 goto free_monitor;
1992
1993         return status;
1994
1995 free_monitor:
1996         if (status == -ENOENT)
1997                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1998                             monitor->handle);
1999         hci_free_adv_monitor(hdev, monitor);
2000
2001         return status;
2002 }
2003
2004 /* This function requires the caller holds hci_req_sync_lock */
2005 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2006 {
2007         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2008
2009         if (!monitor)
2010                 return -EINVAL;
2011
2012         return hci_remove_adv_monitor(hdev, monitor);
2013 }
2014
2015 /* This function requires the caller holds hci_req_sync_lock */
2016 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2017 {
2018         struct adv_monitor *monitor;
2019         int idr_next_id = 0;
2020         int status = 0;
2021
2022         while (1) {
2023                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2024                 if (!monitor)
2025                         break;
2026
2027                 status = hci_remove_adv_monitor(hdev, monitor);
2028                 if (status)
2029                         return status;
2030
2031                 idr_next_id++;
2032         }
2033
2034         return status;
2035 }
2036
2037 /* This function requires the caller holds hdev->lock */
2038 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2039 {
2040         return !idr_is_empty(&hdev->adv_monitors_idr);
2041 }
2042
2043 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2044 {
2045         if (msft_monitor_supported(hdev))
2046                 return HCI_ADV_MONITOR_EXT_MSFT;
2047
2048         return HCI_ADV_MONITOR_EXT_NONE;
2049 }
2050
2051 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2052                                          bdaddr_t *bdaddr, u8 type)
2053 {
2054         struct bdaddr_list *b;
2055
2056         list_for_each_entry(b, bdaddr_list, list) {
2057                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2058                         return b;
2059         }
2060
2061         return NULL;
2062 }
2063
2064 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2065                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2066                                 u8 type)
2067 {
2068         struct bdaddr_list_with_irk *b;
2069
2070         list_for_each_entry(b, bdaddr_list, list) {
2071                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2072                         return b;
2073         }
2074
2075         return NULL;
2076 }
2077
2078 struct bdaddr_list_with_flags *
2079 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2080                                   bdaddr_t *bdaddr, u8 type)
2081 {
2082         struct bdaddr_list_with_flags *b;
2083
2084         list_for_each_entry(b, bdaddr_list, list) {
2085                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2086                         return b;
2087         }
2088
2089         return NULL;
2090 }
2091
2092 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2093 {
2094         struct bdaddr_list *b, *n;
2095
2096         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2097                 list_del(&b->list);
2098                 kfree(b);
2099         }
2100 }
2101
2102 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2103 {
2104         struct bdaddr_list *entry;
2105
2106         if (!bacmp(bdaddr, BDADDR_ANY))
2107                 return -EBADF;
2108
2109         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2110                 return -EEXIST;
2111
2112         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2113         if (!entry)
2114                 return -ENOMEM;
2115
2116         bacpy(&entry->bdaddr, bdaddr);
2117         entry->bdaddr_type = type;
2118
2119         list_add(&entry->list, list);
2120
2121         return 0;
2122 }
2123
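/* Minimal usage sketch for the bdaddr list helpers; the address is a
 * hypothetical local variable and BDADDR_BREDR is the BR/EDR address
 * type:
 *
 *      err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 *      if (err && err != -EEXIST)
 *              return err;
 *      ...
 *      hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
 */
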
2124 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2125                                         u8 type, u8 *peer_irk, u8 *local_irk)
2126 {
2127         struct bdaddr_list_with_irk *entry;
2128
2129         if (!bacmp(bdaddr, BDADDR_ANY))
2130                 return -EBADF;
2131
2132         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2133                 return -EEXIST;
2134
2135         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2136         if (!entry)
2137                 return -ENOMEM;
2138
2139         bacpy(&entry->bdaddr, bdaddr);
2140         entry->bdaddr_type = type;
2141
2142         if (peer_irk)
2143                 memcpy(entry->peer_irk, peer_irk, 16);
2144
2145         if (local_irk)
2146                 memcpy(entry->local_irk, local_irk, 16);
2147
2148         list_add(&entry->list, list);
2149
2150         return 0;
2151 }
2152
2153 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2154                                    u8 type, u32 flags)
2155 {
2156         struct bdaddr_list_with_flags *entry;
2157
2158         if (!bacmp(bdaddr, BDADDR_ANY))
2159                 return -EBADF;
2160
2161         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2162                 return -EEXIST;
2163
2164         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2165         if (!entry)
2166                 return -ENOMEM;
2167
2168         bacpy(&entry->bdaddr, bdaddr);
2169         entry->bdaddr_type = type;
2170         entry->flags = flags;
2171
2172         list_add(&entry->list, list);
2173
2174         return 0;
2175 }
2176
2177 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2178 {
2179         struct bdaddr_list *entry;
2180
2181         if (!bacmp(bdaddr, BDADDR_ANY)) {
2182                 hci_bdaddr_list_clear(list);
2183                 return 0;
2184         }
2185
2186         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2187         if (!entry)
2188                 return -ENOENT;
2189
2190         list_del(&entry->list);
2191         kfree(entry);
2192
2193         return 0;
2194 }
2195
2196 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2197                                                         u8 type)
2198 {
2199         struct bdaddr_list_with_irk *entry;
2200
2201         if (!bacmp(bdaddr, BDADDR_ANY)) {
2202                 hci_bdaddr_list_clear(list);
2203                 return 0;
2204         }
2205
2206         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2207         if (!entry)
2208                 return -ENOENT;
2209
2210         list_del(&entry->list);
2211         kfree(entry);
2212
2213         return 0;
2214 }
2215
2216 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2217                                    u8 type)
2218 {
2219         struct bdaddr_list_with_flags *entry;
2220
2221         if (!bacmp(bdaddr, BDADDR_ANY)) {
2222                 hci_bdaddr_list_clear(list);
2223                 return 0;
2224         }
2225
2226         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2227         if (!entry)
2228                 return -ENOENT;
2229
2230         list_del(&entry->list);
2231         kfree(entry);
2232
2233         return 0;
2234 }
2235
2236 /* This function requires the caller holds hdev->lock */
2237 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2238                                                bdaddr_t *addr, u8 addr_type)
2239 {
2240         struct hci_conn_params *params;
2241
2242         list_for_each_entry(params, &hdev->le_conn_params, list) {
2243                 if (bacmp(&params->addr, addr) == 0 &&
2244                     params->addr_type == addr_type) {
2245                         return params;
2246                 }
2247         }
2248
2249         return NULL;
2250 }
2251
2252 /* This function requires the caller holds hdev->lock */
2253 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2254                                                   bdaddr_t *addr, u8 addr_type)
2255 {
2256         struct hci_conn_params *param;
2257
2258         list_for_each_entry(param, list, action) {
2259                 if (bacmp(&param->addr, addr) == 0 &&
2260                     param->addr_type == addr_type)
2261                         return param;
2262         }
2263
2264         return NULL;
2265 }
2266
2267 /* This function requires the caller holds hdev->lock */
2268 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2269                                             bdaddr_t *addr, u8 addr_type)
2270 {
2271         struct hci_conn_params *params;
2272
2273         params = hci_conn_params_lookup(hdev, addr, addr_type);
2274         if (params)
2275                 return params;
2276
2277         params = kzalloc(sizeof(*params), GFP_KERNEL);
2278         if (!params) {
2279                 bt_dev_err(hdev, "out of memory");
2280                 return NULL;
2281         }
2282
2283         bacpy(&params->addr, addr);
2284         params->addr_type = addr_type;
2285
2286         list_add(&params->list, &hdev->le_conn_params);
2287         INIT_LIST_HEAD(&params->action);
2288
2289         params->conn_min_interval = hdev->le_conn_min_interval;
2290         params->conn_max_interval = hdev->le_conn_max_interval;
2291         params->conn_latency = hdev->le_conn_latency;
2292         params->supervision_timeout = hdev->le_supv_timeout;
2293         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2294
2295         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2296
2297         return params;
2298 }
2299
2300 static void hci_conn_params_free(struct hci_conn_params *params)
2301 {
2302         if (params->conn) {
2303                 hci_conn_drop(params->conn);
2304                 hci_conn_put(params->conn);
2305         }
2306
2307         list_del(&params->action);
2308         list_del(&params->list);
2309         kfree(params);
2310 }
2311
2312 /* This function requires the caller holds hdev->lock */
2313 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2314 {
2315         struct hci_conn_params *params;
2316
2317         params = hci_conn_params_lookup(hdev, addr, addr_type);
2318         if (!params)
2319                 return;
2320
2321         hci_conn_params_free(params);
2322
2323         hci_update_passive_scan(hdev);
2324
2325         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2326 }
2327
2328 /* This function requires the caller holds hdev->lock */
2329 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2330 {
2331         struct hci_conn_params *params, *tmp;
2332
2333         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2334                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2335                         continue;
2336
2337                 /* If trying to establish a one-time connection to a disabled
2338                  * device, leave the params but mark them for explicit connect only.
2339                  */
2340                 if (params->explicit_connect) {
2341                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2342                         continue;
2343                 }
2344
2345                 list_del(&params->list);
2346                 kfree(params);
2347         }
2348
2349         BT_DBG("All LE disabled connection parameters were removed");
2350 }
2351
2352 /* This function requires the caller holds hdev->lock */
2353 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2354 {
2355         struct hci_conn_params *params, *tmp;
2356
2357         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2358                 hci_conn_params_free(params);
2359
2360         BT_DBG("All LE connection parameters were removed");
2361 }
2362
2363 /* Copy the Identity Address of the controller.
2364  *
2365  * If the controller has a public BD_ADDR, then by default use that one.
2366  * If this is an LE-only controller without a public address, default to
2367  * the static random address.
2368  *
2369  * For debugging purposes it is possible to force controllers with a
2370  * public address to use the static random address instead.
2371  *
2372  * In case BR/EDR has been disabled on a dual-mode controller and
2373  * userspace has configured a static address, then that address
2374  * becomes the identity address instead of the public BR/EDR address.
2375  */
2376 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2377                                u8 *bdaddr_type)
2378 {
2379         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2380             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2381             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2382              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2383                 bacpy(bdaddr, &hdev->static_addr);
2384                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2385         } else {
2386                 bacpy(bdaddr, &hdev->bdaddr);
2387                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2388         }
2389 }
2390
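/* Caller sketch for the selection rules documented above; own_addr and
 * own_addr_type are locals in a hypothetical caller:
 *
 *      bdaddr_t own_addr;
 *      u8 own_addr_type;
 *
 *      hci_copy_identity_address(hdev, &own_addr, &own_addr_type);
 *      // own_addr_type is now either ADDR_LE_DEV_PUBLIC or
 *      // ADDR_LE_DEV_RANDOM, matching the rules above.
 */
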
2391 static void hci_clear_wake_reason(struct hci_dev *hdev)
2392 {
2393         hci_dev_lock(hdev);
2394
2395         hdev->wake_reason = 0;
2396         bacpy(&hdev->wake_addr, BDADDR_ANY);
2397         hdev->wake_addr_type = 0;
2398
2399         hci_dev_unlock(hdev);
2400 }
2401
2402 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2403                                 void *data)
2404 {
2405         struct hci_dev *hdev =
2406                 container_of(nb, struct hci_dev, suspend_notifier);
2407         int ret = 0;
2408
2409         /* Userspace has full control of this device. Do nothing. */
2410         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2411                 return NOTIFY_DONE;
2412
2413         if (action == PM_SUSPEND_PREPARE)
2414                 ret = hci_suspend_dev(hdev);
2415         else if (action == PM_POST_SUSPEND)
2416                 ret = hci_resume_dev(hdev);
2417
2418         if (ret)
2419                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2420                            action, ret);
2421
2422         return NOTIFY_DONE;
2423 }
2424
2425 /* Alloc HCI device */
2426 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2427 {
2428         struct hci_dev *hdev;
2429         unsigned int alloc_size;
2430
2431         alloc_size = sizeof(*hdev);
2432         if (sizeof_priv) {
2433                 /* Fixme: May need ALIGN-ment? */
2434                 alloc_size += sizeof_priv;
2435         }
2436
2437         hdev = kzalloc(alloc_size, GFP_KERNEL);
2438         if (!hdev)
2439                 return NULL;
2440
2441         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2442         hdev->esco_type = (ESCO_HV1);
2443         hdev->link_mode = (HCI_LM_ACCEPT);
2444         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2445         hdev->io_capability = 0x03;     /* No Input No Output */
2446         hdev->manufacturer = 0xffff;    /* Default to internal use */
2447         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2448         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2449         hdev->adv_instance_cnt = 0;
2450         hdev->cur_adv_instance = 0x00;
2451         hdev->adv_instance_timeout = 0;
2452
2453         hdev->advmon_allowlist_duration = 300;
2454         hdev->advmon_no_filter_duration = 500;
2455         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
2456
2457         hdev->sniff_max_interval = 800;
2458         hdev->sniff_min_interval = 80;
2459
2460         hdev->le_adv_channel_map = 0x07;
2461         hdev->le_adv_min_interval = 0x0800;
2462         hdev->le_adv_max_interval = 0x0800;
2463         hdev->le_scan_interval = 0x0060;
2464         hdev->le_scan_window = 0x0030;
2465         hdev->le_scan_int_suspend = 0x0400;
2466         hdev->le_scan_window_suspend = 0x0012;
2467         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2468         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2469         hdev->le_scan_int_adv_monitor = 0x0060;
2470         hdev->le_scan_window_adv_monitor = 0x0030;
2471         hdev->le_scan_int_connect = 0x0060;
2472         hdev->le_scan_window_connect = 0x0060;
2473         hdev->le_conn_min_interval = 0x0018;
2474         hdev->le_conn_max_interval = 0x0028;
2475         hdev->le_conn_latency = 0x0000;
2476         hdev->le_supv_timeout = 0x002a;
2477         hdev->le_def_tx_len = 0x001b;
2478         hdev->le_def_tx_time = 0x0148;
2479         hdev->le_max_tx_len = 0x001b;
2480         hdev->le_max_tx_time = 0x0148;
2481         hdev->le_max_rx_len = 0x001b;
2482         hdev->le_max_rx_time = 0x0148;
2483         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2484         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2485         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2486         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2487         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2488         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2489         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2490         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2491         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2492
2493         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2494         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2495         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2496         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2497         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2498         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2499
2500         /* default 1.28 sec page scan */
2501         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2502         hdev->def_page_scan_int = 0x0800;
2503         hdev->def_page_scan_window = 0x0012;
2504
2505         mutex_init(&hdev->lock);
2506         mutex_init(&hdev->req_lock);
2507
2508         INIT_LIST_HEAD(&hdev->mesh_pending);
2509         INIT_LIST_HEAD(&hdev->mgmt_pending);
2510         INIT_LIST_HEAD(&hdev->reject_list);
2511         INIT_LIST_HEAD(&hdev->accept_list);
2512         INIT_LIST_HEAD(&hdev->uuids);
2513         INIT_LIST_HEAD(&hdev->link_keys);
2514         INIT_LIST_HEAD(&hdev->long_term_keys);
2515         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2516         INIT_LIST_HEAD(&hdev->remote_oob_data);
2517         INIT_LIST_HEAD(&hdev->le_accept_list);
2518         INIT_LIST_HEAD(&hdev->le_resolv_list);
2519         INIT_LIST_HEAD(&hdev->le_conn_params);
2520         INIT_LIST_HEAD(&hdev->pend_le_conns);
2521         INIT_LIST_HEAD(&hdev->pend_le_reports);
2522         INIT_LIST_HEAD(&hdev->conn_hash.list);
2523         INIT_LIST_HEAD(&hdev->adv_instances);
2524         INIT_LIST_HEAD(&hdev->blocked_keys);
2525         INIT_LIST_HEAD(&hdev->monitored_devices);
2526
2527         INIT_LIST_HEAD(&hdev->local_codecs);
2528         INIT_WORK(&hdev->rx_work, hci_rx_work);
2529         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2530         INIT_WORK(&hdev->tx_work, hci_tx_work);
2531         INIT_WORK(&hdev->power_on, hci_power_on);
2532         INIT_WORK(&hdev->error_reset, hci_error_reset);
2533
2534         hci_cmd_sync_init(hdev);
2535
2536         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2537
2538         skb_queue_head_init(&hdev->rx_q);
2539         skb_queue_head_init(&hdev->cmd_q);
2540         skb_queue_head_init(&hdev->raw_q);
2541
2542         init_waitqueue_head(&hdev->req_wait_q);
2543
2544         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2545         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2546
2547         hci_devcd_setup(hdev);
2548         hci_request_setup(hdev);
2549
2550         hci_init_sysfs(hdev);
2551         discovery_init(hdev);
2552
2553         return hdev;
2554 }
2555 EXPORT_SYMBOL(hci_alloc_dev_priv);
2556
2557 /* Free HCI device */
2558 void hci_free_dev(struct hci_dev *hdev)
2559 {
2560         /* Will be freed via the device release callback */
2561         put_device(&hdev->dev);
2562 }
2563 EXPORT_SYMBOL(hci_free_dev);
2564
2565 /* Register HCI device */
2566 int hci_register_dev(struct hci_dev *hdev)
2567 {
2568         int id, error;
2569
2570         if (!hdev->open || !hdev->close || !hdev->send)
2571                 return -EINVAL;
2572
2573         /* Do not allow HCI_AMP devices to register at index 0,
2574          * so the index can be used as the AMP controller ID.
2575          */
2576         switch (hdev->dev_type) {
2577         case HCI_PRIMARY:
2578                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2579                 break;
2580         case HCI_AMP:
2581                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2582                 break;
2583         default:
2584                 return -EINVAL;
2585         }
2586
2587         if (id < 0)
2588                 return id;
2589
2590         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2591         hdev->id = id;
2592
2593         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2594
2595         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2596         if (!hdev->workqueue) {
2597                 error = -ENOMEM;
2598                 goto err;
2599         }
2600
2601         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2602                                                       hdev->name);
2603         if (!hdev->req_workqueue) {
2604                 destroy_workqueue(hdev->workqueue);
2605                 error = -ENOMEM;
2606                 goto err;
2607         }
2608
2609         if (!IS_ERR_OR_NULL(bt_debugfs))
2610                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2611
2612         dev_set_name(&hdev->dev, "%s", hdev->name);
2613
2614         error = device_add(&hdev->dev);
2615         if (error < 0)
2616                 goto err_wqueue;
2617
2618         hci_leds_init(hdev);
2619
2620         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2621                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2622                                     hdev);
2623         if (hdev->rfkill) {
2624                 if (rfkill_register(hdev->rfkill) < 0) {
2625                         rfkill_destroy(hdev->rfkill);
2626                         hdev->rfkill = NULL;
2627                 }
2628         }
2629
2630         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2631                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2632
2633         hci_dev_set_flag(hdev, HCI_SETUP);
2634         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2635
2636         if (hdev->dev_type == HCI_PRIMARY) {
2637                 /* Assume BR/EDR support until proven otherwise (such as
2638                  * through reading supported features during init).
2639                  */
2640                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2641         }
2642
2643         write_lock(&hci_dev_list_lock);
2644         list_add(&hdev->list, &hci_dev_list);
2645         write_unlock(&hci_dev_list_lock);
2646
2647         /* Devices that are marked for raw-only usage are unconfigured
2648          * and should not be included in normal operation.
2649          */
2650         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2651                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2652
2653         /* Mark the Remote Wakeup connection flag as supported if the
2654          * driver has a wakeup callback.
2655          */
2656         if (hdev->wakeup)
2657                 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2658
2659         hci_sock_dev_event(hdev, HCI_DEV_REG);
2660         hci_dev_hold(hdev);
2661
2662         error = hci_register_suspend_notifier(hdev);
2663         if (error)
2664                 BT_WARN("register suspend notifier failed error:%d\n", error);
2665
2666         queue_work(hdev->req_workqueue, &hdev->power_on);
2667
2668         idr_init(&hdev->adv_monitors_idr);
2669         msft_register(hdev);
2670
2671         return id;
2672
2673 err_wqueue:
2674         debugfs_remove_recursive(hdev->debugfs);
2675         destroy_workqueue(hdev->workqueue);
2676         destroy_workqueue(hdev->req_workqueue);
2677 err:
2678         ida_simple_remove(&hci_index_ida, hdev->id);
2679
2680         return error;
2681 }
2682 EXPORT_SYMBOL(hci_register_dev);
2683
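/* A minimal driver-side sketch of the registration contract enforced at
 * the top of hci_register_dev(): ->open, ->close and ->send must all be
 * set before registering. Everything named my_* here is hypothetical:
 *
 *      static int my_open(struct hci_dev *hdev) { return 0; }
 *      static int my_close(struct hci_dev *hdev) { return 0; }
 *
 *      static int my_send(struct hci_dev *hdev, struct sk_buff *skb)
 *      {
 *              kfree_skb(skb); // a real driver queues skb to hardware
 *              return 0;
 *      }
 *
 *      hdev = hci_alloc_dev();
 *      hdev->open  = my_open;
 *      hdev->close = my_close;
 *      hdev->send  = my_send;
 *      if (hci_register_dev(hdev) < 0)
 *              hci_free_dev(hdev);
 */
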
2684 /* Unregister HCI device */
2685 void hci_unregister_dev(struct hci_dev *hdev)
2686 {
2687         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2688
2689         mutex_lock(&hdev->unregister_lock);
2690         hci_dev_set_flag(hdev, HCI_UNREGISTER);
2691         mutex_unlock(&hdev->unregister_lock);
2692
2693         write_lock(&hci_dev_list_lock);
2694         list_del(&hdev->list);
2695         write_unlock(&hci_dev_list_lock);
2696
2697         cancel_work_sync(&hdev->power_on);
2698
2699         hci_cmd_sync_clear(hdev);
2700
2701         hci_unregister_suspend_notifier(hdev);
2702
2703         msft_unregister(hdev);
2704
2705         hci_dev_do_close(hdev);
2706
2707         if (!test_bit(HCI_INIT, &hdev->flags) &&
2708             !hci_dev_test_flag(hdev, HCI_SETUP) &&
2709             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2710                 hci_dev_lock(hdev);
2711                 mgmt_index_removed(hdev);
2712                 hci_dev_unlock(hdev);
2713         }
2714
2715         /* mgmt_index_removed should take care of emptying the
2716          * pending list */
2717         BUG_ON(!list_empty(&hdev->mgmt_pending));
2718
2719         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2720
2721         if (hdev->rfkill) {
2722                 rfkill_unregister(hdev->rfkill);
2723                 rfkill_destroy(hdev->rfkill);
2724         }
2725
2726         device_del(&hdev->dev);
2727         /* Actual cleanup is deferred until hci_release_dev(). */
2728         hci_dev_put(hdev);
2729 }
2730 EXPORT_SYMBOL(hci_unregister_dev);
2731
2732 /* Release HCI device */
2733 void hci_release_dev(struct hci_dev *hdev)
2734 {
2735         debugfs_remove_recursive(hdev->debugfs);
2736         kfree_const(hdev->hw_info);
2737         kfree_const(hdev->fw_info);
2738
2739         destroy_workqueue(hdev->workqueue);
2740         destroy_workqueue(hdev->req_workqueue);
2741
2742         hci_dev_lock(hdev);
2743         hci_bdaddr_list_clear(&hdev->reject_list);
2744         hci_bdaddr_list_clear(&hdev->accept_list);
2745         hci_uuids_clear(hdev);
2746         hci_link_keys_clear(hdev);
2747         hci_smp_ltks_clear(hdev);
2748         hci_smp_irks_clear(hdev);
2749         hci_remote_oob_data_clear(hdev);
2750         hci_adv_instances_clear(hdev);
2751         hci_adv_monitors_clear(hdev);
2752         hci_bdaddr_list_clear(&hdev->le_accept_list);
2753         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2754         hci_conn_params_clear_all(hdev);
2755         hci_discovery_filter_clear(hdev);
2756         hci_blocked_keys_clear(hdev);
2757         hci_dev_unlock(hdev);
2758
2759         ida_simple_remove(&hci_index_ida, hdev->id);
2760         kfree_skb(hdev->sent_cmd);
2761         kfree_skb(hdev->recv_event);
2762         kfree(hdev);
2763 }
2764 EXPORT_SYMBOL(hci_release_dev);
2765
2766 int hci_register_suspend_notifier(struct hci_dev *hdev)
2767 {
2768         int ret = 0;
2769
2770         if (!hdev->suspend_notifier.notifier_call &&
2771             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2772                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2773                 ret = register_pm_notifier(&hdev->suspend_notifier);
2774         }
2775
2776         return ret;
2777 }
2778
2779 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2780 {
2781         int ret = 0;
2782
2783         if (hdev->suspend_notifier.notifier_call) {
2784                 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2785                 if (!ret)
2786                         hdev->suspend_notifier.notifier_call = NULL;
2787         }
2788
2789         return ret;
2790 }
2791
2792 /* Suspend HCI device */
2793 int hci_suspend_dev(struct hci_dev *hdev)
2794 {
2795         int ret;
2796
2797         bt_dev_dbg(hdev, "");
2798
2799         /* Suspend should only act when powered. */
2800         if (!hdev_is_powered(hdev) ||
2801             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2802                 return 0;
2803
2804         /* If powering down, don't attempt to suspend */
2805         if (mgmt_powering_down(hdev))
2806                 return 0;
2807
2808         /* Cancel potentially blocking sync operation before suspend */
2809         __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
2810
2811         hci_req_sync_lock(hdev);
2812         ret = hci_suspend_sync(hdev);
2813         hci_req_sync_unlock(hdev);
2814
2815         hci_clear_wake_reason(hdev);
2816         mgmt_suspending(hdev, hdev->suspend_state);
2817
2818         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2819         return ret;
2820 }
2821 EXPORT_SYMBOL(hci_suspend_dev);
2822
2823 /* Resume HCI device */
2824 int hci_resume_dev(struct hci_dev *hdev)
2825 {
2826         int ret;
2827
2828         bt_dev_dbg(hdev, "");
2829
2830         /* Resume should only act when powered. */
2831         if (!hdev_is_powered(hdev) ||
2832             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2833                 return 0;
2834
2835         /* If powering down, don't attempt to resume */
2836         if (mgmt_powering_down(hdev))
2837                 return 0;
2838
2839         hci_req_sync_lock(hdev);
2840         ret = hci_resume_sync(hdev);
2841         hci_req_sync_unlock(hdev);
2842
2843         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2844                       hdev->wake_addr_type);
2845
2846         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2847         return ret;
2848 }
2849 EXPORT_SYMBOL(hci_resume_dev);
2850
2851 /* Reset HCI device */
2852 int hci_reset_dev(struct hci_dev *hdev)
2853 {
2854         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2855         struct sk_buff *skb;
2856
2857         skb = bt_skb_alloc(3, GFP_ATOMIC);
2858         if (!skb)
2859                 return -ENOMEM;
2860
2861         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2862         skb_put_data(skb, hw_err, 3);
2863
2864         bt_dev_err(hdev, "Injecting HCI hardware error event");
2865
2866         /* Send Hardware Error to upper stack */
2867         return hci_recv_frame(hdev, skb);
2868 }
2869 EXPORT_SYMBOL(hci_reset_dev);
2870
2871 /* Receive frame from HCI drivers */
2872 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2873 {
2874         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2875                       && !test_bit(HCI_INIT, &hdev->flags))) {
2876                 kfree_skb(skb);
2877                 return -ENXIO;
2878         }
2879
2880         switch (hci_skb_pkt_type(skb)) {
2881         case HCI_EVENT_PKT:
2882                 break;
2883         case HCI_ACLDATA_PKT:
2884                 /* Detect if ISO packet has been sent as ACL */
2885                 if (hci_conn_num(hdev, ISO_LINK)) {
2886                         __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2887                         __u8 type;
2888
2889                         type = hci_conn_lookup_type(hdev, hci_handle(handle));
2890                         if (type == ISO_LINK)
2891                                 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2892                 }
2893                 break;
2894         case HCI_SCODATA_PKT:
2895                 break;
2896         case HCI_ISODATA_PKT:
2897                 break;
2898         default:
2899                 kfree_skb(skb);
2900                 return -EINVAL;
2901         }
2902
2903         /* Incoming skb */
2904         bt_cb(skb)->incoming = 1;
2905
2906         /* Time stamp */
2907         __net_timestamp(skb);
2908
2909         skb_queue_tail(&hdev->rx_q, skb);
2910         queue_work(hdev->workqueue, &hdev->rx_work);
2911
2912         return 0;
2913 }
2914 EXPORT_SYMBOL(hci_recv_frame);
2915
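/* Driver-side sketch for feeding one received HCI event into the stack,
 * mirroring the pattern hci_reset_dev() uses above; buf/len are
 * hypothetical bytes read from the transport:
 *
 *      skb = bt_skb_alloc(len, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *      skb_put_data(skb, buf, len);
 *      return hci_recv_frame(hdev, skb);       // consumes skb, even on error
 */
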
2916 /* Receive diagnostic message from HCI drivers */
2917 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2918 {
2919         /* Mark as diagnostic packet */
2920         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2921
2922         /* Time stamp */
2923         __net_timestamp(skb);
2924
2925         skb_queue_tail(&hdev->rx_q, skb);
2926         queue_work(hdev->workqueue, &hdev->rx_work);
2927
2928         return 0;
2929 }
2930 EXPORT_SYMBOL(hci_recv_diag);
2931
2932 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2933 {
2934         va_list vargs;
2935
2936         va_start(vargs, fmt);
2937         kfree_const(hdev->hw_info);
2938         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2939         va_end(vargs);
2940 }
2941 EXPORT_SYMBOL(hci_set_hw_info);
2942
2943 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2944 {
2945         va_list vargs;
2946
2947         va_start(vargs, fmt);
2948         kfree_const(hdev->fw_info);
2949         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2950         va_end(vargs);
2951 }
2952 EXPORT_SYMBOL(hci_set_fw_info);
2953
2954 /* ---- Interface to upper protocols ---- */
2955
2956 int hci_register_cb(struct hci_cb *cb)
2957 {
2958         BT_DBG("%p name %s", cb, cb->name);
2959
2960         mutex_lock(&hci_cb_list_lock);
2961         list_add_tail(&cb->list, &hci_cb_list);
2962         mutex_unlock(&hci_cb_list_lock);
2963
2964         return 0;
2965 }
2966 EXPORT_SYMBOL(hci_register_cb);
2967
2968 int hci_unregister_cb(struct hci_cb *cb)
2969 {
2970         BT_DBG("%p name %s", cb, cb->name);
2971
2972         mutex_lock(&hci_cb_list_lock);
2973         list_del(&cb->list);
2974         mutex_unlock(&hci_cb_list_lock);
2975
2976         return 0;
2977 }
2978 EXPORT_SYMBOL(hci_unregister_cb);
2979
2980 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2981 {
2982         int err;
2983
2984         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2985                skb->len);
2986
2987         /* Time stamp */
2988         __net_timestamp(skb);
2989
2990         /* Send copy to monitor */
2991         hci_send_to_monitor(hdev, skb);
2992
2993         if (atomic_read(&hdev->promisc)) {
2994                 /* Send copy to the sockets */
2995                 hci_send_to_sock(hdev, skb);
2996         }
2997
2998         /* Get rid of skb owner, prior to sending to the driver. */
2999         skb_orphan(skb);
3000
3001         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3002                 kfree_skb(skb);
3003                 return -EINVAL;
3004         }
3005
3006         err = hdev->send(hdev, skb);
3007         if (err < 0) {
3008                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3009                 kfree_skb(skb);
3010                 return err;
3011         }
3012
3013         return 0;
3014 }
3015
3016 /* Send HCI command */
3017 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3018                  const void *param)
3019 {
3020         struct sk_buff *skb;
3021
3022         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3023
3024         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3025         if (!skb) {
3026                 bt_dev_err(hdev, "no memory for command");
3027                 return -ENOMEM;
3028         }
3029
3030         /* Stand-alone HCI commands must be flagged as
3031          * single-command requests.
3032          */
3033         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3034
3035         skb_queue_tail(&hdev->cmd_q, skb);
3036         queue_work(hdev->workqueue, &hdev->cmd_work);
3037
3038         return 0;
3039 }
3040
3041 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3042                    const void *param)
3043 {
3044         struct sk_buff *skb;
3045
3046         if (hci_opcode_ogf(opcode) != 0x3f) {
3047                 /* A controller receiving a command shall respond with either
3048                  * a Command Status Event or a Command Complete Event.
3049                  * Therefore, all standard HCI commands must be sent via the
3050                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3051                  * Some vendors do not comply with this rule for vendor-specific
3052                  * commands and do not return any event. We want to support
3053                  * unresponded commands for such cases only.
3054                  */
3055                 bt_dev_err(hdev, "unresponded command not supported");
3056                 return -EINVAL;
3057         }
3058
3059         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3060         if (!skb) {
3061                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3062                            opcode);
3063                 return -ENOMEM;
3064         }
3065
3066         hci_send_frame(hdev, skb);
3067
3068         return 0;
3069 }
3070 EXPORT_SYMBOL(__hci_cmd_send);
3071
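/* Hedged usage sketch: __hci_cmd_send() only accepts vendor commands
 * (OGF 0x3f), so opcodes are built with hci_opcode_pack(). The OCF here
 * is made up:
 *
 *      u8 param = 0x01;
 *
 *      err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *                           sizeof(param), &param);
 */
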
3072 /* Get data from the previously sent command */
3073 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3074 {
3075         struct hci_command_hdr *hdr;
3076
3077         if (!hdev->sent_cmd)
3078                 return NULL;
3079
3080         hdr = (void *) hdev->sent_cmd->data;
3081
3082         if (hdr->opcode != cpu_to_le16(opcode))
3083                 return NULL;
3084
3085         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3086
3087         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3088 }
3089
3090 /* Get data from last received event */
3091 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3092 {
3093         struct hci_event_hdr *hdr;
3094         int offset;
3095
3096         if (!hdev->recv_event)
3097                 return NULL;
3098
3099         hdr = (void *)hdev->recv_event->data;
3100         offset = sizeof(*hdr);
3101
3102         if (hdr->evt != event) {
3103                 /* In case of an LE meta event, check whether the subevent matches */
3104                 if (hdr->evt == HCI_EV_LE_META) {
3105                         struct hci_ev_le_meta *ev;
3106
3107                         ev = (void *)hdev->recv_event->data + offset;
3108                         offset += sizeof(*ev);
3109                         if (ev->subevent == event)
3110                                 goto found;
3111                 }
3112                 return NULL;
3113         }
3114
3115 found:
3116         bt_dev_dbg(hdev, "event 0x%2.2x", event);
3117
3118         return hdev->recv_event->data + offset;
3119 }
3120
3121 /* Send ACL data */
3122 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3123 {
3124         struct hci_acl_hdr *hdr;
3125         int len = skb->len;
3126
3127         skb_push(skb, HCI_ACL_HDR_SIZE);
3128         skb_reset_transport_header(skb);
3129         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3130         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3131         hdr->dlen   = cpu_to_le16(len);
3132 }
3133
3134 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3135                           struct sk_buff *skb, __u16 flags)
3136 {
3137         struct hci_conn *conn = chan->conn;
3138         struct hci_dev *hdev = conn->hdev;
3139         struct sk_buff *list;
3140
3141         skb->len = skb_headlen(skb);
3142         skb->data_len = 0;
3143
3144         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3145
3146         switch (hdev->dev_type) {
3147         case HCI_PRIMARY:
3148                 hci_add_acl_hdr(skb, conn->handle, flags);
3149                 break;
3150         case HCI_AMP:
3151                 hci_add_acl_hdr(skb, chan->handle, flags);
3152                 break;
3153         default:
3154                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3155                 return;
3156         }
3157
3158         list = skb_shinfo(skb)->frag_list;
3159         if (!list) {
3160                 /* Non fragmented */
3161                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3162
3163                 skb_queue_tail(queue, skb);
3164         } else {
3165                 /* Fragmented */
3166                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3167
3168                 skb_shinfo(skb)->frag_list = NULL;
3169
3170                 /* Queue all fragments atomically. We need to use spin_lock_bh
3171                  * here because of 6LoWPAN links, as there this function is
3172                  * called from softirq and using normal spin lock could cause
3173                  * deadlocks.
3174                  */
3175                 spin_lock_bh(&queue->lock);
3176
3177                 __skb_queue_tail(queue, skb);
3178
3179                 flags &= ~ACL_START;
3180                 flags |= ACL_CONT;
3181                 do {
3182                         skb = list; list = list->next;
3183
3184                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3185                         hci_add_acl_hdr(skb, conn->handle, flags);
3186
3187                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3188
3189                         __skb_queue_tail(queue, skb);
3190                 } while (list);
3191
3192                 spin_unlock_bh(&queue->lock);
3193         }
3194 }
3195
3196 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3197 {
3198         struct hci_dev *hdev = chan->conn->hdev;
3199
3200         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3201
3202         hci_queue_acl(chan, &chan->data_q, skb, flags);
3203
3204         queue_work(hdev->workqueue, &hdev->tx_work);
3205 }
3206
3207 /* Send SCO data */
3208 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3209 {
3210         struct hci_dev *hdev = conn->hdev;
3211         struct hci_sco_hdr hdr;
3212
3213         BT_DBG("%s len %d", hdev->name, skb->len);
3214
3215         hdr.handle = cpu_to_le16(conn->handle);
3216         hdr.dlen   = skb->len;
3217
3218         skb_push(skb, HCI_SCO_HDR_SIZE);
3219         skb_reset_transport_header(skb);
3220         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3221
3222         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3223
3224         skb_queue_tail(&conn->data_q, skb);
3225         queue_work(hdev->workqueue, &hdev->tx_work);
3226 }
3227
3228 /* Send ISO data */
3229 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3230 {
3231         struct hci_iso_hdr *hdr;
3232         int len = skb->len;
3233
3234         skb_push(skb, HCI_ISO_HDR_SIZE);
3235         skb_reset_transport_header(skb);
3236         hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3237         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3238         hdr->dlen   = cpu_to_le16(len);
3239 }
3240
3241 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3242                           struct sk_buff *skb)
3243 {
3244         struct hci_dev *hdev = conn->hdev;
3245         struct sk_buff *list;
3246         __u16 flags;
3247
3248         skb->len = skb_headlen(skb);
3249         skb->data_len = 0;
3250
3251         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3252
3253         list = skb_shinfo(skb)->frag_list;
3254
3255         flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3256         hci_add_iso_hdr(skb, conn->handle, flags);
3257
3258         if (!list) {
3259                 /* Non fragmented */
3260                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3261
3262                 skb_queue_tail(queue, skb);
3263         } else {
3264                 /* Fragmented */
3265                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3266
3267                 skb_shinfo(skb)->frag_list = NULL;
3268
3269                 __skb_queue_tail(queue, skb);
3270
3271                 do {
3272                         skb = list; list = list->next;
3273
3274                         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3275                         flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3276                                                    0x00);
3277                         hci_add_iso_hdr(skb, conn->handle, flags);
3278
3279                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3280
3281                         __skb_queue_tail(queue, skb);
3282                 } while (list);
3283         }
3284 }
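
/* Illustrative walk-through: an SDU arriving as a head skb with two
 * frag_list fragments is queued as three HCI ISO packets:
 *
 *	head  -> ISO_START
 *	frag1 -> ISO_CONT
 *	frag2 -> ISO_END	(list is NULL on the final iteration)
 *
 * whereas a single unfragmented skb goes out as ISO_SINGLE.
 */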
3285
3286 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3287 {
3288         struct hci_dev *hdev = conn->hdev;
3289
3290         BT_DBG("%s len %d", hdev->name, skb->len);
3291
3292         hci_queue_iso(conn, &conn->data_q, skb);
3293
3294         queue_work(hdev->workqueue, &hdev->tx_work);
3295 }
3296
3297 /* ---- HCI TX task (outgoing data) ---- */
3298
3299 /* HCI Connection scheduler */
3300 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3301 {
3302         struct hci_dev *hdev;
3303         int cnt, q;
3304
3305         if (!conn) {
3306                 *quote = 0;
3307                 return;
3308         }
3309
3310         hdev = conn->hdev;
3311
3312         switch (conn->type) {
3313         case ACL_LINK:
3314                 cnt = hdev->acl_cnt;
3315                 break;
3316         case AMP_LINK:
3317                 cnt = hdev->block_cnt;
3318                 break;
3319         case SCO_LINK:
3320         case ESCO_LINK:
3321                 cnt = hdev->sco_cnt;
3322                 break;
3323         case LE_LINK:
3324                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3325                 break;
3326         case ISO_LINK:
3327                 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3328                         hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3329                 break;
3330         default:
3331                 cnt = 0;
3332                 bt_dev_err(hdev, "unknown link type %d", conn->type);
3333         }
3334
3335         q = cnt / num;
3336         *quote = q ? q : 1;
3337 }
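
/* Worked example (illustrative): the quota divides the free controller
 * buffers evenly over the active connections of one link type, but a
 * connection is never starved completely:
 *
 *	cnt = 8 free ACL slots, num = 3 connections -> quote = 8 / 3 = 2
 *	cnt = 2 free ACL slots, num = 5 connections -> quote = 0 -> 1
 */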
3338
3339 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3340                                      int *quote)
3341 {
3342         struct hci_conn_hash *h = &hdev->conn_hash;
3343         struct hci_conn *conn = NULL, *c;
3344         unsigned int num = 0, min = ~0;
3345
3346         /* We don't have to lock the device here. Connections are
3347          * always added and removed with the TX task disabled. */
3348
3349         rcu_read_lock();
3350
3351         list_for_each_entry_rcu(c, &h->list, list) {
3352                 if (c->type != type || skb_queue_empty(&c->data_q))
3353                         continue;
3354
3355                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3356                         continue;
3357
3358                 num++;
3359
3360                 if (c->sent < min) {
3361                         min  = c->sent;
3362                         conn = c;
3363                 }
3364
3365                 if (hci_conn_num(hdev, type) == num)
3366                         break;
3367         }
3368
3369         rcu_read_unlock();
3370
3371         hci_quote_sent(conn, num, quote);
3372
3373         BT_DBG("conn %p quote %d", conn, *quote);
3374         return conn;
3375 }
3376
3377 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3378 {
3379         struct hci_conn_hash *h = &hdev->conn_hash;
3380         struct hci_conn *c;
3381
3382         bt_dev_err(hdev, "link tx timeout");
3383
3384         rcu_read_lock();
3385
3386         /* Kill stalled connections */
3387         list_for_each_entry_rcu(c, &h->list, list) {
3388                 if (c->type == type && c->sent) {
3389                         bt_dev_err(hdev, "killing stalled connection %pMR",
3390                                    &c->dst);
3391                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3392                 }
3393         }
3394
3395         rcu_read_unlock();
3396 }
3397
3398 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3399                                       int *quote)
3400 {
3401         struct hci_conn_hash *h = &hdev->conn_hash;
3402         struct hci_chan *chan = NULL;
3403         unsigned int num = 0, min = ~0, cur_prio = 0;
3404         struct hci_conn *conn;
3405         int conn_num = 0;
3406
3407         BT_DBG("%s", hdev->name);
3408
3409         rcu_read_lock();
3410
3411         list_for_each_entry_rcu(conn, &h->list, list) {
3412                 struct hci_chan *tmp;
3413
3414                 if (conn->type != type)
3415                         continue;
3416
3417                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3418                         continue;
3419
3420                 conn_num++;
3421
3422                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3423                         struct sk_buff *skb;
3424
3425                         if (skb_queue_empty(&tmp->data_q))
3426                                 continue;
3427
3428                         skb = skb_peek(&tmp->data_q);
3429                         if (skb->priority < cur_prio)
3430                                 continue;
3431
3432                         if (skb->priority > cur_prio) {
3433                                 num = 0;
3434                                 min = ~0;
3435                                 cur_prio = skb->priority;
3436                         }
3437
3438                         num++;
3439
3440                         if (conn->sent < min) {
3441                                 min  = conn->sent;
3442                                 chan = tmp;
3443                         }
3444                 }
3445
3446                 if (hci_conn_num(hdev, type) == conn_num)
3447                         break;
3448         }
3449
3450         rcu_read_unlock();
3451
3452         if (!chan)
3453                 return NULL;
3454
3455         hci_quote_sent(chan->conn, num, quote);
3456
3457         BT_DBG("chan %p quote %d", chan, *quote);
3458         return chan;
3459 }
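
/* Selection example (illustrative): among all channels of the given link
 * type, the one whose head skb has the highest priority wins, and ties
 * are broken in favour of the connection with the fewest unacked packets:
 *
 *	chan A: head prio 5, conn->sent 3	(outprioritized)
 *	chan B: head prio 7, conn->sent 9	(higher prio resets num/min)
 *	chan C: head prio 7, conn->sent 1	<- chosen
 */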
3460
3461 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3462 {
3463         struct hci_conn_hash *h = &hdev->conn_hash;
3464         struct hci_conn *conn;
3465         int num = 0;
3466
3467         BT_DBG("%s", hdev->name);
3468
3469         rcu_read_lock();
3470
3471         list_for_each_entry_rcu(conn, &h->list, list) {
3472                 struct hci_chan *chan;
3473
3474                 if (conn->type != type)
3475                         continue;
3476
3477                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3478                         continue;
3479
3480                 num++;
3481
3482                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3483                         struct sk_buff *skb;
3484
3485                         if (chan->sent) {
3486                                 chan->sent = 0;
3487                                 continue;
3488                         }
3489
3490                         if (skb_queue_empty(&chan->data_q))
3491                                 continue;
3492
3493                         skb = skb_peek(&chan->data_q);
3494                         if (skb->priority >= HCI_PRIO_MAX - 1)
3495                                 continue;
3496
3497                         skb->priority = HCI_PRIO_MAX - 1;
3498
3499                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3500                                skb->priority);
3501                 }
3502
3503                 if (hci_conn_num(hdev, type) == num)
3504                         break;
3505         }
3506
3507         rcu_read_unlock();
3508
3509 }
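
/* Illustrative note: this is the scheduler's anti-starvation pass.
 * Channels that transmitted in the last round only get chan->sent
 * cleared, while channels that were left waiting have the skb at the
 * head of their queue promoted to HCI_PRIO_MAX - 1 so they win the next
 * round of hci_chan_sent().
 */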
3510
3511 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3512 {
3513         /* Calculate count of blocks used by this packet */
3514         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3515 }
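
/* Worked example (illustrative; block_len is controller-specific and
 * reported via Read Data Block Size): with HCI_ACL_HDR_SIZE = 4 and a
 * hypothetical block_len of 339 bytes, a 1021-byte skb costs
 *
 *	DIV_ROUND_UP(1021 - 4, 339) = DIV_ROUND_UP(1017, 339) = 3 blocks
 */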
3516
3517 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3518 {
3519         unsigned long last_tx;
3520
3521         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3522                 return;
3523
3524         switch (type) {
3525         case LE_LINK:
3526                 last_tx = hdev->le_last_tx;
3527                 break;
3528         default:
3529                 last_tx = hdev->acl_last_tx;
3530                 break;
3531         }
3532
3533         /* The TX timeout must be longer than the maximum link
3534          * supervision timeout (40.9 seconds).
3535          */
3536         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3537                 hci_link_tx_to(hdev, type);
3538 }
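
/* Timing note (illustrative): HCI_ACL_TX_TIMEOUT (45 seconds) sits safely
 * above the largest possible link supervision timeout of
 * 0xffff slots * 0.625 ms ~= 40.9 seconds, so a link is declared stalled
 * only after the controller itself must already have given up on it.
 */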
3539
3540 /* Schedule SCO */
3541 static void hci_sched_sco(struct hci_dev *hdev)
3542 {
3543         struct hci_conn *conn;
3544         struct sk_buff *skb;
3545         int quote;
3546
3547         BT_DBG("%s", hdev->name);
3548
3549         if (!hci_conn_num(hdev, SCO_LINK))
3550                 return;
3551
3552         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3553                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3554                         BT_DBG("skb %p len %d", skb, skb->len);
3555                         hci_send_frame(hdev, skb);
3556
3557                         conn->sent++;
3558                         if (conn->sent == ~0)
3559                                 conn->sent = 0;
3560                 }
3561         }
3562 }
3563
3564 static void hci_sched_esco(struct hci_dev *hdev)
3565 {
3566         struct hci_conn *conn;
3567         struct sk_buff *skb;
3568         int quote;
3569
3570         BT_DBG("%s", hdev->name);
3571
3572         if (!hci_conn_num(hdev, ESCO_LINK))
3573                 return;
3574
3575         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3576                                                      &quote))) {
3577                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3578                         BT_DBG("skb %p len %d", skb, skb->len);
3579                         hci_send_frame(hdev, skb);
3580
3581                         conn->sent++;
3582                         if (conn->sent == ~0)
3583                                 conn->sent = 0;
3584                 }
3585         }
3586 }
3587
3588 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3589 {
3590         unsigned int cnt = hdev->acl_cnt;
3591         struct hci_chan *chan;
3592         struct sk_buff *skb;
3593         int quote;
3594
3595         __check_timeout(hdev, cnt, ACL_LINK);
3596
3597         while (hdev->acl_cnt &&
3598                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3599                 u32 priority = (skb_peek(&chan->data_q))->priority;
3600                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3601                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3602                                skb->len, skb->priority);
3603
3604                         /* Stop if priority has changed */
3605                         if (skb->priority < priority)
3606                                 break;
3607
3608                         skb = skb_dequeue(&chan->data_q);
3609
3610                         hci_conn_enter_active_mode(chan->conn,
3611                                                    bt_cb(skb)->force_active);
3612
3613                         hci_send_frame(hdev, skb);
3614                         hdev->acl_last_tx = jiffies;
3615
3616                         hdev->acl_cnt--;
3617                         chan->sent++;
3618                         chan->conn->sent++;
3619
3620                         /* Send pending SCO packets right away */
3621                         hci_sched_sco(hdev);
3622                         hci_sched_esco(hdev);
3623                 }
3624         }
3625
3626         if (cnt != hdev->acl_cnt)
3627                 hci_prio_recalculate(hdev, ACL_LINK);
3628 }
3629
3630 static void hci_sched_acl_blk(struct hci_dev *hdev)
3631 {
3632         unsigned int cnt = hdev->block_cnt;
3633         struct hci_chan *chan;
3634         struct sk_buff *skb;
3635         int quote;
3636         u8 type;
3637
3638         BT_DBG("%s", hdev->name);
3639
3640         if (hdev->dev_type == HCI_AMP)
3641                 type = AMP_LINK;
3642         else
3643                 type = ACL_LINK;
3644
3645         __check_timeout(hdev, cnt, type);
3646
3647         while (hdev->block_cnt > 0 &&
3648                (chan = hci_chan_sent(hdev, type, &quote))) {
3649                 u32 priority = (skb_peek(&chan->data_q))->priority;
3650                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3651                         int blocks;
3652
3653                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3654                                skb->len, skb->priority);
3655
3656                         /* Stop if priority has changed */
3657                         if (skb->priority < priority)
3658                                 break;
3659
3660                         skb = skb_dequeue(&chan->data_q);
3661
3662                         blocks = __get_blocks(hdev, skb);
3663                         if (blocks > hdev->block_cnt)
3664                                 return;
3665
3666                         hci_conn_enter_active_mode(chan->conn,
3667                                                    bt_cb(skb)->force_active);
3668
3669                         hci_send_frame(hdev, skb);
3670                         hdev->acl_last_tx = jiffies;
3671
3672                         hdev->block_cnt -= blocks;
3673                         quote -= blocks;
3674
3675                         chan->sent += blocks;
3676                         chan->conn->sent += blocks;
3677                 }
3678         }
3679
3680         if (cnt != hdev->block_cnt)
3681                 hci_prio_recalculate(hdev, type);
3682 }
3683
3684 static void hci_sched_acl(struct hci_dev *hdev)
3685 {
3686         BT_DBG("%s", hdev->name);
3687
3688         /* Nothing to schedule if a BR/EDR controller has no ACL links */
3689         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3690                 return;
3691
3692         /* Nothing to schedule if an AMP controller has no AMP links */
3693         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3694                 return;
3695
3696         switch (hdev->flow_ctl_mode) {
3697         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3698                 hci_sched_acl_pkt(hdev);
3699                 break;
3700
3701         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3702                 hci_sched_acl_blk(hdev);
3703                 break;
3704         }
3705 }
3706
3707 static void hci_sched_le(struct hci_dev *hdev)
3708 {
3709         struct hci_chan *chan;
3710         struct sk_buff *skb;
3711         int quote, cnt, tmp;
3712
3713         BT_DBG("%s", hdev->name);
3714
3715         if (!hci_conn_num(hdev, LE_LINK))
3716                 return;
3717
3718         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3719
3720         __check_timeout(hdev, cnt, LE_LINK);
3721
3722         tmp = cnt;
3723         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3724                 u32 priority = (skb_peek(&chan->data_q))->priority;
3725                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3726                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3727                                skb->len, skb->priority);
3728
3729                         /* Stop if priority has changed */
3730                         if (skb->priority < priority)
3731                                 break;
3732
3733                         skb = skb_dequeue(&chan->data_q);
3734
3735                         hci_send_frame(hdev, skb);
3736                         hdev->le_last_tx = jiffies;
3737
3738                         cnt--;
3739                         chan->sent++;
3740                         chan->conn->sent++;
3741
3742                         /* Send pending SCO packets right away */
3743                         hci_sched_sco(hdev);
3744                         hci_sched_esco(hdev);
3745                 }
3746         }
3747
3748         if (hdev->le_pkts)
3749                 hdev->le_cnt = cnt;
3750         else
3751                 hdev->acl_cnt = cnt;
3752
3753         if (cnt != tmp)
3754                 hci_prio_recalculate(hdev, LE_LINK);
3755 }
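
/* Illustrative note: controllers that report no dedicated LE buffers
 * (le_pkts == 0) share the BR/EDR ACL pool, which is why the consumed
 * count is written back to either le_cnt or acl_cnt above.
 */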
3756
3757 /* Schedule CIS */
3758 static void hci_sched_iso(struct hci_dev *hdev)
3759 {
3760         struct hci_conn *conn;
3761         struct sk_buff *skb;
3762         int quote, *cnt;
3763
3764         BT_DBG("%s", hdev->name);
3765
3766         if (!hci_conn_num(hdev, ISO_LINK))
3767                 return;
3768
3769         cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3770                 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3771         while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3772                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3773                         BT_DBG("skb %p len %d", skb, skb->len);
3774                         hci_send_frame(hdev, skb);
3775
3776                         conn->sent++;
3777                         if (conn->sent == ~0)
3778                                 conn->sent = 0;
3779                         (*cnt)--;
3780                 }
3781         }
3782 }
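
/* Illustrative note: the cnt pointer chain mirrors hci_quote_sent():
 * dedicated ISO buffers are used when the controller reports them,
 * otherwise the LE pool, otherwise the shared ACL pool.
 */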
3783
3784 static void hci_tx_work(struct work_struct *work)
3785 {
3786         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3787         struct sk_buff *skb;
3788
3789         BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3790                hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3791
3792         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3793                 /* Schedule queues and send stuff to HCI driver */
3794                 hci_sched_sco(hdev);
3795                 hci_sched_esco(hdev);
3796                 hci_sched_iso(hdev);
3797                 hci_sched_acl(hdev);
3798                 hci_sched_le(hdev);
3799         }
3800
3801         /* Flush any queued raw (unknown type) packets */
3802         while ((skb = skb_dequeue(&hdev->raw_q)))
3803                 hci_send_frame(hdev, skb);
3804 }
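
/* Illustrative note on ordering: SCO and eSCO are scheduled first as they
 * are the most latency-sensitive, followed by ISO, ACL and LE; raw
 * packets bypass the scheduler entirely and are flushed last.
 */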
3805
3806 /* ----- HCI RX task (incoming data processing) ----- */
3807
3808 /* ACL data packet */
3809 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3810 {
3811         struct hci_acl_hdr *hdr = (void *) skb->data;
3812         struct hci_conn *conn;
3813         __u16 handle, flags;
3814
3815         skb_pull(skb, HCI_ACL_HDR_SIZE);
3816
3817         handle = __le16_to_cpu(hdr->handle);
3818         flags  = hci_flags(handle);
3819         handle = hci_handle(handle);
3820
3821         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3822                handle, flags);
3823
3824         hdev->stat.acl_rx++;
3825
3826         hci_dev_lock(hdev);
3827         conn = hci_conn_hash_lookup_handle(hdev, handle);
3828         hci_dev_unlock(hdev);
3829
3830         if (conn) {
3831                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3832
3833                 /* Send to upper protocol */
3834                 l2cap_recv_acldata(conn, skb, flags);
3835                 return;
3836         } else {
3837                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3838                            handle);
3839         }
3840
3841         kfree_skb(skb);
3842 }
3843
3844 /* SCO data packet */
3845 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3846 {
3847         struct hci_sco_hdr *hdr = (void *) skb->data;
3848         struct hci_conn *conn;
3849         __u16 handle, flags;
3850
3851         skb_pull(skb, HCI_SCO_HDR_SIZE);
3852
3853         handle = __le16_to_cpu(hdr->handle);
3854         flags  = hci_flags(handle);
3855         handle = hci_handle(handle);
3856
3857         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3858                handle, flags);
3859
3860         hdev->stat.sco_rx++;
3861
3862         hci_dev_lock(hdev);
3863         conn = hci_conn_hash_lookup_handle(hdev, handle);
3864         hci_dev_unlock(hdev);
3865
3866         if (conn) {
3867                 /* Send to upper protocol */
3868                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3869                 sco_recv_scodata(conn, skb);
3870                 return;
3871         } else {
3872                 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3873                                        handle);
3874         }
3875
3876         kfree_skb(skb);
3877 }
3878
3879 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3880 {
3881         struct hci_iso_hdr *hdr;
3882         struct hci_conn *conn;
3883         __u16 handle, flags;
3884
3885         hdr = skb_pull_data(skb, sizeof(*hdr));
3886         if (!hdr) {
3887                 bt_dev_err(hdev, "ISO packet too small");
3888                 goto drop;
3889         }
3890
3891         handle = __le16_to_cpu(hdr->handle);
3892         flags  = hci_flags(handle);
3893         handle = hci_handle(handle);
3894
3895         bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3896                    handle, flags);
3897
3898         hci_dev_lock(hdev);
3899         conn = hci_conn_hash_lookup_handle(hdev, handle);
3900         hci_dev_unlock(hdev);
3901
3902         if (!conn) {
3903                 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3904                            handle);
3905                 goto drop;
3906         }
3907
3908         /* Send to upper protocol */
3909         iso_recv(conn, skb, flags);
3910         return;
3911
3912 drop:
3913         kfree_skb(skb);
3914 }
3915
3916 static bool hci_req_is_complete(struct hci_dev *hdev)
3917 {
3918         struct sk_buff *skb;
3919
3920         skb = skb_peek(&hdev->cmd_q);
3921         if (!skb)
3922                 return true;
3923
3924         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3925 }
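
/* Illustrative note: commands belonging to one request sit back to back
 * in cmd_q and only the first command of a request carries HCI_REQ_START,
 * so the current request is complete when the queue is empty or the skb
 * at the head starts the next request.
 */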
3926
3927 static void hci_resend_last(struct hci_dev *hdev)
3928 {
3929         struct hci_command_hdr *sent;
3930         struct sk_buff *skb;
3931         u16 opcode;
3932
3933         if (!hdev->sent_cmd)
3934                 return;
3935
3936         sent = (void *) hdev->sent_cmd->data;
3937         opcode = __le16_to_cpu(sent->opcode);
3938         if (opcode == HCI_OP_RESET)
3939                 return;
3940
3941         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3942         if (!skb)
3943                 return;
3944
3945         skb_queue_head(&hdev->cmd_q, skb);
3946         queue_work(hdev->workqueue, &hdev->cmd_work);
3947 }
3948
3949 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3950                           hci_req_complete_t *req_complete,
3951                           hci_req_complete_skb_t *req_complete_skb)
3952 {
3953         struct sk_buff *skb;
3954         unsigned long flags;
3955
3956         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3957
3958         /* If the completed command doesn't match the last one that was
3959          * sent, we need to handle it specially.
3960          */
3961         if (!hci_sent_cmd_data(hdev, opcode)) {
3962                 /* Some CSR-based controllers generate a spontaneous
3963                  * reset complete event during init, so any pending
3964                  * command will never be completed. In such a case
3965                  * we need to resend whatever was the last command
3966                  * sent.
3967                  */
3968                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3969                         hci_resend_last(hdev);
3970
3971                 return;
3972         }
3973
3974         /* If we reach this point this event matches the last command sent */
3975         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3976
3977         /* If the command succeeded and there are still more commands in
3978          * this request, the request is not yet complete.
3979          */
3980         if (!status && !hci_req_is_complete(hdev))
3981                 return;
3982
3983         /* If this was the last command in a request, the complete
3984          * callback would be found in hdev->sent_cmd instead of the
3985          * command queue (hdev->cmd_q).
3986          */
3987         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
3988                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
3989                 return;
3990         }
3991
3992         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
3993                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
3994                 return;
3995         }
3996
3997         /* Remove all pending commands belonging to this request */
3998         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3999         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4000                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4001                         __skb_queue_head(&hdev->cmd_q, skb);
4002                         break;
4003                 }
4004
4005                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4006                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4007                 else
4008                         *req_complete = bt_cb(skb)->hci.req_complete;
4009                 dev_kfree_skb_irq(skb);
4010         }
4011         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4012 }
4013
4014 static void hci_rx_work(struct work_struct *work)
4015 {
4016         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4017         struct sk_buff *skb;
4018
4019         BT_DBG("%s", hdev->name);
4020
4021         /* The kcov_remote functions are used to collect packet parsing
4022          * coverage from this background thread and to associate it with
4023          * the thread of the syscall that originally injected the packet.
4024          * This helps with fuzzing the kernel.
4025          */
4026         for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4027                 kcov_remote_start_common(skb_get_kcov_handle(skb));
4028
4029                 /* Send copy to monitor */
4030                 hci_send_to_monitor(hdev, skb);
4031
4032                 if (atomic_read(&hdev->promisc)) {
4033                         /* Send copy to the sockets */
4034                         hci_send_to_sock(hdev, skb);
4035                 }
4036
4037                 /* If the device has been opened in HCI_USER_CHANNEL,
4038                  * userspace has exclusive access to the device.
4039                  * While the device is in HCI_INIT, we still need to
4040                  * process the packets so that the driver can
4041                  * complete its setup().
4042                  */
4043                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4044                     !test_bit(HCI_INIT, &hdev->flags)) {
4045                         kfree_skb(skb);
4046                         continue;
4047                 }
4048
4049                 if (test_bit(HCI_INIT, &hdev->flags)) {
4050                         /* Don't process data packets in this state. */
4051                         switch (hci_skb_pkt_type(skb)) {
4052                         case HCI_ACLDATA_PKT:
4053                         case HCI_SCODATA_PKT:
4054                         case HCI_ISODATA_PKT:
4055                                 kfree_skb(skb);
4056                                 continue;
4057                         }
4058                 }
4059
4060                 /* Process frame */
4061                 switch (hci_skb_pkt_type(skb)) {
4062                 case HCI_EVENT_PKT:
4063                         BT_DBG("%s Event packet", hdev->name);
4064                         hci_event_packet(hdev, skb);
4065                         break;
4066
4067                 case HCI_ACLDATA_PKT:
4068                         BT_DBG("%s ACL data packet", hdev->name);
4069                         hci_acldata_packet(hdev, skb);
4070                         break;
4071
4072                 case HCI_SCODATA_PKT:
4073                         BT_DBG("%s SCO data packet", hdev->name);
4074                         hci_scodata_packet(hdev, skb);
4075                         break;
4076
4077                 case HCI_ISODATA_PKT:
4078                         BT_DBG("%s ISO data packet", hdev->name);
4079                         hci_isodata_packet(hdev, skb);
4080                         break;
4081
4082                 default:
4083                         kfree_skb(skb);
4084                         break;
4085                 }
4086         }
4087 }
4088
4089 static void hci_cmd_work(struct work_struct *work)
4090 {
4091         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4092         struct sk_buff *skb;
4093
4094         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4095                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4096
4097         /* Send queued commands */
4098         if (atomic_read(&hdev->cmd_cnt)) {
4099                 skb = skb_dequeue(&hdev->cmd_q);
4100                 if (!skb)
4101                         return;
4102
4103                 kfree_skb(hdev->sent_cmd);
4104
4105                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4106                 if (hdev->sent_cmd) {
4107                         int res;
4108                         if (hci_req_status_pend(hdev))
4109                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4110                         atomic_dec(&hdev->cmd_cnt);
4111
4112                         res = hci_send_frame(hdev, skb);
4113                         if (res < 0)
4114                                 __hci_cmd_sync_cancel(hdev, -res);
4115
4116                         rcu_read_lock();
4117                         if (test_bit(HCI_RESET, &hdev->flags) ||
4118                             hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4119                                 cancel_delayed_work(&hdev->cmd_timer);
4120                         else
4121                                 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4122                                                    HCI_CMD_TIMEOUT);
4123                         rcu_read_unlock();
4124                 } else {
4125                         skb_queue_head(&hdev->cmd_q, skb);
4126                         queue_work(hdev->workqueue, &hdev->cmd_work);
4127                 }
4128         }
4129 }