net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

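/* Return true while a discovery procedure is in progress, i.e. the
 * discovery state machine is in the FINDING or RESOLVING state.
 */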
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

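/* Move the discovery state machine to @state and, on the STOPPED and
 * FINDING transitions, emit the matching mgmt Discovering event.
 */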
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

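/* Free every entry in the inquiry cache and re-initialize its unknown
 * and resolve sub-lists. Called with hdev->lock held.
 */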
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

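/* Keep the resolve list sorted by ascending abs(RSSI) so that name
 * resolution is attempted for the strongest signals first; entries
 * whose resolution is already NAME_PENDING are skipped past.
 */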
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

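/* Copy up to @num entries from the inquiry cache into @buf as an array
 * of struct inquiry_info and return the number of entries copied.
 */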
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

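/* Handler for the HCIINQUIRY ioctl: optionally (re)run an inquiry and
 * then copy the resulting inquiry cache back to user space.
 */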
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* inquiry_cache_dump() can't sleep, so allocate a temporary
	 * buffer and copy it to user space afterwards.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

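/* Mirror a legacy HCISETSCAN change into the HCI_CONNECTABLE and
 * HCI_DISCOVERABLE flags and notify the management interface when
 * either setting actually changed.
 */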
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

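/* Handler for the legacy HCISET* device ioctls (HCISETAUTH,
 * HCISETENCRYPT, HCISETSCAN and friends).
 */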
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

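/* Handler for the HCIGETDEVLIST ioctl: return the id and flags of up
 * to dev_num registered controllers.
 */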
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

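/* Handler for the HCIGETDEVINFO ioctl: fill in a struct hci_dev_info
 * snapshot for a single controller.
 */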
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

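/* rfkill callback: track the block state in the HCI_RFKILLED flag and
 * close the controller when it gets blocked, unless setup or config is
 * still in progress. State changes are rejected with -EBUSY while the
 * device is bound to a user channel.
 */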
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

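/* Worker run after a hardware error event: let the driver handle the
 * error through its hw_error hook if it provides one, then cycle the
 * controller by closing and reopening it.
 */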
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

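/* Return true if the given key value of @type is on the blocked-keys
 * list and therefore must not be handed out for use.
 */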
bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

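/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and on the bonding requirements
 * both sides signalled during pairing.
 */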
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither the local nor the remote side had no-bonding as a
	 * requirement.
	 */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

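/* Look up a Long Term Key by peer address, address type and local
 * role. Keys on the blocked-keys list trigger a ratelimited warning
 * and are never returned.
 */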
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

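/* Create or update the stored link key for @bdaddr. On return,
 * *persistent tells the caller whether the key should be kept across
 * reboots (see hci_persistent_key()).
 */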
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

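/* Check whether any pairing material exists for the given address:
 * a link key for BR/EDR, or an LTK for LE (after resolving the
 * identity address through a matching IRK, if one is known).
 */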
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

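/* Store remote OOB pairing data for @bdaddr. The ->present field
 * encodes which hash/randomizer pairs are valid: 0x01 for P-192 only,
 * 0x02 for P-256 only and 0x03 for both.
 */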
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

1693 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
1694                          u16 adv_data_len, u8 *adv_data,
1695                          u16 scan_rsp_len, u8 *scan_rsp_data,
1696                          u16 timeout, u16 duration, s8 tx_power,
1697                          u32 min_interval, u32 max_interval)
1698 {
1699         struct adv_info *adv_instance;
1700
1701         adv_instance = hci_find_adv_instance(hdev, instance);
1702         if (adv_instance) {
1703                 memset(adv_instance->adv_data, 0,
1704                        sizeof(adv_instance->adv_data));
1705                 memset(adv_instance->scan_rsp_data, 0,
1706                        sizeof(adv_instance->scan_rsp_data));
1707         } else {
1708                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1709                     instance < 1 || instance > hdev->le_num_of_adv_sets)
1710                         return -EOVERFLOW;
1711
1712                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
1713                 if (!adv_instance)
1714                         return -ENOMEM;
1715
1716                 adv_instance->pending = true;
1717                 adv_instance->instance = instance;
1718                 list_add(&adv_instance->list, &hdev->adv_instances);
1719                 hdev->adv_instance_cnt++;
1720         }
1721
1722         adv_instance->flags = flags;
1723         adv_instance->adv_data_len = adv_data_len;
1724         adv_instance->scan_rsp_len = scan_rsp_len;
1725         adv_instance->min_interval = min_interval;
1726         adv_instance->max_interval = max_interval;
1727         adv_instance->tx_power = tx_power;
1728
1729         if (adv_data_len)
1730                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1731
1732         if (scan_rsp_len)
1733                 memcpy(adv_instance->scan_rsp_data,
1734                        scan_rsp_data, scan_rsp_len);
1735
1736         adv_instance->timeout = timeout;
1737         adv_instance->remaining_time = timeout;
1738
1739         if (duration == 0)
1740                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
1741         else
1742                 adv_instance->duration = duration;
1743
1744         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
1745                           adv_instance_rpa_expired);
1746
1747         BT_DBG("%s for instance %d", hdev->name, instance);
1748
1749         return 0;
1750 }
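
/* Illustrative sketch (not part of the original file): registering a minimal
 * advertising instance under hdev->lock. The payload is a hypothetical
 * three-byte Flags AD element; intervals are given in 0.625 ms units and
 * here reuse the controller-wide defaults set up in hci_alloc_dev_priv().
 */
static int __maybe_unused example_add_adv_instance(struct hci_dev *hdev)
{
        u8 adv_data[] = { 0x02, 0x01, 0x06 };   /* len, type (Flags), value */
        int err;

        hci_dev_lock(hdev);
        err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), adv_data,
                                   0, NULL, 0, 0, HCI_TX_POWER_INVALID,
                                   hdev->le_adv_min_interval,
                                   hdev->le_adv_max_interval);
        hci_dev_unlock(hdev);

        return err;
}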
1751
1752 /* This function requires the caller holds hdev->lock */
1753 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1754                               u16 adv_data_len, u8 *adv_data,
1755                               u16 scan_rsp_len, u8 *scan_rsp_data)
1756 {
1757         struct adv_info *adv_instance;
1758
1759         adv_instance = hci_find_adv_instance(hdev, instance);
1760
1761         /* If advertisement doesn't exist, we can't modify its data */
1762         if (!adv_instance)
1763                 return -ENOENT;
1764
1765         if (adv_data_len) {
1766                 memset(adv_instance->adv_data, 0,
1767                        sizeof(adv_instance->adv_data));
1768                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1769                 adv_instance->adv_data_len = adv_data_len;
1770         }
1771
1772         if (scan_rsp_len) {
1773                 memset(adv_instance->scan_rsp_data, 0,
1774                        sizeof(adv_instance->scan_rsp_data));
1775                 memcpy(adv_instance->scan_rsp_data,
1776                        scan_rsp_data, scan_rsp_len);
1777                 adv_instance->scan_rsp_len = scan_rsp_len;
1778         }
1779
1780         return 0;
1781 }
1782
1783 /* This function requires the caller holds hdev->lock */
1784 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1785 {
1786         u32 flags;
1787         struct adv_info *adv;
1788
1789         if (instance == 0x00) {
1790                 /* Instance 0 always manages the "Tx Power" and "Flags"
1791                  * fields
1792                  */
1793                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1794
1795                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1796                  * corresponds to the "connectable" instance flag.
1797                  */
1798                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1799                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1800
1801                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1802                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1803                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1804                         flags |= MGMT_ADV_FLAG_DISCOV;
1805
1806                 return flags;
1807         }
1808
1809         adv = hci_find_adv_instance(hdev, instance);
1810
1811         /* Return 0 when given an invalid instance identifier. */
1812         if (!adv)
1813                 return 0;
1814
1815         return adv->flags;
1816 }
1817
1818 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1819 {
1820         struct adv_info *adv;
1821
1822         /* Instance 0x00 always sets the local name */
1823         if (instance == 0x00)
1824                 return true;
1825
1826         adv = hci_find_adv_instance(hdev, instance);
1827         if (!adv)
1828                 return false;
1829
1830         if (adv->flags & (MGMT_ADV_FLAG_APPEARANCE |
1831                           MGMT_ADV_FLAG_LOCAL_NAME))
1832                 return true;
1833
1834         return adv->scan_rsp_len;
1835 }
1836
1837 /* This function requires the caller holds hdev->lock */
1838 void hci_adv_monitors_clear(struct hci_dev *hdev)
1839 {
1840         struct adv_monitor *monitor;
1841         int handle;
1842
1843         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1844                 hci_free_adv_monitor(hdev, monitor);
1845
1846         idr_destroy(&hdev->adv_monitors_idr);
1847 }
1848
1849 /* Frees the monitor structure and does some bookkeeping.
1850  * This function requires the caller holds hdev->lock.
1851  */
1852 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1853 {
1854         struct adv_pattern *pattern;
1855         struct adv_pattern *tmp;
1856
1857         if (!monitor)
1858                 return;
1859
1860         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1861                 list_del(&pattern->list);
1862                 kfree(pattern);
1863         }
1864
1865         if (monitor->handle)
1866                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1867
1868         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1869                 hdev->adv_monitors_cnt--;
1870                 mgmt_adv_monitor_removed(hdev, monitor->handle);
1871         }
1872
1873         kfree(monitor);
1874 }
1875
1876 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
1877 {
1878         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
1879 }
1880
1881 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
1882 {
1883         return mgmt_remove_adv_monitor_complete(hdev, status);
1884 }
1885
1886 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1887  * also attempts to forward the request to the controller.
1888  * Returns true if request is forwarded (result is pending), false otherwise.
1889  * This function requires the caller holds hdev->lock.
1890  */
1891 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
1892                          int *err)
1893 {
1894         int min, max, handle;
1895
1896         *err = 0;
1897
1898         if (!monitor) {
1899                 *err = -EINVAL;
1900                 return false;
1901         }
1902
1903         min = HCI_MIN_ADV_MONITOR_HANDLE;
1904         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1905         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1906                            GFP_KERNEL);
1907         if (handle < 0) {
1908                 *err = handle;
1909                 return false;
1910         }
1911
1912         monitor->handle = handle;
1913
1914         if (!hdev_is_powered(hdev))
1915                 return false;
1916
1917         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1918         case HCI_ADV_MONITOR_EXT_NONE:
1919                 hci_update_passive_scan(hdev);
1920                 bt_dev_dbg(hdev, "add monitor status %d", *err);
1921                 /* Message was not forwarded to controller - not an error */
1922                 return false;
1923         case HCI_ADV_MONITOR_EXT_MSFT:
1924                 *err = msft_add_monitor_pattern(hdev, monitor);
1925                 bt_dev_dbg(hdev, "add monitor msft status %d", *err);
1927                 break;
1928         }
1929
1930         return (*err == 0);
1931 }
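
/* Illustrative sketch (not part of the original file): the three outcomes a
 * caller of hci_add_adv_monitor() must distinguish, assuming "monitor" was
 * allocated and populated elsewhere and hdev->lock is held.
 */
static void __maybe_unused example_add_monitor(struct hci_dev *hdev,
                                               struct adv_monitor *monitor)
{
        int err;

        if (hci_add_adv_monitor(hdev, monitor, &err))
                return; /* Forwarded to the controller; result is pending */

        if (err)
                bt_dev_err(hdev, "failed to add monitor (%d)", err);
        /* else: accepted locally, without controller offload */
}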
1932
1933 /* Attempts to tell the controller to remove the monitor and then frees it.
1934  * If the controller has no corresponding handle, remove the monitor anyway.
1935  * Returns true if request is forwarded (result is pending), false otherwise.
1936  * This function requires the caller holds hdev->lock.
1937  */
1938 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
1939                                    struct adv_monitor *monitor,
1940                                    u16 handle, int *err)
1941 {
1942         *err = 0;
1943
1944         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1945         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1946                 goto free_monitor;
1947         case HCI_ADV_MONITOR_EXT_MSFT:
1948                 *err = msft_remove_monitor(hdev, monitor, handle);
1949                 break;
1950         }
1951
1952         /* If no matching handle is registered, just free the monitor */
1953         if (*err == -ENOENT)
1954                 goto free_monitor;
1955
1956         return (*err == 0);
1957
1958 free_monitor:
1959         if (*err == -ENOENT)
1960                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1961                             monitor->handle);
1962         hci_free_adv_monitor(hdev, monitor);
1963
1964         *err = 0;
1965         return false;
1966 }
1967
1968 /* Returns true if request is forwarded (result is pending), false otherwise.
1969  * This function requires the caller holds hdev->lock.
1970  */
1971 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
1972 {
1973         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1974         bool pending;
1975
1976         if (!monitor) {
1977                 *err = -EINVAL;
1978                 return false;
1979         }
1980
1981         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
1982         if (!*err && !pending)
1983                 hci_update_passive_scan(hdev);
1984
1985         bt_dev_dbg(hdev, "remove monitor handle %d, status %d, %spending",
1986                    handle, *err, pending ? "" : "not ");
1987
1988         return pending;
1989 }
1990
1991 /* Returns true if request is forwarded (result is pending), false otherwise.
1992  * This function requires the caller holds hdev->lock.
1993  */
1994 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
1995 {
1996         struct adv_monitor *monitor;
1997         int idr_next_id = 0;
1998         bool pending = false;
1999         bool update = false;
2000
2001         *err = 0;
2002
2003         while (!*err && !pending) {
2004                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2005                 if (!monitor)
2006                         break;
2007
2008                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
2009
2010                 if (!*err && !pending)
2011                         update = true;
2012         }
2013
2014         if (update)
2015                 hci_update_passive_scan(hdev);
2016
2017         bt_dev_dbg(hdev, "remove all monitors status %d, %spending",
2018                    *err, pending ? "" : "not ");
2019
2020         return pending;
2021 }
2022
2023 /* This function requires the caller holds hdev->lock */
2024 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2025 {
2026         return !idr_is_empty(&hdev->adv_monitors_idr);
2027 }
2028
2029 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2030 {
2031         if (msft_monitor_supported(hdev))
2032                 return HCI_ADV_MONITOR_EXT_MSFT;
2033
2034         return HCI_ADV_MONITOR_EXT_NONE;
2035 }
2036
2037 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2038                                          bdaddr_t *bdaddr, u8 type)
2039 {
2040         struct bdaddr_list *b;
2041
2042         list_for_each_entry(b, bdaddr_list, list) {
2043                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2044                         return b;
2045         }
2046
2047         return NULL;
2048 }
2049
2050 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2051                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2052                                 u8 type)
2053 {
2054         struct bdaddr_list_with_irk *b;
2055
2056         list_for_each_entry(b, bdaddr_list, list) {
2057                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2058                         return b;
2059         }
2060
2061         return NULL;
2062 }
2063
2064 struct bdaddr_list_with_flags *
2065 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2066                                   bdaddr_t *bdaddr, u8 type)
2067 {
2068         struct bdaddr_list_with_flags *b;
2069
2070         list_for_each_entry(b, bdaddr_list, list) {
2071                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2072                         return b;
2073         }
2074
2075         return NULL;
2076 }
2077
2078 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2079 {
2080         struct bdaddr_list *b, *n;
2081
2082         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2083                 list_del(&b->list);
2084                 kfree(b);
2085         }
2086 }
2087
2088 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2089 {
2090         struct bdaddr_list *entry;
2091
2092         if (!bacmp(bdaddr, BDADDR_ANY))
2093                 return -EBADF;
2094
2095         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2096                 return -EEXIST;
2097
2098         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2099         if (!entry)
2100                 return -ENOMEM;
2101
2102         bacpy(&entry->bdaddr, bdaddr);
2103         entry->bdaddr_type = type;
2104
2105         list_add(&entry->list, list);
2106
2107         return 0;
2108 }
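
/* Illustrative sketch (not part of the original file): a typical
 * add/lookup/del round trip on an address list, assuming hdev->lock is held
 * by the caller as elsewhere in this file. The address bytes are
 * hypothetical and, as for every bdaddr_t, stored least-significant first.
 */
static void __maybe_unused example_bdaddr_list(struct hci_dev *hdev)
{
        bdaddr_t addr = {{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 }};

        if (!hci_bdaddr_list_add(&hdev->accept_list, &addr, BDADDR_BREDR)) {
                WARN_ON(!hci_bdaddr_list_lookup(&hdev->accept_list, &addr,
                                                BDADDR_BREDR));
                hci_bdaddr_list_del(&hdev->accept_list, &addr, BDADDR_BREDR);
        }
}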
2109
2110 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2111                                         u8 type, u8 *peer_irk, u8 *local_irk)
2112 {
2113         struct bdaddr_list_with_irk *entry;
2114
2115         if (!bacmp(bdaddr, BDADDR_ANY))
2116                 return -EBADF;
2117
2118         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2119                 return -EEXIST;
2120
2121         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2122         if (!entry)
2123                 return -ENOMEM;
2124
2125         bacpy(&entry->bdaddr, bdaddr);
2126         entry->bdaddr_type = type;
2127
2128         if (peer_irk)
2129                 memcpy(entry->peer_irk, peer_irk, 16);
2130
2131         if (local_irk)
2132                 memcpy(entry->local_irk, local_irk, 16);
2133
2134         list_add(&entry->list, list);
2135
2136         return 0;
2137 }
2138
2139 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2140                                    u8 type, u32 flags)
2141 {
2142         struct bdaddr_list_with_flags *entry;
2143
2144         if (!bacmp(bdaddr, BDADDR_ANY))
2145                 return -EBADF;
2146
2147         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2148                 return -EEXIST;
2149
2150         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2151         if (!entry)
2152                 return -ENOMEM;
2153
2154         bacpy(&entry->bdaddr, bdaddr);
2155         entry->bdaddr_type = type;
2156         bitmap_from_u64(entry->flags, flags);
2157
2158         list_add(&entry->list, list);
2159
2160         return 0;
2161 }
2162
2163 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2164 {
2165         struct bdaddr_list *entry;
2166
2167         if (!bacmp(bdaddr, BDADDR_ANY)) {
2168                 hci_bdaddr_list_clear(list);
2169                 return 0;
2170         }
2171
2172         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2173         if (!entry)
2174                 return -ENOENT;
2175
2176         list_del(&entry->list);
2177         kfree(entry);
2178
2179         return 0;
2180 }
2181
2182 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2183                                                         u8 type)
2184 {
2185         struct bdaddr_list_with_irk *entry;
2186
2187         if (!bacmp(bdaddr, BDADDR_ANY)) {
2188                 hci_bdaddr_list_clear(list);
2189                 return 0;
2190         }
2191
2192         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2193         if (!entry)
2194                 return -ENOENT;
2195
2196         list_del(&entry->list);
2197         kfree(entry);
2198
2199         return 0;
2200 }
2201
2202 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2203                                    u8 type)
2204 {
2205         struct bdaddr_list_with_flags *entry;
2206
2207         if (!bacmp(bdaddr, BDADDR_ANY)) {
2208                 hci_bdaddr_list_clear(list);
2209                 return 0;
2210         }
2211
2212         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2213         if (!entry)
2214                 return -ENOENT;
2215
2216         list_del(&entry->list);
2217         kfree(entry);
2218
2219         return 0;
2220 }
2221
2222 /* This function requires the caller holds hdev->lock */
2223 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2224                                                bdaddr_t *addr, u8 addr_type)
2225 {
2226         struct hci_conn_params *params;
2227
2228         list_for_each_entry(params, &hdev->le_conn_params, list) {
2229                 if (bacmp(&params->addr, addr) == 0 &&
2230                     params->addr_type == addr_type) {
2231                         return params;
2232                 }
2233         }
2234
2235         return NULL;
2236 }
2237
2238 /* This function requires the caller holds hdev->lock */
2239 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2240                                                   bdaddr_t *addr, u8 addr_type)
2241 {
2242         struct hci_conn_params *param;
2243
2244         list_for_each_entry(param, list, action) {
2245                 if (bacmp(&param->addr, addr) == 0 &&
2246                     param->addr_type == addr_type)
2247                         return param;
2248         }
2249
2250         return NULL;
2251 }
2252
2253 /* This function requires the caller holds hdev->lock */
2254 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2255                                             bdaddr_t *addr, u8 addr_type)
2256 {
2257         struct hci_conn_params *params;
2258
2259         params = hci_conn_params_lookup(hdev, addr, addr_type);
2260         if (params)
2261                 return params;
2262
2263         params = kzalloc(sizeof(*params), GFP_KERNEL);
2264         if (!params) {
2265                 bt_dev_err(hdev, "out of memory");
2266                 return NULL;
2267         }
2268
2269         bacpy(&params->addr, addr);
2270         params->addr_type = addr_type;
2271
2272         list_add(&params->list, &hdev->le_conn_params);
2273         INIT_LIST_HEAD(&params->action);
2274
2275         params->conn_min_interval = hdev->le_conn_min_interval;
2276         params->conn_max_interval = hdev->le_conn_max_interval;
2277         params->conn_latency = hdev->le_conn_latency;
2278         params->supervision_timeout = hdev->le_supv_timeout;
2279         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2280
2281         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2282
2283         return params;
2284 }
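
/* Illustrative sketch (not part of the original file): creating (or reusing)
 * connection parameters for a peer and requesting auto-connect. Real callers
 * such as the mgmt code also link params->action into hdev->pend_le_conns;
 * that bookkeeping is omitted here.
 */
static void __maybe_unused example_auto_connect(struct hci_dev *hdev,
                                                bdaddr_t *addr)
{
        struct hci_conn_params *params;

        hci_dev_lock(hdev);
        params = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC);
        if (params)
                params->auto_connect = HCI_AUTO_CONN_ALWAYS;
        hci_dev_unlock(hdev);

        hci_update_passive_scan(hdev);
}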
2285
2286 static void hci_conn_params_free(struct hci_conn_params *params)
2287 {
2288         if (params->conn) {
2289                 hci_conn_drop(params->conn);
2290                 hci_conn_put(params->conn);
2291         }
2292
2293         list_del(&params->action);
2294         list_del(&params->list);
2295         kfree(params);
2296 }
2297
2298 /* This function requires the caller holds hdev->lock */
2299 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2300 {
2301         struct hci_conn_params *params;
2302
2303         params = hci_conn_params_lookup(hdev, addr, addr_type);
2304         if (!params)
2305                 return;
2306
2307         hci_conn_params_free(params);
2308
2309         hci_update_passive_scan(hdev);
2310
2311         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2312 }
2313
2314 /* This function requires the caller holds hdev->lock */
2315 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2316 {
2317         struct hci_conn_params *params, *tmp;
2318
2319         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2320                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2321                         continue;
2322
2323                 /* If trying to establish a one-time connection to a disabled
2324                  * device, leave the params but mark them as explicit-connect.
2325                  */
2326                 if (params->explicit_connect) {
2327                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2328                         continue;
2329                 }
2330
2331                 list_del(&params->list);
2332                 kfree(params);
2333         }
2334
2335         BT_DBG("All LE disabled connection parameters were removed");
2336 }
2337
2338 /* This function requires the caller holds hdev->lock */
2339 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2340 {
2341         struct hci_conn_params *params, *tmp;
2342
2343         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2344                 hci_conn_params_free(params);
2345
2346         BT_DBG("All LE connection parameters were removed");
2347 }
2348
2349 /* Copy the Identity Address of the controller.
2350  *
2351  * If the controller has a public BD_ADDR, then by default use that one.
2352  * If this is an LE-only controller without a public address, default to
2353  * the static random address.
2354  *
2355  * For debugging purposes it is possible to force controllers with a
2356  * public address to use the static random address instead.
2357  *
2358  * In case BR/EDR has been disabled on a dual-mode controller and
2359  * userspace has configured a static address, then that address
2360  * becomes the identity address instead of the public BR/EDR address.
2361  */
2362 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2363                                u8 *bdaddr_type)
2364 {
2365         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2366             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2367             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2368              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2369                 bacpy(bdaddr, &hdev->static_addr);
2370                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2371         } else {
2372                 bacpy(bdaddr, &hdev->bdaddr);
2373                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2374         }
2375 }
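
/* Illustrative sketch (not part of the original file): reading back the
 * identity address chosen by the rules described above.
 */
static void __maybe_unused example_identity(struct hci_dev *hdev)
{
        bdaddr_t bdaddr;
        u8 bdaddr_type;

        hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
        bt_dev_dbg(hdev, "identity %pMR (type %u)", &bdaddr, bdaddr_type);
}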
2376
2377 static void hci_clear_wake_reason(struct hci_dev *hdev)
2378 {
2379         hci_dev_lock(hdev);
2380
2381         hdev->wake_reason = 0;
2382         bacpy(&hdev->wake_addr, BDADDR_ANY);
2383         hdev->wake_addr_type = 0;
2384
2385         hci_dev_unlock(hdev);
2386 }
2387
2388 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2389                                 void *data)
2390 {
2391         struct hci_dev *hdev =
2392                 container_of(nb, struct hci_dev, suspend_notifier);
2393         int ret = 0;
2394
2395         if (action == PM_SUSPEND_PREPARE)
2396                 ret = hci_suspend_dev(hdev);
2397         else if (action == PM_POST_SUSPEND)
2398                 ret = hci_resume_dev(hdev);
2399
2400         if (ret)
2401                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2402                            action, ret);
2403
2404         return NOTIFY_DONE;
2405 }
2406
2407 /* Alloc HCI device */
2408 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2409 {
2410         struct hci_dev *hdev;
2411         unsigned int alloc_size;
2412
2413         alloc_size = sizeof(*hdev);
2414         if (sizeof_priv) {
2415                 /* FIXME: may need alignment? */
2416                 alloc_size += sizeof_priv;
2417         }
2418
2419         hdev = kzalloc(alloc_size, GFP_KERNEL);
2420         if (!hdev)
2421                 return NULL;
2422
2423         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2424         hdev->esco_type = (ESCO_HV1);
2425         hdev->link_mode = (HCI_LM_ACCEPT);
2426         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
2427         hdev->io_capability = 0x03;     /* No Input No Output */
2428         hdev->manufacturer = 0xffff;    /* Default to internal use */
2429         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2430         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2431         hdev->adv_instance_cnt = 0;
2432         hdev->cur_adv_instance = 0x00;
2433         hdev->adv_instance_timeout = 0;
2434
2435         hdev->advmon_allowlist_duration = 300;
2436         hdev->advmon_no_filter_duration = 500;
2437         hdev->enable_advmon_interleave_scan = 0x00;     /* Disabled by default */
2438
2439         hdev->sniff_max_interval = 800;
2440         hdev->sniff_min_interval = 80;
2441
2442         hdev->le_adv_channel_map = 0x07;
2443         hdev->le_adv_min_interval = 0x0800;
2444         hdev->le_adv_max_interval = 0x0800;
2445         hdev->le_scan_interval = 0x0060;
2446         hdev->le_scan_window = 0x0030;
2447         hdev->le_scan_int_suspend = 0x0400;
2448         hdev->le_scan_window_suspend = 0x0012;
2449         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2450         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2451         hdev->le_scan_int_adv_monitor = 0x0060;
2452         hdev->le_scan_window_adv_monitor = 0x0030;
2453         hdev->le_scan_int_connect = 0x0060;
2454         hdev->le_scan_window_connect = 0x0060;
2455         hdev->le_conn_min_interval = 0x0018;
2456         hdev->le_conn_max_interval = 0x0028;
2457         hdev->le_conn_latency = 0x0000;
2458         hdev->le_supv_timeout = 0x002a;
2459         hdev->le_def_tx_len = 0x001b;
2460         hdev->le_def_tx_time = 0x0148;
2461         hdev->le_max_tx_len = 0x001b;
2462         hdev->le_max_tx_time = 0x0148;
2463         hdev->le_max_rx_len = 0x001b;
2464         hdev->le_max_rx_time = 0x0148;
2465         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2466         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2467         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2468         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2469         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2470         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2471         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2472         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2473         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2474
2475         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2476         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2477         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2478         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2479         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2480         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2481
2482         /* default 1.28 sec page scan */
2483         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2484         hdev->def_page_scan_int = 0x0800;
2485         hdev->def_page_scan_window = 0x0012;
2486
2487         mutex_init(&hdev->lock);
2488         mutex_init(&hdev->req_lock);
2489
2490         INIT_LIST_HEAD(&hdev->mgmt_pending);
2491         INIT_LIST_HEAD(&hdev->reject_list);
2492         INIT_LIST_HEAD(&hdev->accept_list);
2493         INIT_LIST_HEAD(&hdev->uuids);
2494         INIT_LIST_HEAD(&hdev->link_keys);
2495         INIT_LIST_HEAD(&hdev->long_term_keys);
2496         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2497         INIT_LIST_HEAD(&hdev->remote_oob_data);
2498         INIT_LIST_HEAD(&hdev->le_accept_list);
2499         INIT_LIST_HEAD(&hdev->le_resolv_list);
2500         INIT_LIST_HEAD(&hdev->le_conn_params);
2501         INIT_LIST_HEAD(&hdev->pend_le_conns);
2502         INIT_LIST_HEAD(&hdev->pend_le_reports);
2503         INIT_LIST_HEAD(&hdev->conn_hash.list);
2504         INIT_LIST_HEAD(&hdev->adv_instances);
2505         INIT_LIST_HEAD(&hdev->blocked_keys);
2506         INIT_LIST_HEAD(&hdev->monitored_devices);
2507
2508         INIT_LIST_HEAD(&hdev->local_codecs);
2509         INIT_WORK(&hdev->rx_work, hci_rx_work);
2510         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2511         INIT_WORK(&hdev->tx_work, hci_tx_work);
2512         INIT_WORK(&hdev->power_on, hci_power_on);
2513         INIT_WORK(&hdev->error_reset, hci_error_reset);
2514
2515         hci_cmd_sync_init(hdev);
2516
2517         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2518
2519         skb_queue_head_init(&hdev->rx_q);
2520         skb_queue_head_init(&hdev->cmd_q);
2521         skb_queue_head_init(&hdev->raw_q);
2522
2523         init_waitqueue_head(&hdev->req_wait_q);
2524
2525         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2526         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2527
2528         hci_request_setup(hdev);
2529
2530         hci_init_sysfs(hdev);
2531         discovery_init(hdev);
2532
2533         return hdev;
2534 }
2535 EXPORT_SYMBOL(hci_alloc_dev_priv);
2536
2537 /* Free HCI device */
2538 void hci_free_dev(struct hci_dev *hdev)
2539 {
2540         /* Will be freed via the device release callback */
2541         put_device(&hdev->dev);
2542 }
2543 EXPORT_SYMBOL(hci_free_dev);
2544
2545 /* Register HCI device */
2546 int hci_register_dev(struct hci_dev *hdev)
2547 {
2548         int id, error;
2549
2550         if (!hdev->open || !hdev->close || !hdev->send)
2551                 return -EINVAL;
2552
2553         /* Do not allow HCI_AMP devices to register at index 0,
2554          * so the index can be used as the AMP controller ID.
2555          */
2556         switch (hdev->dev_type) {
2557         case HCI_PRIMARY:
2558                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2559                 break;
2560         case HCI_AMP:
2561                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2562                 break;
2563         default:
2564                 return -EINVAL;
2565         }
2566
2567         if (id < 0)
2568                 return id;
2569
2570         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2571         hdev->id = id;
2572
2573         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2574
2575         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2576         if (!hdev->workqueue) {
2577                 error = -ENOMEM;
2578                 goto err;
2579         }
2580
2581         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2582                                                       hdev->name);
2583         if (!hdev->req_workqueue) {
2584                 destroy_workqueue(hdev->workqueue);
2585                 error = -ENOMEM;
2586                 goto err;
2587         }
2588
2589         if (!IS_ERR_OR_NULL(bt_debugfs))
2590                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2591
2592         dev_set_name(&hdev->dev, "%s", hdev->name);
2593
2594         error = device_add(&hdev->dev);
2595         if (error < 0)
2596                 goto err_wqueue;
2597
2598         hci_leds_init(hdev);
2599
2600         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2601                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2602                                     hdev);
2603         if (hdev->rfkill) {
2604                 if (rfkill_register(hdev->rfkill) < 0) {
2605                         rfkill_destroy(hdev->rfkill);
2606                         hdev->rfkill = NULL;
2607                 }
2608         }
2609
2610         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2611                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2612
2613         hci_dev_set_flag(hdev, HCI_SETUP);
2614         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2615
2616         if (hdev->dev_type == HCI_PRIMARY) {
2617                 /* Assume BR/EDR support until proven otherwise (such as
2618                  * through reading supported features during init).
2619                  */
2620                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2621         }
2622
2623         write_lock(&hci_dev_list_lock);
2624         list_add(&hdev->list, &hci_dev_list);
2625         write_unlock(&hci_dev_list_lock);
2626
2627         /* Devices that are marked for raw-only usage are unconfigured
2628          * and should not be included in normal operation.
2629          */
2630         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2631                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2632
2633         /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2634          * callback.
2635          */
2636         if (hdev->wakeup)
2637                 set_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, hdev->conn_flags);
2638
2639         hci_sock_dev_event(hdev, HCI_DEV_REG);
2640         hci_dev_hold(hdev);
2641
2642         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2643                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2644                 error = register_pm_notifier(&hdev->suspend_notifier);
2645                 if (error)
2646                         goto err_wqueue;
2647         }
2648
2649         queue_work(hdev->req_workqueue, &hdev->power_on);
2650
2651         idr_init(&hdev->adv_monitors_idr);
2652         msft_register(hdev);
2653
2654         return id;
2655
2656 err_wqueue:
2657         debugfs_remove_recursive(hdev->debugfs);
2658         destroy_workqueue(hdev->workqueue);
2659         destroy_workqueue(hdev->req_workqueue);
2660 err:
2661         ida_simple_remove(&hci_index_ida, hdev->id);
2662
2663         return error;
2664 }
2665 EXPORT_SYMBOL(hci_register_dev);
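
/* Hypothetical transport callbacks for the registration sketch below; a real
 * driver would talk to its hardware in these.
 */
static int example_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}

/* Illustrative sketch (not part of the original file): the minimal pattern a
 * transport driver follows to bring up a controller. hci_alloc_dev() is the
 * zero-priv-size wrapper around hci_alloc_dev_priv() above; open, close and
 * send are the three mandatory callbacks checked by hci_register_dev().
 */
static int __maybe_unused example_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }

        return 0;
}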
2666
2667 /* Unregister HCI device */
2668 void hci_unregister_dev(struct hci_dev *hdev)
2669 {
2670         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2671
2672         hci_dev_set_flag(hdev, HCI_UNREGISTER);
2673
2674         write_lock(&hci_dev_list_lock);
2675         list_del(&hdev->list);
2676         write_unlock(&hci_dev_list_lock);
2677
2678         hci_cmd_sync_clear(hdev);
2679
2680         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
2681                 unregister_pm_notifier(&hdev->suspend_notifier);
2682
2683         msft_unregister(hdev);
2684
2685         hci_dev_do_close(hdev);
2686
2687         if (!test_bit(HCI_INIT, &hdev->flags) &&
2688             !hci_dev_test_flag(hdev, HCI_SETUP) &&
2689             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2690                 hci_dev_lock(hdev);
2691                 mgmt_index_removed(hdev);
2692                 hci_dev_unlock(hdev);
2693         }
2694
2695         /* mgmt_index_removed should take care of emptying the
2696          * pending list */
2697         BUG_ON(!list_empty(&hdev->mgmt_pending));
2698
2699         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2700
2701         if (hdev->rfkill) {
2702                 rfkill_unregister(hdev->rfkill);
2703                 rfkill_destroy(hdev->rfkill);
2704         }
2705
2706         device_del(&hdev->dev);
2707         /* Actual cleanup is deferred until hci_release_dev(). */
2708         hci_dev_put(hdev);
2709 }
2710 EXPORT_SYMBOL(hci_unregister_dev);
2711
2712 /* Release HCI device */
2713 void hci_release_dev(struct hci_dev *hdev)
2714 {
2715         debugfs_remove_recursive(hdev->debugfs);
2716         kfree_const(hdev->hw_info);
2717         kfree_const(hdev->fw_info);
2718
2719         destroy_workqueue(hdev->workqueue);
2720         destroy_workqueue(hdev->req_workqueue);
2721
2722         hci_dev_lock(hdev);
2723         hci_bdaddr_list_clear(&hdev->reject_list);
2724         hci_bdaddr_list_clear(&hdev->accept_list);
2725         hci_uuids_clear(hdev);
2726         hci_link_keys_clear(hdev);
2727         hci_smp_ltks_clear(hdev);
2728         hci_smp_irks_clear(hdev);
2729         hci_remote_oob_data_clear(hdev);
2730         hci_adv_instances_clear(hdev);
2731         hci_adv_monitors_clear(hdev);
2732         hci_bdaddr_list_clear(&hdev->le_accept_list);
2733         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2734         hci_conn_params_clear_all(hdev);
2735         hci_discovery_filter_clear(hdev);
2736         hci_blocked_keys_clear(hdev);
2737         hci_dev_unlock(hdev);
2738
2739         ida_simple_remove(&hci_index_ida, hdev->id);
2740         kfree_skb(hdev->sent_cmd);
2741         kfree(hdev);
2742 }
2743 EXPORT_SYMBOL(hci_release_dev);
2744
2745 /* Suspend HCI device */
2746 int hci_suspend_dev(struct hci_dev *hdev)
2747 {
2748         int ret;
2749
2750         bt_dev_dbg(hdev, "");
2751
2752         /* Suspend should only act when powered. */
2753         if (!hdev_is_powered(hdev) ||
2754             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2755                 return 0;
2756
2757         /* If powering down don't attempt to suspend */
2758         if (mgmt_powering_down(hdev))
2759                 return 0;
2760
2761         hci_req_sync_lock(hdev);
2762         ret = hci_suspend_sync(hdev);
2763         hci_req_sync_unlock(hdev);
2764
2765         hci_clear_wake_reason(hdev);
2766         mgmt_suspending(hdev, hdev->suspend_state);
2767
2768         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2769         return ret;
2770 }
2771 EXPORT_SYMBOL(hci_suspend_dev);
2772
2773 /* Resume HCI device */
2774 int hci_resume_dev(struct hci_dev *hdev)
2775 {
2776         int ret;
2777
2778         bt_dev_dbg(hdev, "");
2779
2780         /* Resume should only act when powered. */
2781         if (!hdev_is_powered(hdev) ||
2782             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2783                 return 0;
2784
2785         /* If powering down don't attempt to resume */
2786         if (mgmt_powering_down(hdev))
2787                 return 0;
2788
2789         hci_req_sync_lock(hdev);
2790         ret = hci_resume_sync(hdev);
2791         hci_req_sync_unlock(hdev);
2792
2793         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2794                       hdev->wake_addr_type);
2795
2796         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2797         return ret;
2798 }
2799 EXPORT_SYMBOL(hci_resume_dev);
2800
2801 /* Reset HCI device */
2802 int hci_reset_dev(struct hci_dev *hdev)
2803 {
2804         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2805         struct sk_buff *skb;
2806
2807         skb = bt_skb_alloc(sizeof(hw_err), GFP_ATOMIC);
2808         if (!skb)
2809                 return -ENOMEM;
2810
2811         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2812         skb_put_data(skb, hw_err, sizeof(hw_err));
2813
2814         bt_dev_err(hdev, "Injecting HCI hardware error event");
2815
2816         /* Send Hardware Error to upper stack */
2817         return hci_recv_frame(hdev, skb);
2818 }
2819 EXPORT_SYMBOL(hci_reset_dev);
2820
2821 /* Receive frame from HCI drivers */
2822 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2823 {
2824         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2825                       !test_bit(HCI_INIT, &hdev->flags))) {
2826                 kfree_skb(skb);
2827                 return -ENXIO;
2828         }
2829
2830         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
2831             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
2832             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
2833             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
2834                 kfree_skb(skb);
2835                 return -EINVAL;
2836         }
2837
2838         /* Incoming skb */
2839         bt_cb(skb)->incoming = 1;
2840
2841         /* Time stamp */
2842         __net_timestamp(skb);
2843
2844         skb_queue_tail(&hdev->rx_q, skb);
2845         queue_work(hdev->workqueue, &hdev->rx_work);
2846
2847         return 0;
2848 }
2849 EXPORT_SYMBOL(hci_recv_frame);
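
/* Illustrative sketch (not part of the original file): how a transport driver
 * hands a received HCI event buffer to the core. The helper name is
 * hypothetical; ACL, SCO and ISO data only differ in the packet type set.
 */
static int __maybe_unused example_deliver_event(struct hci_dev *hdev,
                                                const void *buf, size_t len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
        skb_put_data(skb, buf, len);

        return hci_recv_frame(hdev, skb);
}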
2850
2851 /* Receive diagnostic message from HCI drivers */
2852 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2853 {
2854         /* Mark as diagnostic packet */
2855         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2856
2857         /* Time stamp */
2858         __net_timestamp(skb);
2859
2860         skb_queue_tail(&hdev->rx_q, skb);
2861         queue_work(hdev->workqueue, &hdev->rx_work);
2862
2863         return 0;
2864 }
2865 EXPORT_SYMBOL(hci_recv_diag);
2866
2867 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2868 {
2869         va_list vargs;
2870
2871         va_start(vargs, fmt);
2872         kfree_const(hdev->hw_info);
2873         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2874         va_end(vargs);
2875 }
2876 EXPORT_SYMBOL(hci_set_hw_info);
2877
2878 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2879 {
2880         va_list vargs;
2881
2882         va_start(vargs, fmt);
2883         kfree_const(hdev->fw_info);
2884         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2885         va_end(vargs);
2886 }
2887 EXPORT_SYMBOL(hci_set_fw_info);
2888
2889 /* ---- Interface to upper protocols ---- */
2890
2891 int hci_register_cb(struct hci_cb *cb)
2892 {
2893         BT_DBG("%p name %s", cb, cb->name);
2894
2895         mutex_lock(&hci_cb_list_lock);
2896         list_add_tail(&cb->list, &hci_cb_list);
2897         mutex_unlock(&hci_cb_list_lock);
2898
2899         return 0;
2900 }
2901 EXPORT_SYMBOL(hci_register_cb);
2902
2903 int hci_unregister_cb(struct hci_cb *cb)
2904 {
2905         BT_DBG("%p name %s", cb, cb->name);
2906
2907         mutex_lock(&hci_cb_list_lock);
2908         list_del(&cb->list);
2909         mutex_unlock(&hci_cb_list_lock);
2910
2911         return 0;
2912 }
2913 EXPORT_SYMBOL(hci_unregister_cb);
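
/* Illustrative sketch (not part of the original file): how an upper protocol
 * hooks into connection events. Only the callbacks a protocol cares about
 * need filling in; this one is hypothetical and would be paired with
 * hci_register_cb(&example_cb) at init and hci_unregister_cb() at exit.
 */
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
        BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb __maybe_unused = {
        .name           = "example",
        .connect_cfm    = example_connect_cfm,
};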
2914
2915 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2916 {
2917         int err;
2918
2919         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2920                skb->len);
2921
2922         /* Time stamp */
2923         __net_timestamp(skb);
2924
2925         /* Send copy to monitor */
2926         hci_send_to_monitor(hdev, skb);
2927
2928         if (atomic_read(&hdev->promisc)) {
2929                 /* Send copy to the sockets */
2930                 hci_send_to_sock(hdev, skb);
2931         }
2932
2933         /* Get rid of the skb owner prior to sending to the driver. */
2934         skb_orphan(skb);
2935
2936         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
2937                 kfree_skb(skb);
2938                 return -EINVAL;
2939         }
2940
2941         err = hdev->send(hdev, skb);
2942         if (err < 0) {
2943                 bt_dev_err(hdev, "sending frame failed (%d)", err);
2944                 kfree_skb(skb);
2945                 return err;
2946         }
2947
2948         return 0;
2949 }
2950
2951 /* Send HCI command */
2952 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2953                  const void *param)
2954 {
2955         struct sk_buff *skb;
2956
2957         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2958
2959         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2960         if (!skb) {
2961                 bt_dev_err(hdev, "no memory for command");
2962                 return -ENOMEM;
2963         }
2964
2965         /* Stand-alone HCI commands must be flagged as
2966          * single-command requests.
2967          */
2968         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
2969
2970         skb_queue_tail(&hdev->cmd_q, skb);
2971         queue_work(hdev->workqueue, &hdev->cmd_work);
2972
2973         return 0;
2974 }
2975
2976 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
2977                    const void *param)
2978 {
2979         struct sk_buff *skb;
2980
2981         if (hci_opcode_ogf(opcode) != 0x3f) {
2982                 /* A controller receiving a command shall respond with either
2983                  * a Command Status Event or a Command Complete Event.
2984                  * Therefore, all standard HCI commands must be sent via the
2985                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
2986                  * Some vendors do not comply with this rule for vendor-specific
2987                  * commands and do not return any event. We want to support
2988                  * unresponded commands for such cases only.
2989                  */
2990                 bt_dev_err(hdev, "unresponded command not supported");
2991                 return -EINVAL;
2992         }
2993
2994         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2995         if (!skb) {
2996                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
2997                            opcode);
2998                 return -ENOMEM;
2999         }
3000
3001         hci_send_frame(hdev, skb);
3002
3003         return 0;
3004 }
3005 EXPORT_SYMBOL(__hci_cmd_send);
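
/* Illustrative sketch (not part of the original file): firing a hypothetical
 * vendor command (OGF 0x3f, OCF 0x0001) that is known not to generate a
 * completion event, which is the only case __hci_cmd_send() permits.
 */
static int __maybe_unused example_send_vendor_cmd(struct hci_dev *hdev)
{
        u8 param = 0x01;        /* Hypothetical vendor-specific payload */

        return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
                              sizeof(param), &param);
}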
3006
3007 /* Get data from the previously sent command */
3008 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3009 {
3010         struct hci_command_hdr *hdr;
3011
3012         if (!hdev->sent_cmd)
3013                 return NULL;
3014
3015         hdr = (void *) hdev->sent_cmd->data;
3016
3017         if (hdr->opcode != cpu_to_le16(opcode))
3018                 return NULL;
3019
3020         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3021
3022         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3023 }
3024
3025 /* Send ACL data */
3026 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3027 {
3028         struct hci_acl_hdr *hdr;
3029         int len = skb->len;
3030
3031         skb_push(skb, HCI_ACL_HDR_SIZE);
3032         skb_reset_transport_header(skb);
3033         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3034         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3035         hdr->dlen   = cpu_to_le16(len);
3036 }
3037
3038 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3039                           struct sk_buff *skb, __u16 flags)
3040 {
3041         struct hci_conn *conn = chan->conn;
3042         struct hci_dev *hdev = conn->hdev;
3043         struct sk_buff *list;
3044
3045         skb->len = skb_headlen(skb);
3046         skb->data_len = 0;
3047
3048         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3049
3050         switch (hdev->dev_type) {
3051         case HCI_PRIMARY:
3052                 hci_add_acl_hdr(skb, conn->handle, flags);
3053                 break;
3054         case HCI_AMP:
3055                 hci_add_acl_hdr(skb, chan->handle, flags);
3056                 break;
3057         default:
3058                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3059                 return;
3060         }
3061
3062         list = skb_shinfo(skb)->frag_list;
3063         if (!list) {
3064                 /* Non-fragmented */
3065                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3066
3067                 skb_queue_tail(queue, skb);
3068         } else {
3069                 /* Fragmented */
3070                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3071
3072                 skb_shinfo(skb)->frag_list = NULL;
3073
3074                 /* Queue all fragments atomically. We need to use spin_lock_bh
3075                  * here because of 6LoWPAN links, as there this function is
3076                  * called from softirq and using normal spin lock could cause
3077                  * deadlocks.
3078                  */
3079                 spin_lock_bh(&queue->lock);
3080
3081                 __skb_queue_tail(queue, skb);
3082
3083                 flags &= ~ACL_START;
3084                 flags |= ACL_CONT;
3085                 do {
3086                         skb = list; list = list->next;
3087
3088                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3089                         hci_add_acl_hdr(skb, conn->handle, flags);
3090
3091                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3092
3093                         __skb_queue_tail(queue, skb);
3094                 } while (list);
3095
3096                 spin_unlock_bh(&queue->lock);
3097         }
3098 }
3099
3100 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3101 {
3102         struct hci_dev *hdev = chan->conn->hdev;
3103
3104         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3105
3106         hci_queue_acl(chan, &chan->data_q, skb, flags);
3107
3108         queue_work(hdev->workqueue, &hdev->tx_work);
3109 }
3110
3111 /* Send SCO data */
3112 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3113 {
3114         struct hci_dev *hdev = conn->hdev;
3115         struct hci_sco_hdr hdr;
3116
3117         BT_DBG("%s len %d", hdev->name, skb->len);
3118
3119         hdr.handle = cpu_to_le16(conn->handle);
3120         hdr.dlen   = skb->len;
3121
3122         skb_push(skb, HCI_SCO_HDR_SIZE);
3123         skb_reset_transport_header(skb);
3124         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3125
3126         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3127
3128         skb_queue_tail(&conn->data_q, skb);
3129         queue_work(hdev->workqueue, &hdev->tx_work);
3130 }
3131
3132 /* ---- HCI TX task (outgoing data) ---- */
3133
3134 /* HCI Connection scheduler */
3135 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3136                                      int *quote)
3137 {
3138         struct hci_conn_hash *h = &hdev->conn_hash;
3139         struct hci_conn *conn = NULL, *c;
3140         unsigned int num = 0, min = ~0;
3141
3142         /* We don't have to lock the device here. Connections are always
3143          * added and removed with the TX task disabled. */
3144
3145         rcu_read_lock();
3146
3147         list_for_each_entry_rcu(c, &h->list, list) {
3148                 if (c->type != type || skb_queue_empty(&c->data_q))
3149                         continue;
3150
3151                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3152                         continue;
3153
3154                 num++;
3155
3156                 if (c->sent < min) {
3157                         min  = c->sent;
3158                         conn = c;
3159                 }
3160
3161                 if (hci_conn_num(hdev, type) == num)
3162                         break;
3163         }
3164
3165         rcu_read_unlock();
3166
3167         if (conn) {
3168                 int cnt, q;
3169
3170                 switch (conn->type) {
3171                 case ACL_LINK:
3172                         cnt = hdev->acl_cnt;
3173                         break;
3174                 case SCO_LINK:
3175                 case ESCO_LINK:
3176                         cnt = hdev->sco_cnt;
3177                         break;
3178                 case LE_LINK:
3179                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3180                         break;
3181                 default:
3182                         cnt = 0;
3183                         bt_dev_err(hdev, "unknown link type %d", conn->type);
3184                 }
3185
3186                 q = cnt / num;
3187                 *quote = q ? q : 1;
3188         } else
3189                 *quote = 0;
3190
3191         BT_DBG("conn %p quote %d", conn, *quote);
3192         return conn;
3193 }
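
/* Worked example of the quota math above (illustrative): with 8 free ACL
 * buffers (hdev->acl_cnt == 8) and num == 3 busy ACL connections, the
 * least-used connection wins and gets quote = 8 / 3 = 2 packets; a zero
 * quotient is rounded up to 1 so the winner can always make progress.
 */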
3194
3195 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3196 {
3197         struct hci_conn_hash *h = &hdev->conn_hash;
3198         struct hci_conn *c;
3199
3200         bt_dev_err(hdev, "link tx timeout");
3201
3202         rcu_read_lock();
3203
3204         /* Kill stalled connections */
3205         list_for_each_entry_rcu(c, &h->list, list) {
3206                 if (c->type == type && c->sent) {
3207                         bt_dev_err(hdev, "killing stalled connection %pMR",
3208                                    &c->dst);
3209                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3210                 }
3211         }
3212
3213         rcu_read_unlock();
3214 }
3215
3216 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3217                                       int *quote)
3218 {
3219         struct hci_conn_hash *h = &hdev->conn_hash;
3220         struct hci_chan *chan = NULL;
3221         unsigned int num = 0, min = ~0, cur_prio = 0;
3222         struct hci_conn *conn;
3223         int cnt, q, conn_num = 0;
3224
3225         BT_DBG("%s", hdev->name);
3226
3227         rcu_read_lock();
3228
3229         list_for_each_entry_rcu(conn, &h->list, list) {
3230                 struct hci_chan *tmp;
3231
3232                 if (conn->type != type)
3233                         continue;
3234
3235                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3236                         continue;
3237
3238                 conn_num++;
3239
3240                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3241                         struct sk_buff *skb;
3242
3243                         if (skb_queue_empty(&tmp->data_q))
3244                                 continue;
3245
3246                         skb = skb_peek(&tmp->data_q);
3247                         if (skb->priority < cur_prio)
3248                                 continue;
3249
3250                         if (skb->priority > cur_prio) {
3251                                 num = 0;
3252                                 min = ~0;
3253                                 cur_prio = skb->priority;
3254                         }
3255
3256                         num++;
3257
3258                         if (conn->sent < min) {
3259                                 min  = conn->sent;
3260                                 chan = tmp;
3261                         }
3262                 }
3263
3264                 if (hci_conn_num(hdev, type) == conn_num)
3265                         break;
3266         }
3267
3268         rcu_read_unlock();
3269
3270         if (!chan)
3271                 return NULL;
3272
3273         switch (chan->conn->type) {
3274         case ACL_LINK:
3275                 cnt = hdev->acl_cnt;
3276                 break;
3277         case AMP_LINK:
3278                 cnt = hdev->block_cnt;
3279                 break;
3280         case SCO_LINK:
3281         case ESCO_LINK:
3282                 cnt = hdev->sco_cnt;
3283                 break;
3284         case LE_LINK:
3285                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3286                 break;
3287         default:
3288                 cnt = 0;
3289                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3290         }
3291
3292         q = cnt / num;
3293         *quote = q ? q : 1;
3294         BT_DBG("chan %p quote %d", chan, *quote);
3295         return chan;
3296 }
3297
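/* Anti-starvation pass run after a scheduling round: a channel that
 * sent nothing this round (chan->sent == 0) but still has data queued
 * gets its head skb promoted to HCI_PRIO_MAX - 1 so it wins the next
 * hci_chan_sent() selection; channels that did send are simply reset
 * for the next round.
 */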
3298 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3299 {
3300         struct hci_conn_hash *h = &hdev->conn_hash;
3301         struct hci_conn *conn;
3302         int num = 0;
3303
3304         BT_DBG("%s", hdev->name);
3305
3306         rcu_read_lock();
3307
3308         list_for_each_entry_rcu(conn, &h->list, list) {
3309                 struct hci_chan *chan;
3310
3311                 if (conn->type != type)
3312                         continue;
3313
3314                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3315                         continue;
3316
3317                 num++;
3318
3319                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3320                         struct sk_buff *skb;
3321
3322                         if (chan->sent) {
3323                                 chan->sent = 0;
3324                                 continue;
3325                         }
3326
3327                         if (skb_queue_empty(&chan->data_q))
3328                                 continue;
3329
3330                         skb = skb_peek(&chan->data_q);
3331                         if (skb->priority >= HCI_PRIO_MAX - 1)
3332                                 continue;
3333
3334                         skb->priority = HCI_PRIO_MAX - 1;
3335
3336                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3337                                skb->priority);
3338                 }
3339
3340                 if (hci_conn_num(hdev, type) == num)
3341                         break;
3342         }
3343
3344         rcu_read_unlock();
3346 }
3347
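/* With block-based flow control the controller accounts buffer usage in
 * fixed-size blocks of hdev->block_len bytes; the ACL header itself is
 * not counted against the payload blocks.
 */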
3348 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3349 {
3350         /* Calculate count of blocks used by this packet */
3351         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3352 }
3353
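/* Detect a stuck ACL link: if no controller buffers are free (cnt == 0)
 * and nothing has completed within HCI_ACL_TX_TIMEOUT of the last
 * transmission, kill the stalled connections. Skipped while the
 * controller is still unconfigured.
 */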
3354 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3355 {
3356         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3357                 /* ACL tx timeout must be longer than the maximum
3358                  * link supervision timeout (40.9 seconds) */
3359                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3360                                        HCI_ACL_TX_TIMEOUT))
3361                         hci_link_tx_to(hdev, ACL_LINK);
3362         }
3363 }
3364
3365 /* Schedule SCO */
3366 static void hci_sched_sco(struct hci_dev *hdev)
3367 {
3368         struct hci_conn *conn;
3369         struct sk_buff *skb;
3370         int quote;
3371
3372         BT_DBG("%s", hdev->name);
3373
3374         if (!hci_conn_num(hdev, SCO_LINK))
3375                 return;
3376
3377         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3378                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3379                         BT_DBG("skb %p len %d", skb, skb->len);
3380                         hci_send_frame(hdev, skb);
3381
3382                         conn->sent++;
3383                         if (conn->sent == ~0)
3384                                 conn->sent = 0;
3385                 }
3386         }
3387 }
3388
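/* Schedule eSCO */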
3389 static void hci_sched_esco(struct hci_dev *hdev)
3390 {
3391         struct hci_conn *conn;
3392         struct sk_buff *skb;
3393         int quote;
3394
3395         BT_DBG("%s", hdev->name);
3396
3397         if (!hci_conn_num(hdev, ESCO_LINK))
3398                 return;
3399
3400         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3401                                                      &quote))) {
3402                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3403                         BT_DBG("skb %p len %d", skb, skb->len);
3404                         hci_send_frame(hdev, skb);
3405
3406                         conn->sent++;
3407                         if (conn->sent == ~0)
3408                                 conn->sent = 0;
3409                 }
3410         }
3411 }
3412
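/* Packet-based ACL scheduling: while free buffers (hdev->acl_cnt)
 * remain, drain up to *quote frames from the selected channel, stopping
 * early if the head of the queue drops below the priority the burst
 * started at. SCO/eSCO traffic is interleaved after every ACL frame so
 * isochronous links are never starved.
 */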
3413 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3414 {
3415         unsigned int cnt = hdev->acl_cnt;
3416         struct hci_chan *chan;
3417         struct sk_buff *skb;
3418         int quote;
3419
3420         __check_timeout(hdev, cnt);
3421
3422         while (hdev->acl_cnt &&
3423                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3424                 u32 priority = (skb_peek(&chan->data_q))->priority;
3425                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3426                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3427                                skb->len, skb->priority);
3428
3429                         /* Stop if priority has changed */
3430                         if (skb->priority < priority)
3431                                 break;
3432
3433                         skb = skb_dequeue(&chan->data_q);
3434
3435                         hci_conn_enter_active_mode(chan->conn,
3436                                                    bt_cb(skb)->force_active);
3437
3438                         hci_send_frame(hdev, skb);
3439                         hdev->acl_last_tx = jiffies;
3440
3441                         hdev->acl_cnt--;
3442                         chan->sent++;
3443                         chan->conn->sent++;
3444
3445                         /* Send pending SCO packets right away */
3446                         hci_sched_sco(hdev);
3447                         hci_sched_esco(hdev);
3448                 }
3449         }
3450
3451         if (cnt != hdev->acl_cnt)
3452                 hci_prio_recalculate(hdev, ACL_LINK);
3453 }
3454
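/* Block-based ACL scheduling (HCI_FLOW_CTL_MODE_BLOCK_BASED): same idea
 * as the packet-based path, but each frame consumes __get_blocks()
 * blocks from both hdev->block_cnt and the channel's quote. On AMP
 * controllers it is AMP_LINK connections that are scheduled here.
 */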
3455 static void hci_sched_acl_blk(struct hci_dev *hdev)
3456 {
3457         unsigned int cnt = hdev->block_cnt;
3458         struct hci_chan *chan;
3459         struct sk_buff *skb;
3460         int quote;
3461         u8 type;
3462
3463         __check_timeout(hdev, cnt);
3464
3465         BT_DBG("%s", hdev->name);
3466
3467         if (hdev->dev_type == HCI_AMP)
3468                 type = AMP_LINK;
3469         else
3470                 type = ACL_LINK;
3471
3472         while (hdev->block_cnt > 0 &&
3473                (chan = hci_chan_sent(hdev, type, &quote))) {
3474                 u32 priority = (skb_peek(&chan->data_q))->priority;
3475                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3476                         int blocks;
3477
3478                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3479                                skb->len, skb->priority);
3480
3481                         /* Stop if priority has changed */
3482                         if (skb->priority < priority)
3483                                 break;
3484
3485                         skb = skb_dequeue(&chan->data_q);
3486
3487                         blocks = __get_blocks(hdev, skb);
3488                         if (blocks > hdev->block_cnt)
3489                                 return;
3490
3491                         hci_conn_enter_active_mode(chan->conn,
3492                                                    bt_cb(skb)->force_active);
3493
3494                         hci_send_frame(hdev, skb);
3495                         hdev->acl_last_tx = jiffies;
3496
3497                         hdev->block_cnt -= blocks;
3498                         quote -= blocks;
3499
3500                         chan->sent += blocks;
3501                         chan->conn->sent += blocks;
3502                 }
3503         }
3504
3505         if (cnt != hdev->block_cnt)
3506                 hci_prio_recalculate(hdev, type);
3507 }
3508
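/* Dispatch ACL scheduling according to the flow control mode the
 * controller uses: packet-based or block-based accounting.
 */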
3509 static void hci_sched_acl(struct hci_dev *hdev)
3510 {
3511         BT_DBG("%s", hdev->name);
3512
3513         /* No ACL link over BR/EDR controller */
3514         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3515                 return;
3516
3517         /* No AMP link over AMP controller */
3518         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3519                 return;
3520
3521         switch (hdev->flow_ctl_mode) {
3522         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3523                 hci_sched_acl_pkt(hdev);
3524                 break;
3525
3526         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3527                 hci_sched_acl_blk(hdev);
3528                 break;
3529         }
3530 }
3531
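/* Schedule LE data. Controllers without a dedicated LE buffer pool
 * (hdev->le_pkts == 0) share the ACL buffers, which is why the consumed
 * count is written back to acl_cnt rather than le_cnt below.
 */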
3532 static void hci_sched_le(struct hci_dev *hdev)
3533 {
3534         struct hci_chan *chan;
3535         struct sk_buff *skb;
3536         int quote, cnt, tmp;
3537
3538         BT_DBG("%s", hdev->name);
3539
3540         if (!hci_conn_num(hdev, LE_LINK))
3541                 return;
3542
3543         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3544
3545         __check_timeout(hdev, cnt);
3546
3547         tmp = cnt;
3548         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3549                 u32 priority = (skb_peek(&chan->data_q))->priority;
3550                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3551                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3552                                skb->len, skb->priority);
3553
3554                         /* Stop if priority has changed */
3555                         if (skb->priority < priority)
3556                                 break;
3557
3558                         skb = skb_dequeue(&chan->data_q);
3559
3560                         hci_send_frame(hdev, skb);
3561                         hdev->le_last_tx = jiffies;
3562
3563                         cnt--;
3564                         chan->sent++;
3565                         chan->conn->sent++;
3566
3567                         /* Send pending SCO packets right away */
3568                         hci_sched_sco(hdev);
3569                         hci_sched_esco(hdev);
3570                 }
3571         }
3572
3573         if (hdev->le_pkts)
3574                 hdev->le_cnt = cnt;
3575         else
3576                 hdev->acl_cnt = cnt;
3577
3578         if (cnt != tmp)
3579                 hci_prio_recalculate(hdev, LE_LINK);
3580 }
3581
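/* TX work: run the per-type schedulers in priority order (SCO and eSCO
 * first, then ACL, then LE) unless userspace owns the device through
 * HCI_USER_CHANNEL, then flush any raw packets queued by user sockets
 * straight to the driver.
 */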
3582 static void hci_tx_work(struct work_struct *work)
3583 {
3584         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3585         struct sk_buff *skb;
3586
3587         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3588                hdev->sco_cnt, hdev->le_cnt);
3589
3590         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3591                 /* Schedule queues and send stuff to HCI driver */
3592                 hci_sched_sco(hdev);
3593                 hci_sched_esco(hdev);
3594                 hci_sched_acl(hdev);
3595                 hci_sched_le(hdev);
3596         }
3597
3598         /* Send any queued raw (unknown type) packets */
3599         while ((skb = skb_dequeue(&hdev->raw_q)))
3600                 hci_send_frame(hdev, skb);
3601 }
3602
3603 /* ----- HCI RX task (incoming data processing) ----- */
3604
3605 /* ACL data packet */
3606 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3607 {
3608         struct hci_acl_hdr *hdr = (void *) skb->data;
3609         struct hci_conn *conn;
3610         __u16 handle, flags;
3611
3612         skb_pull(skb, HCI_ACL_HDR_SIZE);
3613
3614         handle = __le16_to_cpu(hdr->handle);
3615         flags  = hci_flags(handle);
3616         handle = hci_handle(handle);
3617
3618         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3619                handle, flags);
3620
3621         hdev->stat.acl_rx++;
3622
3623         hci_dev_lock(hdev);
3624         conn = hci_conn_hash_lookup_handle(hdev, handle);
3625         hci_dev_unlock(hdev);
3626
3627         if (conn) {
3628                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3629
3630                 /* Send to upper protocol */
3631                 l2cap_recv_acldata(conn, skb, flags);
3632                 return;
3633         } else {
3634                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3635                            handle);
3636         }
3637
3638         kfree_skb(skb);
3639 }
3640
3641 /* SCO data packet */
3642 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3643 {
3644         struct hci_sco_hdr *hdr = (void *) skb->data;
3645         struct hci_conn *conn;
3646         __u16 handle, flags;
3647
3648         skb_pull(skb, HCI_SCO_HDR_SIZE);
3649
3650         handle = __le16_to_cpu(hdr->handle);
3651         flags  = hci_flags(handle);
3652         handle = hci_handle(handle);
3653
3654         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3655                handle, flags);
3656
3657         hdev->stat.sco_rx++;
3658
3659         hci_dev_lock(hdev);
3660         conn = hci_conn_hash_lookup_handle(hdev, handle);
3661         hci_dev_unlock(hdev);
3662
3663         if (conn) {
3664                 /* Send to upper protocol */
3665                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3666                 sco_recv_scodata(conn, skb);
3667                 return;
3668         } else {
3669                 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3670                                        handle);
3671         }
3672
3673         kfree_skb(skb);
3674 }
3675
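/* The current request is complete once the head of the command queue
 * starts a new request (HCI_REQ_START) or the queue is empty.
 */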
3676 static bool hci_req_is_complete(struct hci_dev *hdev)
3677 {
3678         struct sk_buff *skb;
3679
3680         skb = skb_peek(&hdev->cmd_q);
3681         if (!skb)
3682                 return true;
3683
3684         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3685 }
3686
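/* Requeue a clone of the last sent command at the head of the command
 * queue so it gets retried; HCI_OP_RESET is deliberately never resent.
 */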
3687 static void hci_resend_last(struct hci_dev *hdev)
3688 {
3689         struct hci_command_hdr *sent;
3690         struct sk_buff *skb;
3691         u16 opcode;
3692
3693         if (!hdev->sent_cmd)
3694                 return;
3695
3696         sent = (void *) hdev->sent_cmd->data;
3697         opcode = __le16_to_cpu(sent->opcode);
3698         if (opcode == HCI_OP_RESET)
3699                 return;
3700
3701         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3702         if (!skb)
3703                 return;
3704
3705         skb_queue_head(&hdev->cmd_q, skb);
3706         queue_work(hdev->workqueue, &hdev->cmd_work);
3707 }
3708
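/* Called from event processing to find the completion callback, if any,
 * that belongs to @opcode: either the callbacks stored with the last
 * sent command (hdev->sent_cmd), or those attached to the remaining
 * queued commands of an aborted request, which are discarded here.
 */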
3709 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3710                           hci_req_complete_t *req_complete,
3711                           hci_req_complete_skb_t *req_complete_skb)
3712 {
3713         struct sk_buff *skb;
3714         unsigned long flags;
3715
3716         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3717
3718         /* If the completed command doesn't match the last one that was
3719          * sent, we need to do special handling of it.
3720          */
3721         if (!hci_sent_cmd_data(hdev, opcode)) {
3722                 /* Some CSR-based controllers generate a spontaneous
3723                  * reset complete event during init, and any pending
3724                  * command will then never be completed. In such a
3725                  * case we need to resend whatever was the last
3726                  * sent command.
3727                  */
3728                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3729                         hci_resend_last(hdev);
3730
3731                 return;
3732         }
3733
3734         /* If we reach this point this event matches the last command sent */
3735         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3736
3737         /* If the command succeeded and there are still more commands in
3738          * this request, the request is not yet complete.
3739          */
3740         if (!status && !hci_req_is_complete(hdev))
3741                 return;
3742
3743         /* If this was the last command in a request, the complete
3744          * callback is found in hdev->sent_cmd instead of the
3745          * command queue (hdev->cmd_q).
3746          */
3747         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
3748                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
3749                 return;
3750         }
3751
3752         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
3753                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
3754                 return;
3755         }
3756
3757         /* Remove all pending commands belonging to this request */
3758         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3759         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3760                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3761                         __skb_queue_head(&hdev->cmd_q, skb);
3762                         break;
3763                 }
3764
3765                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3766                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3767                 else
3768                         *req_complete = bt_cb(skb)->hci.req_complete;
3769                 kfree_skb(skb);
3770         }
3771         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3772 }
3773
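/* RX work: drain hdev->rx_q, mirroring every frame to the monitor and,
 * in promiscuous mode, to raw sockets, then hand events and data
 * packets to the core or drop them according to device state.
 */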
3774 static void hci_rx_work(struct work_struct *work)
3775 {
3776         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3777         struct sk_buff *skb;
3778
3779         BT_DBG("%s", hdev->name);
3780
3781         while ((skb = skb_dequeue(&hdev->rx_q))) {
3782                 /* Send copy to monitor */
3783                 hci_send_to_monitor(hdev, skb);
3784
3785                 if (atomic_read(&hdev->promisc)) {
3786                         /* Send copy to the sockets */
3787                         hci_send_to_sock(hdev, skb);
3788                 }
3789
3790                 /* If the device has been opened in HCI_USER_CHANNEL,
3791                  * userspace has exclusive access to the device.
3792                  * While the device is in HCI_INIT, we still need to
3793                  * pass the data packets on to the driver in order
3794                  * to complete its setup().
3795                  */
3796                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3797                     !test_bit(HCI_INIT, &hdev->flags)) {
3798                         kfree_skb(skb);
3799                         continue;
3800                 }
3801
3802                 if (test_bit(HCI_INIT, &hdev->flags)) {
3803                         /* Don't process data packets in these states. */
3804                         switch (hci_skb_pkt_type(skb)) {
3805                         case HCI_ACLDATA_PKT:
3806                         case HCI_SCODATA_PKT:
3807                         case HCI_ISODATA_PKT:
3808                                 kfree_skb(skb);
3809                                 continue;
3810                         }
3811                 }
3812
3813                 /* Process frame */
3814                 switch (hci_skb_pkt_type(skb)) {
3815                 case HCI_EVENT_PKT:
3816                         BT_DBG("%s Event packet", hdev->name);
3817                         hci_event_packet(hdev, skb);
3818                         break;
3819
3820                 case HCI_ACLDATA_PKT:
3821                         BT_DBG("%s ACL data packet", hdev->name);
3822                         hci_acldata_packet(hdev, skb);
3823                         break;
3824
3825                 case HCI_SCODATA_PKT:
3826                         BT_DBG("%s SCO data packet", hdev->name);
3827                         hci_scodata_packet(hdev, skb);
3828                         break;
3829
3830                 default:
3831                         kfree_skb(skb);
3832                         break;
3833                 }
3834         }
3835 }
3836
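/* CMD work: if the controller still has a command credit (cmd_cnt),
 * dequeue the next command, keep a clone in hdev->sent_cmd for
 * completion matching, send it and arm the command timeout; if cloning
 * fails the command is requeued and the work rescheduled.
 */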
3837 static void hci_cmd_work(struct work_struct *work)
3838 {
3839         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3840         struct sk_buff *skb;
3841
3842         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3843                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3844
3845         /* Send queued commands */
3846         if (atomic_read(&hdev->cmd_cnt)) {
3847                 skb = skb_dequeue(&hdev->cmd_q);
3848                 if (!skb)
3849                         return;
3850
3851                 kfree_skb(hdev->sent_cmd);
3852
3853                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3854                 if (hdev->sent_cmd) {
3855                         int res;

3856                         if (hci_req_status_pend(hdev))
3857                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
3858                         atomic_dec(&hdev->cmd_cnt);
3859
3860                         res = hci_send_frame(hdev, skb);
3861                         if (res < 0)
3862                                 __hci_cmd_sync_cancel(hdev, -res);
3863
3864                         if (test_bit(HCI_RESET, &hdev->flags))
3865                                 cancel_delayed_work(&hdev->cmd_timer);
3866                         else
3867                                 schedule_delayed_work(&hdev->cmd_timer,
3868                                                       HCI_CMD_TIMEOUT);
3869                 } else {
3870                         skb_queue_head(&hdev->cmd_q, skb);
3871                         queue_work(hdev->workqueue, &hdev->cmd_work);
3872                 }
3873         }
3874 }