Bluetooth: Set link Supervision timeout for a connection
[platform/kernel/linux-rpi.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65
66 static int hci_scan_req(struct hci_request *req, unsigned long opt)
67 {
68         __u8 scan = opt;
69
70         BT_DBG("%s %x", req->hdev->name, scan);
71
72         /* Inquiry and Page scans */
73         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
74         return 0;
75 }
76
77 static int hci_auth_req(struct hci_request *req, unsigned long opt)
78 {
79         __u8 auth = opt;
80
81         BT_DBG("%s %x", req->hdev->name, auth);
82
83         /* Authentication */
84         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
85         return 0;
86 }
87
88 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
89 {
90         __u8 encrypt = opt;
91
92         BT_DBG("%s %x", req->hdev->name, encrypt);
93
94         /* Encryption */
95         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
96         return 0;
97 }
98
99 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
100 {
101         __le16 policy = cpu_to_le16(opt);
102
103         BT_DBG("%s %x", req->hdev->name, policy);
104
105         /* Default link policy */
106         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
107         return 0;
108 }
109
110 /* Get HCI device by index.
111  * Device is held on return. */
112 struct hci_dev *hci_dev_get(int index)
113 {
114         struct hci_dev *hdev = NULL, *d;
115
116         BT_DBG("%d", index);
117
118         if (index < 0)
119                 return NULL;
120
121         read_lock(&hci_dev_list_lock);
122         list_for_each_entry(d, &hci_dev_list, list) {
123                 if (d->id == index) {
124                         hdev = hci_dev_hold(d);
125                         break;
126                 }
127         }
128         read_unlock(&hci_dev_list_lock);
129         return hdev;
130 }
131
132 /* ---- Inquiry support ---- */
133
134 bool hci_discovery_active(struct hci_dev *hdev)
135 {
136         struct discovery_state *discov = &hdev->discovery;
137
138         switch (discov->state) {
139         case DISCOVERY_FINDING:
140         case DISCOVERY_RESOLVING:
141                 return true;
142
143         default:
144                 return false;
145         }
146 }
147
148 void hci_discovery_set_state(struct hci_dev *hdev, int state)
149 {
150         int old_state = hdev->discovery.state;
151
152         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
153
154         if (old_state == state)
155                 return;
156
157         hdev->discovery.state = state;
158
159         switch (state) {
160         case DISCOVERY_STOPPED:
161                 hci_update_passive_scan(hdev);
162
163                 if (old_state != DISCOVERY_STARTING)
164                         mgmt_discovering(hdev, 0);
165                 break;
166         case DISCOVERY_STARTING:
167                 break;
168         case DISCOVERY_FINDING:
169                 mgmt_discovering(hdev, 1);
170                 break;
171         case DISCOVERY_RESOLVING:
172                 break;
173         case DISCOVERY_STOPPING:
174                 break;
175         }
176 }
177
178 #ifdef TIZEN_BT
179 bool hci_le_discovery_active(struct hci_dev *hdev)
180 {
181         struct discovery_state *discov = &hdev->le_discovery;
182
183         switch (discov->state) {
184         case DISCOVERY_FINDING:
185         case DISCOVERY_RESOLVING:
186                 return true;
187
188         default:
189                 return false;
190         }
191 }
192
193 void hci_le_discovery_set_state(struct hci_dev *hdev, int state)
194 {
195         BT_DBG("%s state %u -> %u", hdev->name,
196                         hdev->le_discovery.state, state);
197
198         if (hdev->le_discovery.state == state)
199                 return;
200
201         switch (state) {
202         case DISCOVERY_STOPPED:
203                 hci_update_passive_scan(hdev);
204
205                 if (hdev->le_discovery.state != DISCOVERY_STARTING)
206                         mgmt_le_discovering(hdev, 0);
207                 break;
208         case DISCOVERY_STARTING:
209                 break;
210         case DISCOVERY_FINDING:
211                 mgmt_le_discovering(hdev, 1);
212                 break;
213         case DISCOVERY_RESOLVING:
214                 break;
215         case DISCOVERY_STOPPING:
216                 break;
217         }
218
219         hdev->le_discovery.state = state;
220 }
221
222 static void hci_tx_timeout_error_evt(struct hci_dev *hdev)
223 {
224         BT_ERR("%s H/W TX Timeout error", hdev->name);
225
226         mgmt_tx_timeout_error(hdev);
227 }
228 #endif
229
230 void hci_inquiry_cache_flush(struct hci_dev *hdev)
231 {
232         struct discovery_state *cache = &hdev->discovery;
233         struct inquiry_entry *p, *n;
234
235         list_for_each_entry_safe(p, n, &cache->all, all) {
236                 list_del(&p->all);
237                 kfree(p);
238         }
239
240         INIT_LIST_HEAD(&cache->unknown);
241         INIT_LIST_HEAD(&cache->resolve);
242 }
243
244 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
245                                                bdaddr_t *bdaddr)
246 {
247         struct discovery_state *cache = &hdev->discovery;
248         struct inquiry_entry *e;
249
250         BT_DBG("cache %p, %pMR", cache, bdaddr);
251
252         list_for_each_entry(e, &cache->all, all) {
253                 if (!bacmp(&e->data.bdaddr, bdaddr))
254                         return e;
255         }
256
257         return NULL;
258 }
259
260 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
261                                                        bdaddr_t *bdaddr)
262 {
263         struct discovery_state *cache = &hdev->discovery;
264         struct inquiry_entry *e;
265
266         BT_DBG("cache %p, %pMR", cache, bdaddr);
267
268         list_for_each_entry(e, &cache->unknown, list) {
269                 if (!bacmp(&e->data.bdaddr, bdaddr))
270                         return e;
271         }
272
273         return NULL;
274 }
275
276 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
277                                                        bdaddr_t *bdaddr,
278                                                        int state)
279 {
280         struct discovery_state *cache = &hdev->discovery;
281         struct inquiry_entry *e;
282
283         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
284
285         list_for_each_entry(e, &cache->resolve, list) {
286                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
287                         return e;
288                 if (!bacmp(&e->data.bdaddr, bdaddr))
289                         return e;
290         }
291
292         return NULL;
293 }
294
295 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
296                                       struct inquiry_entry *ie)
297 {
298         struct discovery_state *cache = &hdev->discovery;
299         struct list_head *pos = &cache->resolve;
300         struct inquiry_entry *p;
301
302         list_del(&ie->list);
303
304         list_for_each_entry(p, &cache->resolve, list) {
305                 if (p->name_state != NAME_PENDING &&
306                     abs(p->data.rssi) >= abs(ie->data.rssi))
307                         break;
308                 pos = &p->list;
309         }
310
311         list_add(&ie->list, pos);
312 }
313
314 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
315                              bool name_known)
316 {
317         struct discovery_state *cache = &hdev->discovery;
318         struct inquiry_entry *ie;
319         u32 flags = 0;
320
321         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
322
323         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
324
325         if (!data->ssp_mode)
326                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
327
328         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
329         if (ie) {
330                 if (!ie->data.ssp_mode)
331                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
332
333                 if (ie->name_state == NAME_NEEDED &&
334                     data->rssi != ie->data.rssi) {
335                         ie->data.rssi = data->rssi;
336                         hci_inquiry_cache_update_resolve(hdev, ie);
337                 }
338
339                 goto update;
340         }
341
342         /* Entry not in the cache. Add new one. */
343         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
344         if (!ie) {
345                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
346                 goto done;
347         }
348
349         list_add(&ie->all, &cache->all);
350
351         if (name_known) {
352                 ie->name_state = NAME_KNOWN;
353         } else {
354                 ie->name_state = NAME_NOT_KNOWN;
355                 list_add(&ie->list, &cache->unknown);
356         }
357
358 update:
359         if (name_known && ie->name_state != NAME_KNOWN &&
360             ie->name_state != NAME_PENDING) {
361                 ie->name_state = NAME_KNOWN;
362                 list_del(&ie->list);
363         }
364
365         memcpy(&ie->data, data, sizeof(*data));
366         ie->timestamp = jiffies;
367         cache->timestamp = jiffies;
368
369         if (ie->name_state == NAME_NOT_KNOWN)
370                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
371
372 done:
373         return flags;
374 }
375
376 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
377 {
378         struct discovery_state *cache = &hdev->discovery;
379         struct inquiry_info *info = (struct inquiry_info *) buf;
380         struct inquiry_entry *e;
381         int copied = 0;
382
383         list_for_each_entry(e, &cache->all, all) {
384                 struct inquiry_data *data = &e->data;
385
386                 if (copied >= num)
387                         break;
388
389                 bacpy(&info->bdaddr, &data->bdaddr);
390                 info->pscan_rep_mode    = data->pscan_rep_mode;
391                 info->pscan_period_mode = data->pscan_period_mode;
392                 info->pscan_mode        = data->pscan_mode;
393                 memcpy(info->dev_class, data->dev_class, 3);
394                 info->clock_offset      = data->clock_offset;
395
396                 info++;
397                 copied++;
398         }
399
400         BT_DBG("cache %p, copied %d", cache, copied);
401         return copied;
402 }
403
404 static int hci_inq_req(struct hci_request *req, unsigned long opt)
405 {
406         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
407         struct hci_dev *hdev = req->hdev;
408         struct hci_cp_inquiry cp;
409
410         BT_DBG("%s", hdev->name);
411
412         if (test_bit(HCI_INQUIRY, &hdev->flags))
413                 return 0;
414
415         /* Start Inquiry */
416         memcpy(&cp.lap, &ir->lap, 3);
417         cp.length  = ir->length;
418         cp.num_rsp = ir->num_rsp;
419         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
420
421         return 0;
422 }
423
424 int hci_inquiry(void __user *arg)
425 {
426         __u8 __user *ptr = arg;
427         struct hci_inquiry_req ir;
428         struct hci_dev *hdev;
429         int err = 0, do_inquiry = 0, max_rsp;
430         long timeo;
431         __u8 *buf;
432
433         if (copy_from_user(&ir, ptr, sizeof(ir)))
434                 return -EFAULT;
435
436         hdev = hci_dev_get(ir.dev_id);
437         if (!hdev)
438                 return -ENODEV;
439
440         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
441                 err = -EBUSY;
442                 goto done;
443         }
444
445         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
446                 err = -EOPNOTSUPP;
447                 goto done;
448         }
449
450         if (hdev->dev_type != HCI_PRIMARY) {
451                 err = -EOPNOTSUPP;
452                 goto done;
453         }
454
455         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
456                 err = -EOPNOTSUPP;
457                 goto done;
458         }
459
460         /* Restrict maximum inquiry length to 60 seconds */
461         if (ir.length > 60) {
462                 err = -EINVAL;
463                 goto done;
464         }
465
466         hci_dev_lock(hdev);
467         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
468             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
469                 hci_inquiry_cache_flush(hdev);
470                 do_inquiry = 1;
471         }
472         hci_dev_unlock(hdev);
473
474         timeo = ir.length * msecs_to_jiffies(2000);
475
476         if (do_inquiry) {
477                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
478                                    timeo, NULL);
479                 if (err < 0)
480                         goto done;
481
482                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
483                  * cleared). If it is interrupted by a signal, return -EINTR.
484                  */
485                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
486                                 TASK_INTERRUPTIBLE)) {
487                         err = -EINTR;
488                         goto done;
489                 }
490         }
491
492         /* for unlimited number of responses we will use buffer with
493          * 255 entries
494          */
495         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
496
497         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
498          * copy it to the user space.
499          */
500         buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
501         if (!buf) {
502                 err = -ENOMEM;
503                 goto done;
504         }
505
506         hci_dev_lock(hdev);
507         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
508         hci_dev_unlock(hdev);
509
510         BT_DBG("num_rsp %d", ir.num_rsp);
511
512         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
513                 ptr += sizeof(ir);
514                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
515                                  ir.num_rsp))
516                         err = -EFAULT;
517         } else
518                 err = -EFAULT;
519
520         kfree(buf);
521
522 done:
523         hci_dev_put(hdev);
524         return err;
525 }
526
527 static int hci_dev_do_open(struct hci_dev *hdev)
528 {
529         int ret = 0;
530
531         BT_DBG("%s %p", hdev->name, hdev);
532
533         hci_req_sync_lock(hdev);
534
535         ret = hci_dev_open_sync(hdev);
536
537         hci_req_sync_unlock(hdev);
538         return ret;
539 }
540
541 /* ---- HCI ioctl helpers ---- */
542
543 int hci_dev_open(__u16 dev)
544 {
545         struct hci_dev *hdev;
546         int err;
547
548         hdev = hci_dev_get(dev);
549         if (!hdev)
550                 return -ENODEV;
551
552         /* Devices that are marked as unconfigured can only be powered
553          * up as user channel. Trying to bring them up as normal devices
554          * will result into a failure. Only user channel operation is
555          * possible.
556          *
557          * When this function is called for a user channel, the flag
558          * HCI_USER_CHANNEL will be set first before attempting to
559          * open the device.
560          */
561         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
562             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
563                 err = -EOPNOTSUPP;
564                 goto done;
565         }
566
567         /* We need to ensure that no other power on/off work is pending
568          * before proceeding to call hci_dev_do_open. This is
569          * particularly important if the setup procedure has not yet
570          * completed.
571          */
572         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
573                 cancel_delayed_work(&hdev->power_off);
574
575         /* After this call it is guaranteed that the setup procedure
576          * has finished. This means that error conditions like RFKILL
577          * or no valid public or static random address apply.
578          */
579         flush_workqueue(hdev->req_workqueue);
580
581         /* For controllers not using the management interface and that
582          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
583          * so that pairing works for them. Once the management interface
584          * is in use this bit will be cleared again and userspace has
585          * to explicitly enable it.
586          */
587         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
588             !hci_dev_test_flag(hdev, HCI_MGMT))
589                 hci_dev_set_flag(hdev, HCI_BONDABLE);
590
591         err = hci_dev_do_open(hdev);
592
593 done:
594         hci_dev_put(hdev);
595         return err;
596 }
597
598 int hci_dev_do_close(struct hci_dev *hdev)
599 {
600         int err;
601
602         BT_DBG("%s %p", hdev->name, hdev);
603
604         hci_req_sync_lock(hdev);
605
606         err = hci_dev_close_sync(hdev);
607
608         hci_req_sync_unlock(hdev);
609
610         return err;
611 }
612
613 int hci_dev_close(__u16 dev)
614 {
615         struct hci_dev *hdev;
616         int err;
617
618         hdev = hci_dev_get(dev);
619         if (!hdev)
620                 return -ENODEV;
621
622         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
623                 err = -EBUSY;
624                 goto done;
625         }
626
627         cancel_work_sync(&hdev->power_on);
628         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
629                 cancel_delayed_work(&hdev->power_off);
630
631         err = hci_dev_do_close(hdev);
632
633 done:
634         hci_dev_put(hdev);
635         return err;
636 }
637
638 static int hci_dev_do_reset(struct hci_dev *hdev)
639 {
640         int ret;
641
642         BT_DBG("%s %p", hdev->name, hdev);
643
644         hci_req_sync_lock(hdev);
645
646         /* Drop queues */
647         skb_queue_purge(&hdev->rx_q);
648         skb_queue_purge(&hdev->cmd_q);
649
650         /* Cancel these to avoid queueing non-chained pending work */
651         hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
652         /* Wait for
653          *
654          *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
655          *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
656          *
657          * inside RCU section to see the flag or complete scheduling.
658          */
659         synchronize_rcu();
660         /* Explicitly cancel works in case scheduled after setting the flag. */
661         cancel_delayed_work(&hdev->cmd_timer);
662         cancel_delayed_work(&hdev->ncmd_timer);
663
664         /* Avoid potential lockdep warnings from the *_flush() calls by
665          * ensuring the workqueue is empty up front.
666          */
667         drain_workqueue(hdev->workqueue);
668
669         hci_dev_lock(hdev);
670         hci_inquiry_cache_flush(hdev);
671         hci_conn_hash_flush(hdev);
672         hci_dev_unlock(hdev);
673
674         if (hdev->flush)
675                 hdev->flush(hdev);
676
677         hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
678
679         atomic_set(&hdev->cmd_cnt, 1);
680         hdev->acl_cnt = 0;
681         hdev->sco_cnt = 0;
682         hdev->le_cnt = 0;
683         hdev->iso_cnt = 0;
684
685         ret = hci_reset_sync(hdev);
686
687         hci_req_sync_unlock(hdev);
688         return ret;
689 }
690
691 int hci_dev_reset(__u16 dev)
692 {
693         struct hci_dev *hdev;
694         int err;
695
696         hdev = hci_dev_get(dev);
697         if (!hdev)
698                 return -ENODEV;
699
700         if (!test_bit(HCI_UP, &hdev->flags)) {
701                 err = -ENETDOWN;
702                 goto done;
703         }
704
705         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
706                 err = -EBUSY;
707                 goto done;
708         }
709
710         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
711                 err = -EOPNOTSUPP;
712                 goto done;
713         }
714
715         err = hci_dev_do_reset(hdev);
716
717 done:
718         hci_dev_put(hdev);
719         return err;
720 }
721
722 int hci_dev_reset_stat(__u16 dev)
723 {
724         struct hci_dev *hdev;
725         int ret = 0;
726
727         hdev = hci_dev_get(dev);
728         if (!hdev)
729                 return -ENODEV;
730
731         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
732                 ret = -EBUSY;
733                 goto done;
734         }
735
736         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
737                 ret = -EOPNOTSUPP;
738                 goto done;
739         }
740
741         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
742
743 done:
744         hci_dev_put(hdev);
745         return ret;
746 }
747
748 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
749 {
750         bool conn_changed, discov_changed;
751
752         BT_DBG("%s scan 0x%02x", hdev->name, scan);
753
754         if ((scan & SCAN_PAGE))
755                 conn_changed = !hci_dev_test_and_set_flag(hdev,
756                                                           HCI_CONNECTABLE);
757         else
758                 conn_changed = hci_dev_test_and_clear_flag(hdev,
759                                                            HCI_CONNECTABLE);
760
761         if ((scan & SCAN_INQUIRY)) {
762                 discov_changed = !hci_dev_test_and_set_flag(hdev,
763                                                             HCI_DISCOVERABLE);
764         } else {
765                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
766                 discov_changed = hci_dev_test_and_clear_flag(hdev,
767                                                              HCI_DISCOVERABLE);
768         }
769
770         if (!hci_dev_test_flag(hdev, HCI_MGMT))
771                 return;
772
773         if (conn_changed || discov_changed) {
774                 /* In case this was disabled through mgmt */
775                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
776
777                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
778                         hci_update_adv_data(hdev, hdev->cur_adv_instance);
779
780                 mgmt_new_settings(hdev);
781         }
782 }
783
784 int hci_dev_cmd(unsigned int cmd, void __user *arg)
785 {
786         struct hci_dev *hdev;
787         struct hci_dev_req dr;
788         int err = 0;
789
790         if (copy_from_user(&dr, arg, sizeof(dr)))
791                 return -EFAULT;
792
793         hdev = hci_dev_get(dr.dev_id);
794         if (!hdev)
795                 return -ENODEV;
796
797         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
798                 err = -EBUSY;
799                 goto done;
800         }
801
802         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
803                 err = -EOPNOTSUPP;
804                 goto done;
805         }
806
807         if (hdev->dev_type != HCI_PRIMARY) {
808                 err = -EOPNOTSUPP;
809                 goto done;
810         }
811
812         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
813                 err = -EOPNOTSUPP;
814                 goto done;
815         }
816
817         switch (cmd) {
818         case HCISETAUTH:
819                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
820                                    HCI_INIT_TIMEOUT, NULL);
821                 break;
822
823         case HCISETENCRYPT:
824                 if (!lmp_encrypt_capable(hdev)) {
825                         err = -EOPNOTSUPP;
826                         break;
827                 }
828
829                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
830                         /* Auth must be enabled first */
831                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
832                                            HCI_INIT_TIMEOUT, NULL);
833                         if (err)
834                                 break;
835                 }
836
837                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
838                                    HCI_INIT_TIMEOUT, NULL);
839                 break;
840
841         case HCISETSCAN:
842                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
843                                    HCI_INIT_TIMEOUT, NULL);
844
845                 /* Ensure that the connectable and discoverable states
846                  * get correctly modified as this was a non-mgmt change.
847                  */
848                 if (!err)
849                         hci_update_passive_scan_state(hdev, dr.dev_opt);
850                 break;
851
852         case HCISETLINKPOL:
853                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
854                                    HCI_INIT_TIMEOUT, NULL);
855                 break;
856
857         case HCISETLINKMODE:
858                 hdev->link_mode = ((__u16) dr.dev_opt) &
859                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
860                 break;
861
862         case HCISETPTYPE:
863                 if (hdev->pkt_type == (__u16) dr.dev_opt)
864                         break;
865
866                 hdev->pkt_type = (__u16) dr.dev_opt;
867                 mgmt_phy_configuration_changed(hdev, NULL);
868                 break;
869
870         case HCISETACLMTU:
871                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
872                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
873                 break;
874
875         case HCISETSCOMTU:
876                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
877                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
878                 break;
879
880         default:
881                 err = -EINVAL;
882                 break;
883         }
884
885 done:
886         hci_dev_put(hdev);
887         return err;
888 }
889
890 int hci_get_dev_list(void __user *arg)
891 {
892         struct hci_dev *hdev;
893         struct hci_dev_list_req *dl;
894         struct hci_dev_req *dr;
895         int n = 0, size, err;
896         __u16 dev_num;
897
898         if (get_user(dev_num, (__u16 __user *) arg))
899                 return -EFAULT;
900
901         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
902                 return -EINVAL;
903
904         size = sizeof(*dl) + dev_num * sizeof(*dr);
905
906         dl = kzalloc(size, GFP_KERNEL);
907         if (!dl)
908                 return -ENOMEM;
909
910         dr = dl->dev_req;
911
912         read_lock(&hci_dev_list_lock);
913         list_for_each_entry(hdev, &hci_dev_list, list) {
914                 unsigned long flags = hdev->flags;
915
916                 /* When the auto-off is configured it means the transport
917                  * is running, but in that case still indicate that the
918                  * device is actually down.
919                  */
920                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
921                         flags &= ~BIT(HCI_UP);
922
923                 (dr + n)->dev_id  = hdev->id;
924                 (dr + n)->dev_opt = flags;
925
926                 if (++n >= dev_num)
927                         break;
928         }
929         read_unlock(&hci_dev_list_lock);
930
931         dl->dev_num = n;
932         size = sizeof(*dl) + n * sizeof(*dr);
933
934         err = copy_to_user(arg, dl, size);
935         kfree(dl);
936
937         return err ? -EFAULT : 0;
938 }
939
940 int hci_get_dev_info(void __user *arg)
941 {
942         struct hci_dev *hdev;
943         struct hci_dev_info di;
944         unsigned long flags;
945         int err = 0;
946
947         if (copy_from_user(&di, arg, sizeof(di)))
948                 return -EFAULT;
949
950         hdev = hci_dev_get(di.dev_id);
951         if (!hdev)
952                 return -ENODEV;
953
954         /* When the auto-off is configured it means the transport
955          * is running, but in that case still indicate that the
956          * device is actually down.
957          */
958         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
959                 flags = hdev->flags & ~BIT(HCI_UP);
960         else
961                 flags = hdev->flags;
962
963         strcpy(di.name, hdev->name);
964         di.bdaddr   = hdev->bdaddr;
965         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
966         di.flags    = flags;
967         di.pkt_type = hdev->pkt_type;
968         if (lmp_bredr_capable(hdev)) {
969                 di.acl_mtu  = hdev->acl_mtu;
970                 di.acl_pkts = hdev->acl_pkts;
971                 di.sco_mtu  = hdev->sco_mtu;
972                 di.sco_pkts = hdev->sco_pkts;
973         } else {
974                 di.acl_mtu  = hdev->le_mtu;
975                 di.acl_pkts = hdev->le_pkts;
976                 di.sco_mtu  = 0;
977                 di.sco_pkts = 0;
978         }
979         di.link_policy = hdev->link_policy;
980         di.link_mode   = hdev->link_mode;
981
982         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
983         memcpy(&di.features, &hdev->features, sizeof(di.features));
984
985         if (copy_to_user(arg, &di, sizeof(di)))
986                 err = -EFAULT;
987
988         hci_dev_put(hdev);
989
990         return err;
991 }
992
993 /* ---- Interface to HCI drivers ---- */
994
995 static int hci_rfkill_set_block(void *data, bool blocked)
996 {
997         struct hci_dev *hdev = data;
998
999         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1000
1001         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1002                 return -EBUSY;
1003
1004         if (blocked) {
1005                 hci_dev_set_flag(hdev, HCI_RFKILLED);
1006                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1007                     !hci_dev_test_flag(hdev, HCI_CONFIG))
1008                         hci_dev_do_close(hdev);
1009         } else {
1010                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1011         }
1012
1013         return 0;
1014 }
1015
1016 static const struct rfkill_ops hci_rfkill_ops = {
1017         .set_block = hci_rfkill_set_block,
1018 };
1019
1020 static void hci_power_on(struct work_struct *work)
1021 {
1022         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1023         int err;
1024
1025         BT_DBG("%s", hdev->name);
1026
1027         if (test_bit(HCI_UP, &hdev->flags) &&
1028             hci_dev_test_flag(hdev, HCI_MGMT) &&
1029             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1030                 cancel_delayed_work(&hdev->power_off);
1031                 err = hci_powered_update_sync(hdev);
1032                 mgmt_power_on(hdev, err);
1033                 return;
1034         }
1035
1036         err = hci_dev_do_open(hdev);
1037         if (err < 0) {
1038                 hci_dev_lock(hdev);
1039                 mgmt_set_powered_failed(hdev, err);
1040                 hci_dev_unlock(hdev);
1041                 return;
1042         }
1043
1044         /* During the HCI setup phase, a few error conditions are
1045          * ignored and they need to be checked now. If they are still
1046          * valid, it is important to turn the device back off.
1047          */
1048         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
1049             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
1050             (hdev->dev_type == HCI_PRIMARY &&
1051              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1052              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1053                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
1054                 hci_dev_do_close(hdev);
1055         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
1056                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1057                                    HCI_AUTO_OFF_TIMEOUT);
1058         }
1059
1060         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
1061                 /* For unconfigured devices, set the HCI_RAW flag
1062                  * so that userspace can easily identify them.
1063                  */
1064                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1065                         set_bit(HCI_RAW, &hdev->flags);
1066
1067                 /* For fully configured devices, this will send
1068                  * the Index Added event. For unconfigured devices,
1069                  * it will send Unconfigued Index Added event.
1070                  *
1071                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1072                  * and no event will be send.
1073                  */
1074                 mgmt_index_added(hdev);
1075         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1076                 /* When the controller is now configured, then it
1077                  * is important to clear the HCI_RAW flag.
1078                  */
1079                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1080                         clear_bit(HCI_RAW, &hdev->flags);
1081
1082                 /* Powering on the controller with HCI_CONFIG set only
1083                  * happens with the transition from unconfigured to
1084                  * configured. This will send the Index Added event.
1085                  */
1086                 mgmt_index_added(hdev);
1087         }
1088 }
1089
1090 static void hci_power_off(struct work_struct *work)
1091 {
1092         struct hci_dev *hdev = container_of(work, struct hci_dev,
1093                                             power_off.work);
1094
1095         BT_DBG("%s", hdev->name);
1096
1097         hci_dev_do_close(hdev);
1098 }
1099
1100 static void hci_error_reset(struct work_struct *work)
1101 {
1102         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1103
1104         BT_DBG("%s", hdev->name);
1105
1106         if (hdev->hw_error)
1107                 hdev->hw_error(hdev, hdev->hw_error_code);
1108         else
1109                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1110
1111         if (hci_dev_do_close(hdev))
1112                 return;
1113
1114         hci_dev_do_open(hdev);
1115 }
1116
1117 void hci_uuids_clear(struct hci_dev *hdev)
1118 {
1119         struct bt_uuid *uuid, *tmp;
1120
1121         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1122                 list_del(&uuid->list);
1123                 kfree(uuid);
1124         }
1125 }
1126
1127 void hci_link_keys_clear(struct hci_dev *hdev)
1128 {
1129         struct link_key *key, *tmp;
1130
1131         list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1132                 list_del_rcu(&key->list);
1133                 kfree_rcu(key, rcu);
1134         }
1135 }
1136
1137 void hci_smp_ltks_clear(struct hci_dev *hdev)
1138 {
1139         struct smp_ltk *k, *tmp;
1140
1141         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1142                 list_del_rcu(&k->list);
1143                 kfree_rcu(k, rcu);
1144         }
1145 }
1146
1147 void hci_smp_irks_clear(struct hci_dev *hdev)
1148 {
1149         struct smp_irk *k, *tmp;
1150
1151         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1152                 list_del_rcu(&k->list);
1153                 kfree_rcu(k, rcu);
1154         }
1155 }
1156
1157 void hci_blocked_keys_clear(struct hci_dev *hdev)
1158 {
1159         struct blocked_key *b, *tmp;
1160
1161         list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1162                 list_del_rcu(&b->list);
1163                 kfree_rcu(b, rcu);
1164         }
1165 }
1166
1167 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1168 {
1169         bool blocked = false;
1170         struct blocked_key *b;
1171
1172         rcu_read_lock();
1173         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1174                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1175                         blocked = true;
1176                         break;
1177                 }
1178         }
1179
1180         rcu_read_unlock();
1181         return blocked;
1182 }
1183
1184 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1185 {
1186         struct link_key *k;
1187
1188         rcu_read_lock();
1189         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1190                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1191                         rcu_read_unlock();
1192
1193                         if (hci_is_blocked_key(hdev,
1194                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
1195                                                k->val)) {
1196                                 bt_dev_warn_ratelimited(hdev,
1197                                                         "Link key blocked for %pMR",
1198                                                         &k->bdaddr);
1199                                 return NULL;
1200                         }
1201
1202                         return k;
1203                 }
1204         }
1205         rcu_read_unlock();
1206
1207         return NULL;
1208 }
1209
1210 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1211                                u8 key_type, u8 old_key_type)
1212 {
1213         /* Legacy key */
1214         if (key_type < 0x03)
1215                 return true;
1216
1217         /* Debug keys are insecure so don't store them persistently */
1218         if (key_type == HCI_LK_DEBUG_COMBINATION)
1219                 return false;
1220
1221         /* Changed combination key and there's no previous one */
1222         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1223                 return false;
1224
1225         /* Security mode 3 case */
1226         if (!conn)
1227                 return true;
1228
1229         /* BR/EDR key derived using SC from an LE link */
1230         if (conn->type == LE_LINK)
1231                 return true;
1232
1233         /* Neither local nor remote side had no-bonding as requirement */
1234         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1235                 return true;
1236
1237         /* Local side had dedicated bonding as requirement */
1238         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1239                 return true;
1240
1241         /* Remote side had dedicated bonding as requirement */
1242         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1243                 return true;
1244
1245         /* If none of the above criteria match, then don't store the key
1246          * persistently */
1247         return false;
1248 }
1249
1250 static u8 ltk_role(u8 type)
1251 {
1252         if (type == SMP_LTK)
1253                 return HCI_ROLE_MASTER;
1254
1255         return HCI_ROLE_SLAVE;
1256 }
1257
1258 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1259                              u8 addr_type, u8 role)
1260 {
1261         struct smp_ltk *k;
1262
1263         rcu_read_lock();
1264         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1265                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1266                         continue;
1267
1268                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1269                         rcu_read_unlock();
1270
1271                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1272                                                k->val)) {
1273                                 bt_dev_warn_ratelimited(hdev,
1274                                                         "LTK blocked for %pMR",
1275                                                         &k->bdaddr);
1276                                 return NULL;
1277                         }
1278
1279                         return k;
1280                 }
1281         }
1282         rcu_read_unlock();
1283
1284         return NULL;
1285 }
1286
1287 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1288 {
1289         struct smp_irk *irk_to_return = NULL;
1290         struct smp_irk *irk;
1291
1292         rcu_read_lock();
1293         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1294                 if (!bacmp(&irk->rpa, rpa)) {
1295                         irk_to_return = irk;
1296                         goto done;
1297                 }
1298         }
1299
1300         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1301                 if (smp_irk_matches(hdev, irk->val, rpa)) {
1302                         bacpy(&irk->rpa, rpa);
1303                         irk_to_return = irk;
1304                         goto done;
1305                 }
1306         }
1307
1308 done:
1309         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1310                                                 irk_to_return->val)) {
1311                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1312                                         &irk_to_return->bdaddr);
1313                 irk_to_return = NULL;
1314         }
1315
1316         rcu_read_unlock();
1317
1318         return irk_to_return;
1319 }
1320
1321 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1322                                      u8 addr_type)
1323 {
1324         struct smp_irk *irk_to_return = NULL;
1325         struct smp_irk *irk;
1326
1327         /* Identity Address must be public or static random */
1328         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1329                 return NULL;
1330
1331         rcu_read_lock();
1332         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1333                 if (addr_type == irk->addr_type &&
1334                     bacmp(bdaddr, &irk->bdaddr) == 0) {
1335                         irk_to_return = irk;
1336                         goto done;
1337                 }
1338         }
1339
1340 done:
1341
1342         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1343                                                 irk_to_return->val)) {
1344                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1345                                         &irk_to_return->bdaddr);
1346                 irk_to_return = NULL;
1347         }
1348
1349         rcu_read_unlock();
1350
1351         return irk_to_return;
1352 }
1353
1354 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1355                                   bdaddr_t *bdaddr, u8 *val, u8 type,
1356                                   u8 pin_len, bool *persistent)
1357 {
1358         struct link_key *key, *old_key;
1359         u8 old_key_type;
1360
1361         old_key = hci_find_link_key(hdev, bdaddr);
1362         if (old_key) {
1363                 old_key_type = old_key->type;
1364                 key = old_key;
1365         } else {
1366                 old_key_type = conn ? conn->key_type : 0xff;
1367                 key = kzalloc(sizeof(*key), GFP_KERNEL);
1368                 if (!key)
1369                         return NULL;
1370                 list_add_rcu(&key->list, &hdev->link_keys);
1371         }
1372
1373         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1374
1375         /* Some buggy controller combinations generate a changed
1376          * combination key for legacy pairing even when there's no
1377          * previous key */
1378         if (type == HCI_LK_CHANGED_COMBINATION &&
1379             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1380                 type = HCI_LK_COMBINATION;
1381                 if (conn)
1382                         conn->key_type = type;
1383         }
1384
1385         bacpy(&key->bdaddr, bdaddr);
1386         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1387         key->pin_len = pin_len;
1388
1389         if (type == HCI_LK_CHANGED_COMBINATION)
1390                 key->type = old_key_type;
1391         else
1392                 key->type = type;
1393
1394         if (persistent)
1395                 *persistent = hci_persistent_key(hdev, conn, type,
1396                                                  old_key_type);
1397
1398         return key;
1399 }
1400
1401 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1402                             u8 addr_type, u8 type, u8 authenticated,
1403                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1404 {
1405         struct smp_ltk *key, *old_key;
1406         u8 role = ltk_role(type);
1407
1408         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1409         if (old_key)
1410                 key = old_key;
1411         else {
1412                 key = kzalloc(sizeof(*key), GFP_KERNEL);
1413                 if (!key)
1414                         return NULL;
1415                 list_add_rcu(&key->list, &hdev->long_term_keys);
1416         }
1417
1418         bacpy(&key->bdaddr, bdaddr);
1419         key->bdaddr_type = addr_type;
1420         memcpy(key->val, tk, sizeof(key->val));
1421         key->authenticated = authenticated;
1422         key->ediv = ediv;
1423         key->rand = rand;
1424         key->enc_size = enc_size;
1425         key->type = type;
1426
1427         return key;
1428 }
1429
1430 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1431                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
1432 {
1433         struct smp_irk *irk;
1434
1435         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1436         if (!irk) {
1437                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1438                 if (!irk)
1439                         return NULL;
1440
1441                 bacpy(&irk->bdaddr, bdaddr);
1442                 irk->addr_type = addr_type;
1443
1444                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1445         }
1446
1447         memcpy(irk->val, val, 16);
1448         bacpy(&irk->rpa, rpa);
1449
1450         return irk;
1451 }
1452
1453 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1454 {
1455         struct link_key *key;
1456
1457         key = hci_find_link_key(hdev, bdaddr);
1458         if (!key)
1459                 return -ENOENT;
1460
1461         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1462
1463         list_del_rcu(&key->list);
1464         kfree_rcu(key, rcu);
1465
1466         return 0;
1467 }
1468
1469 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1470 {
1471         struct smp_ltk *k, *tmp;
1472         int removed = 0;
1473
1474         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1475                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1476                         continue;
1477
1478                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1479
1480                 list_del_rcu(&k->list);
1481                 kfree_rcu(k, rcu);
1482                 removed++;
1483         }
1484
1485         return removed ? 0 : -ENOENT;
1486 }
1487
1488 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1489 {
1490         struct smp_irk *k, *tmp;
1491
1492         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1493                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1494                         continue;
1495
1496                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1497
1498                 list_del_rcu(&k->list);
1499                 kfree_rcu(k, rcu);
1500         }
1501 }
1502
1503 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1504 {
1505         struct smp_ltk *k;
1506         struct smp_irk *irk;
1507         u8 addr_type;
1508
1509         if (type == BDADDR_BREDR) {
1510                 if (hci_find_link_key(hdev, bdaddr))
1511                         return true;
1512                 return false;
1513         }
1514
1515         /* Convert to HCI addr type which struct smp_ltk uses */
1516         if (type == BDADDR_LE_PUBLIC)
1517                 addr_type = ADDR_LE_DEV_PUBLIC;
1518         else
1519                 addr_type = ADDR_LE_DEV_RANDOM;
1520
1521         irk = hci_get_irk(hdev, bdaddr, addr_type);
1522         if (irk) {
1523                 bdaddr = &irk->bdaddr;
1524                 addr_type = irk->addr_type;
1525         }
1526
1527         rcu_read_lock();
1528         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1529                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1530                         rcu_read_unlock();
1531                         return true;
1532                 }
1533         }
1534         rcu_read_unlock();
1535
1536         return false;
1537 }
1538
1539 /* HCI command timer function */
1540 static void hci_cmd_timeout(struct work_struct *work)
1541 {
1542         struct hci_dev *hdev = container_of(work, struct hci_dev,
1543                                             cmd_timer.work);
1544
1545         if (hdev->sent_cmd) {
1546                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1547                 u16 opcode = __le16_to_cpu(sent->opcode);
1548
1549                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1550         } else {
1551                 bt_dev_err(hdev, "command tx timeout");
1552         }
1553
1554         if (hdev->cmd_timeout)
1555                 hdev->cmd_timeout(hdev);
1556
1557 #ifdef TIZEN_BT
1558         hci_tx_timeout_error_evt(hdev);
1559 #endif
1560
1561         atomic_set(&hdev->cmd_cnt, 1);
1562         queue_work(hdev->workqueue, &hdev->cmd_work);
1563 }
1564
1565 /* HCI ncmd timer function */
1566 static void hci_ncmd_timeout(struct work_struct *work)
1567 {
1568         struct hci_dev *hdev = container_of(work, struct hci_dev,
1569                                             ncmd_timer.work);
1570
1571         bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1572
1573         /* During HCI_INIT phase no events can be injected if the ncmd timer
1574          * triggers since the procedure has its own timeout handling.
1575          */
1576         if (test_bit(HCI_INIT, &hdev->flags))
1577                 return;
1578
1579         /* This is an irrecoverable state, inject hardware error event */
1580         hci_reset_dev(hdev);
1581 }
1582
1583 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1584                                           bdaddr_t *bdaddr, u8 bdaddr_type)
1585 {
1586         struct oob_data *data;
1587
1588         list_for_each_entry(data, &hdev->remote_oob_data, list) {
1589                 if (bacmp(bdaddr, &data->bdaddr) != 0)
1590                         continue;
1591                 if (data->bdaddr_type != bdaddr_type)
1592                         continue;
1593                 return data;
1594         }
1595
1596         return NULL;
1597 }
1598
1599 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1600                                u8 bdaddr_type)
1601 {
1602         struct oob_data *data;
1603
1604         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1605         if (!data)
1606                 return -ENOENT;
1607
1608         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1609
1610         list_del(&data->list);
1611         kfree(data);
1612
1613         return 0;
1614 }
1615
1616 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1617 {
1618         struct oob_data *data, *n;
1619
1620         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1621                 list_del(&data->list);
1622                 kfree(data);
1623         }
1624 }
1625
1626 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1627                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
1628                             u8 *hash256, u8 *rand256)
1629 {
1630         struct oob_data *data;
1631
1632         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1633         if (!data) {
1634                 data = kmalloc(sizeof(*data), GFP_KERNEL);
1635                 if (!data)
1636                         return -ENOMEM;
1637
1638                 bacpy(&data->bdaddr, bdaddr);
1639                 data->bdaddr_type = bdaddr_type;
1640                 list_add(&data->list, &hdev->remote_oob_data);
1641         }
1642
1643         if (hash192 && rand192) {
1644                 memcpy(data->hash192, hash192, sizeof(data->hash192));
1645                 memcpy(data->rand192, rand192, sizeof(data->rand192));
1646                 if (hash256 && rand256)
1647                         data->present = 0x03;
1648         } else {
1649                 memset(data->hash192, 0, sizeof(data->hash192));
1650                 memset(data->rand192, 0, sizeof(data->rand192));
1651                 if (hash256 && rand256)
1652                         data->present = 0x02;
1653                 else
1654                         data->present = 0x00;
1655         }
1656
1657         if (hash256 && rand256) {
1658                 memcpy(data->hash256, hash256, sizeof(data->hash256));
1659                 memcpy(data->rand256, rand256, sizeof(data->rand256));
1660         } else {
1661                 memset(data->hash256, 0, sizeof(data->hash256));
1662                 memset(data->rand256, 0, sizeof(data->rand256));
1663                 if (hash192 && rand192)
1664                         data->present = 0x01;
1665         }
1666
1667         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1668
1669         return 0;
1670 }
1671
1672 /* This function requires the caller holds hdev->lock */
1673 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1674 {
1675         struct adv_info *adv_instance;
1676
1677         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1678                 if (adv_instance->instance == instance)
1679                         return adv_instance;
1680         }
1681
1682         return NULL;
1683 }
1684
1685 /* This function requires the caller holds hdev->lock */
1686 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1687 {
1688         struct adv_info *cur_instance;
1689
1690         cur_instance = hci_find_adv_instance(hdev, instance);
1691         if (!cur_instance)
1692                 return NULL;
1693
1694         if (cur_instance == list_last_entry(&hdev->adv_instances,
1695                                             struct adv_info, list))
1696                 return list_first_entry(&hdev->adv_instances,
1697                                                  struct adv_info, list);
1698         else
1699                 return list_next_entry(cur_instance, list);
1700 }
1701
1702 /* This function requires the caller holds hdev->lock */
1703 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1704 {
1705         struct adv_info *adv_instance;
1706
1707         adv_instance = hci_find_adv_instance(hdev, instance);
1708         if (!adv_instance)
1709                 return -ENOENT;
1710
1711         BT_DBG("%s removing %dMR", hdev->name, instance);
1712
1713         if (hdev->cur_adv_instance == instance) {
1714                 if (hdev->adv_instance_timeout) {
1715                         cancel_delayed_work(&hdev->adv_instance_expire);
1716                         hdev->adv_instance_timeout = 0;
1717                 }
1718                 hdev->cur_adv_instance = 0x00;
1719         }
1720
1721         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1722
1723         list_del(&adv_instance->list);
1724         kfree(adv_instance);
1725
1726         hdev->adv_instance_cnt--;
1727
1728         return 0;
1729 }
1730
1731 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1732 {
1733         struct adv_info *adv_instance, *n;
1734
1735         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1736                 adv_instance->rpa_expired = rpa_expired;
1737 }
1738
1739 /* This function requires the caller holds hdev->lock */
1740 void hci_adv_instances_clear(struct hci_dev *hdev)
1741 {
1742         struct adv_info *adv_instance, *n;
1743
1744         if (hdev->adv_instance_timeout) {
1745                 cancel_delayed_work(&hdev->adv_instance_expire);
1746                 hdev->adv_instance_timeout = 0;
1747         }
1748
1749         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1750                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1751                 list_del(&adv_instance->list);
1752                 kfree(adv_instance);
1753         }
1754
1755         hdev->adv_instance_cnt = 0;
1756         hdev->cur_adv_instance = 0x00;
1757 }
1758
1759 static void adv_instance_rpa_expired(struct work_struct *work)
1760 {
1761         struct adv_info *adv_instance = container_of(work, struct adv_info,
1762                                                      rpa_expired_cb.work);
1763
1764         BT_DBG("");
1765
1766         adv_instance->rpa_expired = true;
1767 }
1768
1769 /* This function requires the caller holds hdev->lock */
1770 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1771                                       u32 flags, u16 adv_data_len, u8 *adv_data,
1772                                       u16 scan_rsp_len, u8 *scan_rsp_data,
1773                                       u16 timeout, u16 duration, s8 tx_power,
1774                                       u32 min_interval, u32 max_interval,
1775                                       u8 mesh_handle)
1776 {
1777         struct adv_info *adv;
1778
1779         adv = hci_find_adv_instance(hdev, instance);
1780         if (adv) {
1781                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1782                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1783                 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1784         } else {
1785                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1786                     instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1787                         return ERR_PTR(-EOVERFLOW);
1788
1789                 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1790                 if (!adv)
1791                         return ERR_PTR(-ENOMEM);
1792
1793                 adv->pending = true;
1794                 adv->instance = instance;
1795                 list_add(&adv->list, &hdev->adv_instances);
1796                 hdev->adv_instance_cnt++;
1797         }
1798
1799         adv->flags = flags;
1800         adv->min_interval = min_interval;
1801         adv->max_interval = max_interval;
1802         adv->tx_power = tx_power;
1803         /* Defining a mesh_handle changes the timing units to ms,
1804          * rather than seconds, and ties the instance to the requested
1805          * mesh_tx queue.
1806          */
1807         adv->mesh = mesh_handle;
1808
1809         hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1810                                   scan_rsp_len, scan_rsp_data);
1811
1812         adv->timeout = timeout;
1813         adv->remaining_time = timeout;
1814
1815         if (duration == 0)
1816                 adv->duration = hdev->def_multi_adv_rotation_duration;
1817         else
1818                 adv->duration = duration;
1819
1820         INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1821
1822         BT_DBG("%s for %dMR", hdev->name, instance);
1823
1824         return adv;
1825 }
1826
1827 /* This function requires the caller holds hdev->lock */
1828 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1829                                       u32 flags, u8 data_len, u8 *data,
1830                                       u32 min_interval, u32 max_interval)
1831 {
1832         struct adv_info *adv;
1833
1834         adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1835                                    0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1836                                    min_interval, max_interval, 0);
1837         if (IS_ERR(adv))
1838                 return adv;
1839
1840         adv->periodic = true;
1841         adv->per_adv_data_len = data_len;
1842
1843         if (data)
1844                 memcpy(adv->per_adv_data, data, data_len);
1845
1846         return adv;
1847 }
1848
1849 /* This function requires the caller holds hdev->lock */
1850 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1851                               u16 adv_data_len, u8 *adv_data,
1852                               u16 scan_rsp_len, u8 *scan_rsp_data)
1853 {
1854         struct adv_info *adv;
1855
1856         adv = hci_find_adv_instance(hdev, instance);
1857
1858         /* If advertisement doesn't exist, we can't modify its data */
1859         if (!adv)
1860                 return -ENOENT;
1861
1862         if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1863                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1864                 memcpy(adv->adv_data, adv_data, adv_data_len);
1865                 adv->adv_data_len = adv_data_len;
1866                 adv->adv_data_changed = true;
1867         }
1868
1869         if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1870                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1871                 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1872                 adv->scan_rsp_len = scan_rsp_len;
1873                 adv->scan_rsp_changed = true;
1874         }
1875
1876         /* Mark as changed if there are flags which would affect it */
1877         if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1878             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1879                 adv->scan_rsp_changed = true;
1880
1881         return 0;
1882 }
1883
1884 /* This function requires the caller holds hdev->lock */
1885 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1886 {
1887         u32 flags;
1888         struct adv_info *adv;
1889
1890         if (instance == 0x00) {
1891                 /* Instance 0 always manages the "Tx Power" and "Flags"
1892                  * fields
1893                  */
1894                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1895
1896                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1897                  * corresponds to the "connectable" instance flag.
1898                  */
1899                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1900                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1901
1902                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1903                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1904                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1905                         flags |= MGMT_ADV_FLAG_DISCOV;
1906
1907                 return flags;
1908         }
1909
1910         adv = hci_find_adv_instance(hdev, instance);
1911
1912         /* Return 0 when we got an invalid instance identifier. */
1913         if (!adv)
1914                 return 0;
1915
1916         return adv->flags;
1917 }
1918
1919 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1920 {
1921         struct adv_info *adv;
1922
1923         /* Instance 0x00 always set local name */
1924         if (instance == 0x00)
1925                 return true;
1926
1927         adv = hci_find_adv_instance(hdev, instance);
1928         if (!adv)
1929                 return false;
1930
1931         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1932             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1933                 return true;
1934
1935         return adv->scan_rsp_len ? true : false;
1936 }
1937
1938 /* This function requires the caller holds hdev->lock */
1939 void hci_adv_monitors_clear(struct hci_dev *hdev)
1940 {
1941         struct adv_monitor *monitor;
1942         int handle;
1943
1944         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1945                 hci_free_adv_monitor(hdev, monitor);
1946
1947         idr_destroy(&hdev->adv_monitors_idr);
1948 }
1949
1950 /* Frees the monitor structure and do some bookkeepings.
1951  * This function requires the caller holds hdev->lock.
1952  */
1953 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1954 {
1955         struct adv_pattern *pattern;
1956         struct adv_pattern *tmp;
1957
1958         if (!monitor)
1959                 return;
1960
1961         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1962                 list_del(&pattern->list);
1963                 kfree(pattern);
1964         }
1965
1966         if (monitor->handle)
1967                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1968
1969         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1970                 hdev->adv_monitors_cnt--;
1971                 mgmt_adv_monitor_removed(hdev, monitor->handle);
1972         }
1973
1974         kfree(monitor);
1975 }
1976
1977 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1978  * also attempts to forward the request to the controller.
1979  * This function requires the caller holds hci_req_sync_lock.
1980  */
1981 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1982 {
1983         int min, max, handle;
1984         int status = 0;
1985
1986         if (!monitor)
1987                 return -EINVAL;
1988
1989         hci_dev_lock(hdev);
1990
1991         min = HCI_MIN_ADV_MONITOR_HANDLE;
1992         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1993         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1994                            GFP_KERNEL);
1995
1996         hci_dev_unlock(hdev);
1997
1998         if (handle < 0)
1999                 return handle;
2000
2001         monitor->handle = handle;
2002
2003         if (!hdev_is_powered(hdev))
2004                 return status;
2005
2006         switch (hci_get_adv_monitor_offload_ext(hdev)) {
2007         case HCI_ADV_MONITOR_EXT_NONE:
2008                 bt_dev_dbg(hdev, "add monitor %d status %d",
2009                            monitor->handle, status);
2010                 /* Message was not forwarded to controller - not an error */
2011                 break;
2012
2013         case HCI_ADV_MONITOR_EXT_MSFT:
2014                 status = msft_add_monitor_pattern(hdev, monitor);
2015                 bt_dev_dbg(hdev, "add monitor %d msft status %d",
2016                            handle, status);
2017                 break;
2018         }
2019
2020         return status;
2021 }
2022
2023 /* Attempts to tell the controller and free the monitor. If somehow the
2024  * controller doesn't have a corresponding handle, remove anyway.
2025  * This function requires the caller holds hci_req_sync_lock.
2026  */
2027 static int hci_remove_adv_monitor(struct hci_dev *hdev,
2028                                   struct adv_monitor *monitor)
2029 {
2030         int status = 0;
2031         int handle;
2032
2033         switch (hci_get_adv_monitor_offload_ext(hdev)) {
2034         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
2035                 bt_dev_dbg(hdev, "remove monitor %d status %d",
2036                            monitor->handle, status);
2037                 goto free_monitor;
2038
2039         case HCI_ADV_MONITOR_EXT_MSFT:
2040                 handle = monitor->handle;
2041                 status = msft_remove_monitor(hdev, monitor);
2042                 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
2043                            handle, status);
2044                 break;
2045         }
2046
2047         /* In case no matching handle registered, just free the monitor */
2048         if (status == -ENOENT)
2049                 goto free_monitor;
2050
2051         return status;
2052
2053 free_monitor:
2054         if (status == -ENOENT)
2055                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2056                             monitor->handle);
2057         hci_free_adv_monitor(hdev, monitor);
2058
2059         return status;
2060 }
2061
2062 /* This function requires the caller holds hci_req_sync_lock */
2063 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2064 {
2065         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2066
2067         if (!monitor)
2068                 return -EINVAL;
2069
2070         return hci_remove_adv_monitor(hdev, monitor);
2071 }
2072
2073 /* This function requires the caller holds hci_req_sync_lock */
2074 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2075 {
2076         struct adv_monitor *monitor;
2077         int idr_next_id = 0;
2078         int status = 0;
2079
2080         while (1) {
2081                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2082                 if (!monitor)
2083                         break;
2084
2085                 status = hci_remove_adv_monitor(hdev, monitor);
2086                 if (status)
2087                         return status;
2088
2089                 idr_next_id++;
2090         }
2091
2092         return status;
2093 }
2094
2095 /* This function requires the caller holds hdev->lock */
2096 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2097 {
2098         return !idr_is_empty(&hdev->adv_monitors_idr);
2099 }
2100
2101 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2102 {
2103         if (msft_monitor_supported(hdev))
2104                 return HCI_ADV_MONITOR_EXT_MSFT;
2105
2106         return HCI_ADV_MONITOR_EXT_NONE;
2107 }
2108
2109 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2110                                          bdaddr_t *bdaddr, u8 type)
2111 {
2112         struct bdaddr_list *b;
2113
2114         list_for_each_entry(b, bdaddr_list, list) {
2115                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2116                         return b;
2117         }
2118
2119         return NULL;
2120 }
2121
2122 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2123                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2124                                 u8 type)
2125 {
2126         struct bdaddr_list_with_irk *b;
2127
2128         list_for_each_entry(b, bdaddr_list, list) {
2129                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2130                         return b;
2131         }
2132
2133         return NULL;
2134 }
2135
2136 struct bdaddr_list_with_flags *
2137 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2138                                   bdaddr_t *bdaddr, u8 type)
2139 {
2140         struct bdaddr_list_with_flags *b;
2141
2142         list_for_each_entry(b, bdaddr_list, list) {
2143                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2144                         return b;
2145         }
2146
2147         return NULL;
2148 }
2149
2150 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2151 {
2152         struct bdaddr_list *b, *n;
2153
2154         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2155                 list_del(&b->list);
2156                 kfree(b);
2157         }
2158 }
2159
2160 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2161 {
2162         struct bdaddr_list *entry;
2163
2164         if (!bacmp(bdaddr, BDADDR_ANY))
2165                 return -EBADF;
2166
2167         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2168                 return -EEXIST;
2169
2170         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2171         if (!entry)
2172                 return -ENOMEM;
2173
2174         bacpy(&entry->bdaddr, bdaddr);
2175         entry->bdaddr_type = type;
2176
2177         list_add(&entry->list, list);
2178
2179         return 0;
2180 }
2181
2182 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2183                                         u8 type, u8 *peer_irk, u8 *local_irk)
2184 {
2185         struct bdaddr_list_with_irk *entry;
2186
2187         if (!bacmp(bdaddr, BDADDR_ANY))
2188                 return -EBADF;
2189
2190         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2191                 return -EEXIST;
2192
2193         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2194         if (!entry)
2195                 return -ENOMEM;
2196
2197         bacpy(&entry->bdaddr, bdaddr);
2198         entry->bdaddr_type = type;
2199
2200         if (peer_irk)
2201                 memcpy(entry->peer_irk, peer_irk, 16);
2202
2203         if (local_irk)
2204                 memcpy(entry->local_irk, local_irk, 16);
2205
2206         list_add(&entry->list, list);
2207
2208         return 0;
2209 }
2210
2211 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2212                                    u8 type, u32 flags)
2213 {
2214         struct bdaddr_list_with_flags *entry;
2215
2216         if (!bacmp(bdaddr, BDADDR_ANY))
2217                 return -EBADF;
2218
2219         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2220                 return -EEXIST;
2221
2222         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2223         if (!entry)
2224                 return -ENOMEM;
2225
2226         bacpy(&entry->bdaddr, bdaddr);
2227         entry->bdaddr_type = type;
2228         entry->flags = flags;
2229
2230         list_add(&entry->list, list);
2231
2232         return 0;
2233 }
2234
2235 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2236 {
2237         struct bdaddr_list *entry;
2238
2239         if (!bacmp(bdaddr, BDADDR_ANY)) {
2240                 hci_bdaddr_list_clear(list);
2241                 return 0;
2242         }
2243
2244         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2245         if (!entry)
2246                 return -ENOENT;
2247
2248         list_del(&entry->list);
2249         kfree(entry);
2250
2251         return 0;
2252 }
2253
2254 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2255                                                         u8 type)
2256 {
2257         struct bdaddr_list_with_irk *entry;
2258
2259         if (!bacmp(bdaddr, BDADDR_ANY)) {
2260                 hci_bdaddr_list_clear(list);
2261                 return 0;
2262         }
2263
2264         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2265         if (!entry)
2266                 return -ENOENT;
2267
2268         list_del(&entry->list);
2269         kfree(entry);
2270
2271         return 0;
2272 }
2273
2274 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2275                                    u8 type)
2276 {
2277         struct bdaddr_list_with_flags *entry;
2278
2279         if (!bacmp(bdaddr, BDADDR_ANY)) {
2280                 hci_bdaddr_list_clear(list);
2281                 return 0;
2282         }
2283
2284         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2285         if (!entry)
2286                 return -ENOENT;
2287
2288         list_del(&entry->list);
2289         kfree(entry);
2290
2291         return 0;
2292 }
2293
2294 /* This function requires the caller holds hdev->lock */
2295 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2296                                                bdaddr_t *addr, u8 addr_type)
2297 {
2298         struct hci_conn_params *params;
2299
2300         list_for_each_entry(params, &hdev->le_conn_params, list) {
2301                 if (bacmp(&params->addr, addr) == 0 &&
2302                     params->addr_type == addr_type) {
2303                         return params;
2304                 }
2305         }
2306
2307         return NULL;
2308 }
2309
2310 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2311 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2312                                                   bdaddr_t *addr, u8 addr_type)
2313 {
2314         struct hci_conn_params *param;
2315
2316         rcu_read_lock();
2317
2318         list_for_each_entry_rcu(param, list, action) {
2319                 if (bacmp(&param->addr, addr) == 0 &&
2320                     param->addr_type == addr_type) {
2321                         rcu_read_unlock();
2322                         return param;
2323                 }
2324         }
2325
2326         rcu_read_unlock();
2327
2328         return NULL;
2329 }
2330
2331 /* This function requires the caller holds hdev->lock */
2332 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2333 {
2334         if (list_empty(&param->action))
2335                 return;
2336
2337         list_del_rcu(&param->action);
2338         synchronize_rcu();
2339         INIT_LIST_HEAD(&param->action);
2340 }
2341
2342 /* This function requires the caller holds hdev->lock */
2343 void hci_pend_le_list_add(struct hci_conn_params *param,
2344                           struct list_head *list)
2345 {
2346         list_add_rcu(&param->action, list);
2347 }
2348
2349 /* This function requires the caller holds hdev->lock */
2350 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2351                                             bdaddr_t *addr, u8 addr_type)
2352 {
2353         struct hci_conn_params *params;
2354
2355         params = hci_conn_params_lookup(hdev, addr, addr_type);
2356         if (params)
2357                 return params;
2358
2359         params = kzalloc(sizeof(*params), GFP_KERNEL);
2360         if (!params) {
2361                 bt_dev_err(hdev, "out of memory");
2362                 return NULL;
2363         }
2364
2365         bacpy(&params->addr, addr);
2366         params->addr_type = addr_type;
2367
2368         list_add(&params->list, &hdev->le_conn_params);
2369         INIT_LIST_HEAD(&params->action);
2370
2371         params->conn_min_interval = hdev->le_conn_min_interval;
2372         params->conn_max_interval = hdev->le_conn_max_interval;
2373         params->conn_latency = hdev->le_conn_latency;
2374         params->supervision_timeout = hdev->le_supv_timeout;
2375         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2376
2377         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2378
2379         return params;
2380 }
2381
2382 void hci_conn_params_free(struct hci_conn_params *params)
2383 {
2384         hci_pend_le_list_del_init(params);
2385
2386         if (params->conn) {
2387                 hci_conn_drop(params->conn);
2388                 hci_conn_put(params->conn);
2389         }
2390
2391         list_del(&params->list);
2392         kfree(params);
2393 }
2394
2395 /* This function requires the caller holds hdev->lock */
2396 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2397 {
2398         struct hci_conn_params *params;
2399
2400         params = hci_conn_params_lookup(hdev, addr, addr_type);
2401         if (!params)
2402                 return;
2403
2404         hci_conn_params_free(params);
2405
2406         hci_update_passive_scan(hdev);
2407
2408         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2409 }
2410
2411 /* This function requires the caller holds hdev->lock */
2412 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2413 {
2414         struct hci_conn_params *params, *tmp;
2415
2416         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2417                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2418                         continue;
2419
2420                 /* If trying to establish one time connection to disabled
2421                  * device, leave the params, but mark them as just once.
2422                  */
2423                 if (params->explicit_connect) {
2424                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2425                         continue;
2426                 }
2427
2428                 hci_conn_params_free(params);
2429         }
2430
2431         BT_DBG("All LE disabled connection parameters were removed");
2432 }
2433
2434 /* This function requires the caller holds hdev->lock */
2435 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2436 {
2437         struct hci_conn_params *params, *tmp;
2438
2439         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2440                 hci_conn_params_free(params);
2441
2442         BT_DBG("All LE connection parameters were removed");
2443 }
2444
2445 /* Copy the Identity Address of the controller.
2446  *
2447  * If the controller has a public BD_ADDR, then by default use that one.
2448  * If this is a LE only controller without a public address, default to
2449  * the static random address.
2450  *
2451  * For debugging purposes it is possible to force controllers with a
2452  * public address to use the static random address instead.
2453  *
2454  * In case BR/EDR has been disabled on a dual-mode controller and
2455  * userspace has configured a static address, then that address
2456  * becomes the identity address instead of the public BR/EDR address.
2457  */
2458 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2459                                u8 *bdaddr_type)
2460 {
2461         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2462             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2463             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2464              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2465                 bacpy(bdaddr, &hdev->static_addr);
2466                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2467         } else {
2468                 bacpy(bdaddr, &hdev->bdaddr);
2469                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2470         }
2471 }
2472
2473 static void hci_clear_wake_reason(struct hci_dev *hdev)
2474 {
2475         hci_dev_lock(hdev);
2476
2477         hdev->wake_reason = 0;
2478         bacpy(&hdev->wake_addr, BDADDR_ANY);
2479         hdev->wake_addr_type = 0;
2480
2481         hci_dev_unlock(hdev);
2482 }
2483
2484 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2485                                 void *data)
2486 {
2487         struct hci_dev *hdev =
2488                 container_of(nb, struct hci_dev, suspend_notifier);
2489         int ret = 0;
2490
2491         /* Userspace has full control of this device. Do nothing. */
2492         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2493                 return NOTIFY_DONE;
2494
2495         /* To avoid a potential race with hci_unregister_dev. */
2496         hci_dev_hold(hdev);
2497
2498         if (action == PM_SUSPEND_PREPARE)
2499                 ret = hci_suspend_dev(hdev);
2500         else if (action == PM_POST_SUSPEND)
2501                 ret = hci_resume_dev(hdev);
2502
2503         if (ret)
2504                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2505                            action, ret);
2506
2507         hci_dev_put(hdev);
2508         return NOTIFY_DONE;
2509 }
2510
2511 /* Alloc HCI device */
2512 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2513 {
2514         struct hci_dev *hdev;
2515         unsigned int alloc_size;
2516
2517         alloc_size = sizeof(*hdev);
2518         if (sizeof_priv) {
2519                 /* Fixme: May need ALIGN-ment? */
2520                 alloc_size += sizeof_priv;
2521         }
2522
2523         hdev = kzalloc(alloc_size, GFP_KERNEL);
2524         if (!hdev)
2525                 return NULL;
2526
2527         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2528         hdev->esco_type = (ESCO_HV1);
2529         hdev->link_mode = (HCI_LM_ACCEPT);
2530         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2531         hdev->io_capability = 0x03;     /* No Input No Output */
2532         hdev->manufacturer = 0xffff;    /* Default to internal use */
2533         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2534         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2535         hdev->adv_instance_cnt = 0;
2536         hdev->cur_adv_instance = 0x00;
2537         hdev->adv_instance_timeout = 0;
2538
2539         hdev->advmon_allowlist_duration = 300;
2540         hdev->advmon_no_filter_duration = 500;
2541         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
2542
2543         hdev->sniff_max_interval = 800;
2544 #ifdef TIZEN_BT
2545         hdev->sniff_min_interval = 400;
2546 #else
2547         hdev->sniff_min_interval = 80;
2548 #endif
2549         hdev->le_adv_channel_map = 0x07;
2550         hdev->le_adv_min_interval = 0x0800;
2551         hdev->le_adv_max_interval = 0x0800;
2552 #ifdef TIZEN_BT
2553         /* automatically enable sniff mode for connection */
2554         hdev->idle_timeout = TIZEN_SNIFF_TIMEOUT * 1000;
2555
2556         hdev->adv_filter_policy = 0x00;
2557         hdev->adv_type = 0x00;
2558 #endif
2559         hdev->le_scan_interval = 0x0060;
2560         hdev->le_scan_window = 0x0030;
2561         hdev->le_scan_int_suspend = 0x0400;
2562         hdev->le_scan_window_suspend = 0x0012;
2563         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2564         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2565         hdev->le_scan_int_adv_monitor = 0x0060;
2566         hdev->le_scan_window_adv_monitor = 0x0030;
2567         hdev->le_scan_int_connect = 0x0060;
2568         hdev->le_scan_window_connect = 0x0060;
2569         hdev->le_conn_min_interval = 0x0018;
2570         hdev->le_conn_max_interval = 0x0028;
2571         hdev->le_conn_latency = 0x0000;
2572         hdev->le_supv_timeout = 0x002a;
2573         hdev->le_def_tx_len = 0x001b;
2574         hdev->le_def_tx_time = 0x0148;
2575         hdev->le_max_tx_len = 0x001b;
2576         hdev->le_max_tx_time = 0x0148;
2577         hdev->le_max_rx_len = 0x001b;
2578         hdev->le_max_rx_time = 0x0148;
2579         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2580         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2581         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2582         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2583         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2584         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2585         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2586         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2587         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2588
2589         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2590         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2591         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2592         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2593         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2594         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2595
2596         /* default 1.28 sec page scan */
2597         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2598         hdev->def_page_scan_int = 0x0800;
2599         hdev->def_page_scan_window = 0x0012;
2600
2601         mutex_init(&hdev->lock);
2602         mutex_init(&hdev->req_lock);
2603
2604         ida_init(&hdev->unset_handle_ida);
2605
2606         INIT_LIST_HEAD(&hdev->mesh_pending);
2607         INIT_LIST_HEAD(&hdev->mgmt_pending);
2608         INIT_LIST_HEAD(&hdev->reject_list);
2609         INIT_LIST_HEAD(&hdev->accept_list);
2610         INIT_LIST_HEAD(&hdev->uuids);
2611         INIT_LIST_HEAD(&hdev->link_keys);
2612         INIT_LIST_HEAD(&hdev->long_term_keys);
2613         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2614         INIT_LIST_HEAD(&hdev->remote_oob_data);
2615         INIT_LIST_HEAD(&hdev->le_accept_list);
2616         INIT_LIST_HEAD(&hdev->le_resolv_list);
2617         INIT_LIST_HEAD(&hdev->le_conn_params);
2618         INIT_LIST_HEAD(&hdev->pend_le_conns);
2619         INIT_LIST_HEAD(&hdev->pend_le_reports);
2620         INIT_LIST_HEAD(&hdev->conn_hash.list);
2621         INIT_LIST_HEAD(&hdev->adv_instances);
2622         INIT_LIST_HEAD(&hdev->blocked_keys);
2623         INIT_LIST_HEAD(&hdev->monitored_devices);
2624
2625         INIT_LIST_HEAD(&hdev->local_codecs);
2626         INIT_WORK(&hdev->rx_work, hci_rx_work);
2627         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2628         INIT_WORK(&hdev->tx_work, hci_tx_work);
2629         INIT_WORK(&hdev->power_on, hci_power_on);
2630         INIT_WORK(&hdev->error_reset, hci_error_reset);
2631
2632         hci_cmd_sync_init(hdev);
2633
2634         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2635
2636         skb_queue_head_init(&hdev->rx_q);
2637         skb_queue_head_init(&hdev->cmd_q);
2638         skb_queue_head_init(&hdev->raw_q);
2639
2640         init_waitqueue_head(&hdev->req_wait_q);
2641
2642         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2643         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2644
2645         hci_devcd_setup(hdev);
2646         hci_request_setup(hdev);
2647
2648         hci_init_sysfs(hdev);
2649         discovery_init(hdev);
2650
2651         return hdev;
2652 }
2653 EXPORT_SYMBOL(hci_alloc_dev_priv);
2654
2655 /* Free HCI device */
2656 void hci_free_dev(struct hci_dev *hdev)
2657 {
2658         /* will free via device release */
2659         put_device(&hdev->dev);
2660 }
2661 EXPORT_SYMBOL(hci_free_dev);
2662
2663 /* Register HCI device */
2664 int hci_register_dev(struct hci_dev *hdev)
2665 {
2666         int id, error;
2667
2668         if (!hdev->open || !hdev->close || !hdev->send)
2669                 return -EINVAL;
2670
2671         /* Do not allow HCI_AMP devices to register at index 0,
2672          * so the index can be used as the AMP controller ID.
2673          */
2674         switch (hdev->dev_type) {
2675         case HCI_PRIMARY:
2676                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2677                 break;
2678         case HCI_AMP:
2679                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2680                 break;
2681         default:
2682                 return -EINVAL;
2683         }
2684
2685         if (id < 0)
2686                 return id;
2687
2688         error = dev_set_name(&hdev->dev, "hci%u", id);
2689         if (error)
2690                 return error;
2691
2692         hdev->name = dev_name(&hdev->dev);
2693         hdev->id = id;
2694
2695         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2696
2697         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2698         if (!hdev->workqueue) {
2699                 error = -ENOMEM;
2700                 goto err;
2701         }
2702
2703         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2704                                                       hdev->name);
2705         if (!hdev->req_workqueue) {
2706                 destroy_workqueue(hdev->workqueue);
2707                 error = -ENOMEM;
2708                 goto err;
2709         }
2710
2711         if (!IS_ERR_OR_NULL(bt_debugfs))
2712                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2713
2714         error = device_add(&hdev->dev);
2715         if (error < 0)
2716                 goto err_wqueue;
2717
2718         hci_leds_init(hdev);
2719
2720         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2721                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2722                                     hdev);
2723         if (hdev->rfkill) {
2724                 if (rfkill_register(hdev->rfkill) < 0) {
2725                         rfkill_destroy(hdev->rfkill);
2726                         hdev->rfkill = NULL;
2727                 }
2728         }
2729
2730         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2731                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2732
2733         hci_dev_set_flag(hdev, HCI_SETUP);
2734         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2735
2736         if (hdev->dev_type == HCI_PRIMARY) {
2737                 /* Assume BR/EDR support until proven otherwise (such as
2738                  * through reading supported features during init.
2739                  */
2740                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2741         }
2742
2743         write_lock(&hci_dev_list_lock);
2744         list_add(&hdev->list, &hci_dev_list);
2745         write_unlock(&hci_dev_list_lock);
2746
2747         /* Devices that are marked for raw-only usage are unconfigured
2748          * and should not be included in normal operation.
2749          */
2750         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2751                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2752
2753         /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2754          * callback.
2755          */
2756         if (hdev->wakeup)
2757                 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2758
2759         hci_sock_dev_event(hdev, HCI_DEV_REG);
2760         hci_dev_hold(hdev);
2761
2762         error = hci_register_suspend_notifier(hdev);
2763         if (error)
2764                 BT_WARN("register suspend notifier failed error:%d\n", error);
2765
2766         queue_work(hdev->req_workqueue, &hdev->power_on);
2767
2768         idr_init(&hdev->adv_monitors_idr);
2769         msft_register(hdev);
2770
2771         return id;
2772
2773 err_wqueue:
2774         debugfs_remove_recursive(hdev->debugfs);
2775         destroy_workqueue(hdev->workqueue);
2776         destroy_workqueue(hdev->req_workqueue);
2777 err:
2778         ida_simple_remove(&hci_index_ida, hdev->id);
2779
2780         return error;
2781 }
2782 EXPORT_SYMBOL(hci_register_dev);
2783
2784 /* Unregister HCI device */
2785 void hci_unregister_dev(struct hci_dev *hdev)
2786 {
2787         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2788
2789         mutex_lock(&hdev->unregister_lock);
2790         hci_dev_set_flag(hdev, HCI_UNREGISTER);
2791         mutex_unlock(&hdev->unregister_lock);
2792
2793         write_lock(&hci_dev_list_lock);
2794         list_del(&hdev->list);
2795         write_unlock(&hci_dev_list_lock);
2796
2797         cancel_work_sync(&hdev->power_on);
2798
2799         hci_cmd_sync_clear(hdev);
2800
2801         hci_unregister_suspend_notifier(hdev);
2802
2803         msft_unregister(hdev);
2804
2805         hci_dev_do_close(hdev);
2806
2807         if (!test_bit(HCI_INIT, &hdev->flags) &&
2808             !hci_dev_test_flag(hdev, HCI_SETUP) &&
2809             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2810                 hci_dev_lock(hdev);
2811                 mgmt_index_removed(hdev);
2812                 hci_dev_unlock(hdev);
2813         }
2814
2815         /* mgmt_index_removed should take care of emptying the
2816          * pending list */
2817         BUG_ON(!list_empty(&hdev->mgmt_pending));
2818
2819         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2820
2821         if (hdev->rfkill) {
2822                 rfkill_unregister(hdev->rfkill);
2823                 rfkill_destroy(hdev->rfkill);
2824         }
2825
2826         device_del(&hdev->dev);
2827         /* Actual cleanup is deferred until hci_release_dev(). */
2828         hci_dev_put(hdev);
2829 }
2830 EXPORT_SYMBOL(hci_unregister_dev);
2831
2832 /* Release HCI device */
2833 void hci_release_dev(struct hci_dev *hdev)
2834 {
2835         debugfs_remove_recursive(hdev->debugfs);
2836         kfree_const(hdev->hw_info);
2837         kfree_const(hdev->fw_info);
2838
2839         destroy_workqueue(hdev->workqueue);
2840         destroy_workqueue(hdev->req_workqueue);
2841
2842         hci_dev_lock(hdev);
2843         hci_bdaddr_list_clear(&hdev->reject_list);
2844         hci_bdaddr_list_clear(&hdev->accept_list);
2845         hci_uuids_clear(hdev);
2846         hci_link_keys_clear(hdev);
2847         hci_smp_ltks_clear(hdev);
2848         hci_smp_irks_clear(hdev);
2849         hci_remote_oob_data_clear(hdev);
2850         hci_adv_instances_clear(hdev);
2851         hci_adv_monitors_clear(hdev);
2852         hci_bdaddr_list_clear(&hdev->le_accept_list);
2853         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2854         hci_conn_params_clear_all(hdev);
2855         hci_discovery_filter_clear(hdev);
2856         hci_blocked_keys_clear(hdev);
2857         hci_codec_list_clear(&hdev->local_codecs);
2858         hci_dev_unlock(hdev);
2859
2860         ida_destroy(&hdev->unset_handle_ida);
2861         ida_simple_remove(&hci_index_ida, hdev->id);
2862         kfree_skb(hdev->sent_cmd);
2863         kfree_skb(hdev->recv_event);
2864         kfree(hdev);
2865 }
2866 EXPORT_SYMBOL(hci_release_dev);
2867
2868 int hci_register_suspend_notifier(struct hci_dev *hdev)
2869 {
2870         int ret = 0;
2871
2872         if (!hdev->suspend_notifier.notifier_call &&
2873             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2874                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2875                 ret = register_pm_notifier(&hdev->suspend_notifier);
2876         }
2877
2878         return ret;
2879 }
2880
2881 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2882 {
2883         int ret = 0;
2884
2885         if (hdev->suspend_notifier.notifier_call) {
2886                 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2887                 if (!ret)
2888                         hdev->suspend_notifier.notifier_call = NULL;
2889         }
2890
2891         return ret;
2892 }
2893
2894 /* Suspend HCI device */
2895 int hci_suspend_dev(struct hci_dev *hdev)
2896 {
2897         int ret;
2898
2899         bt_dev_dbg(hdev, "");
2900
2901         /* Suspend should only act on when powered. */
2902         if (!hdev_is_powered(hdev) ||
2903             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2904                 return 0;
2905
2906         /* If powering down don't attempt to suspend */
2907         if (mgmt_powering_down(hdev))
2908                 return 0;
2909
2910         /* Cancel potentially blocking sync operation before suspend */
2911         __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
2912
2913         hci_req_sync_lock(hdev);
2914         ret = hci_suspend_sync(hdev);
2915         hci_req_sync_unlock(hdev);
2916
2917         hci_clear_wake_reason(hdev);
2918         mgmt_suspending(hdev, hdev->suspend_state);
2919
2920         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2921         return ret;
2922 }
2923 EXPORT_SYMBOL(hci_suspend_dev);
2924
2925 /* Resume HCI device */
2926 int hci_resume_dev(struct hci_dev *hdev)
2927 {
2928         int ret;
2929
2930         bt_dev_dbg(hdev, "");
2931
2932         /* Resume should only act on when powered. */
2933         if (!hdev_is_powered(hdev) ||
2934             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2935                 return 0;
2936
2937         /* If powering down don't attempt to resume */
2938         if (mgmt_powering_down(hdev))
2939                 return 0;
2940
2941         hci_req_sync_lock(hdev);
2942         ret = hci_resume_sync(hdev);
2943         hci_req_sync_unlock(hdev);
2944
2945         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2946                       hdev->wake_addr_type);
2947
2948         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2949         return ret;
2950 }
2951 EXPORT_SYMBOL(hci_resume_dev);
2952
2953 /* Reset HCI device */
2954 int hci_reset_dev(struct hci_dev *hdev)
2955 {
2956         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2957         struct sk_buff *skb;
2958
2959         skb = bt_skb_alloc(3, GFP_ATOMIC);
2960         if (!skb)
2961                 return -ENOMEM;
2962
2963         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2964         skb_put_data(skb, hw_err, 3);
2965
2966         bt_dev_err(hdev, "Injecting HCI hardware error event");
2967
2968         /* Send Hardware Error to upper stack */
2969         return hci_recv_frame(hdev, skb);
2970 }
2971 EXPORT_SYMBOL(hci_reset_dev);
2972
2973 /* Receive frame from HCI drivers */
2974 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2975 {
2976         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2977                       && !test_bit(HCI_INIT, &hdev->flags))) {
2978                 kfree_skb(skb);
2979                 return -ENXIO;
2980         }
2981
2982         switch (hci_skb_pkt_type(skb)) {
2983         case HCI_EVENT_PKT:
2984                 break;
2985         case HCI_ACLDATA_PKT:
2986                 /* Detect if ISO packet has been sent as ACL */
2987                 if (hci_conn_num(hdev, ISO_LINK)) {
2988                         __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2989                         __u8 type;
2990
2991                         type = hci_conn_lookup_type(hdev, hci_handle(handle));
2992                         if (type == ISO_LINK)
2993                                 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2994                 }
2995                 break;
2996         case HCI_SCODATA_PKT:
2997                 break;
2998         case HCI_ISODATA_PKT:
2999                 break;
3000         default:
3001                 kfree_skb(skb);
3002                 return -EINVAL;
3003         }
3004
3005         /* Incoming skb */
3006         bt_cb(skb)->incoming = 1;
3007
3008         /* Time stamp */
3009         __net_timestamp(skb);
3010
3011         skb_queue_tail(&hdev->rx_q, skb);
3012         queue_work(hdev->workqueue, &hdev->rx_work);
3013
3014         return 0;
3015 }
3016 EXPORT_SYMBOL(hci_recv_frame);
3017
3018 /* Receive diagnostic message from HCI drivers */
3019 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3020 {
3021         /* Mark as diagnostic packet */
3022         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3023
3024         /* Time stamp */
3025         __net_timestamp(skb);
3026
3027         skb_queue_tail(&hdev->rx_q, skb);
3028         queue_work(hdev->workqueue, &hdev->rx_work);
3029
3030         return 0;
3031 }
3032 EXPORT_SYMBOL(hci_recv_diag);
3033
3034 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3035 {
3036         va_list vargs;
3037
3038         va_start(vargs, fmt);
3039         kfree_const(hdev->hw_info);
3040         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3041         va_end(vargs);
3042 }
3043 EXPORT_SYMBOL(hci_set_hw_info);
3044
3045 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3046 {
3047         va_list vargs;
3048
3049         va_start(vargs, fmt);
3050         kfree_const(hdev->fw_info);
3051         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3052         va_end(vargs);
3053 }
3054 EXPORT_SYMBOL(hci_set_fw_info);
3055
3056 /* ---- Interface to upper protocols ---- */
3057
3058 int hci_register_cb(struct hci_cb *cb)
3059 {
3060         BT_DBG("%p name %s", cb, cb->name);
3061
3062         mutex_lock(&hci_cb_list_lock);
3063         list_add_tail(&cb->list, &hci_cb_list);
3064         mutex_unlock(&hci_cb_list_lock);
3065
3066         return 0;
3067 }
3068 EXPORT_SYMBOL(hci_register_cb);
3069
3070 int hci_unregister_cb(struct hci_cb *cb)
3071 {
3072         BT_DBG("%p name %s", cb, cb->name);
3073
3074         mutex_lock(&hci_cb_list_lock);
3075         list_del(&cb->list);
3076         mutex_unlock(&hci_cb_list_lock);
3077
3078         return 0;
3079 }
3080 EXPORT_SYMBOL(hci_unregister_cb);
3081
3082 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3083 {
3084         int err;
3085
3086         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3087                skb->len);
3088
3089         /* Time stamp */
3090         __net_timestamp(skb);
3091
3092         /* Send copy to monitor */
3093         hci_send_to_monitor(hdev, skb);
3094
3095         if (atomic_read(&hdev->promisc)) {
3096                 /* Send copy to the sockets */
3097                 hci_send_to_sock(hdev, skb);
3098         }
3099
3100         /* Get rid of skb owner, prior to sending to the driver. */
3101         skb_orphan(skb);
3102
3103         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3104                 kfree_skb(skb);
3105                 return -EINVAL;
3106         }
3107
3108         err = hdev->send(hdev, skb);
3109         if (err < 0) {
3110                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3111                 kfree_skb(skb);
3112                 return err;
3113         }
3114
3115         return 0;
3116 }
3117
3118 /* Send HCI command */
3119 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3120                  const void *param)
3121 {
3122         struct sk_buff *skb;
3123
3124         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3125
3126         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3127         if (!skb) {
3128                 bt_dev_err(hdev, "no memory for command");
3129                 return -ENOMEM;
3130         }
3131
3132         /* Stand-alone HCI commands must be flagged as
3133          * single-command requests.
3134          */
3135         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3136
3137         skb_queue_tail(&hdev->cmd_q, skb);
3138         queue_work(hdev->workqueue, &hdev->cmd_work);
3139
3140         return 0;
3141 }
3142
3143 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3144                    const void *param)
3145 {
3146         struct sk_buff *skb;
3147
3148         if (hci_opcode_ogf(opcode) != 0x3f) {
3149                 /* A controller receiving a command shall respond with either
3150                  * a Command Status Event or a Command Complete Event.
3151                  * Therefore, all standard HCI commands must be sent via the
3152                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3153                  * Some vendors do not comply with this rule for vendor-specific
3154                  * commands and do not return any event. We want to support
3155                  * unresponded commands for such cases only.
3156                  */
3157                 bt_dev_err(hdev, "unresponded command not supported");
3158                 return -EINVAL;
3159         }
3160
3161         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3162         if (!skb) {
3163                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3164                            opcode);
3165                 return -ENOMEM;
3166         }
3167
3168         hci_send_frame(hdev, skb);
3169
3170         return 0;
3171 }
3172 EXPORT_SYMBOL(__hci_cmd_send);
3173
3174 /* Get data from the previously sent command */
3175 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3176 {
3177         struct hci_command_hdr *hdr;
3178
3179         if (!hdev->sent_cmd)
3180                 return NULL;
3181
3182         hdr = (void *) hdev->sent_cmd->data;
3183
3184         if (hdr->opcode != cpu_to_le16(opcode))
3185                 return NULL;
3186
3187         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3188
3189         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3190 }
3191
3192 /* Get data from last received event */
3193 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3194 {
3195         struct hci_event_hdr *hdr;
3196         int offset;
3197
3198         if (!hdev->recv_event)
3199                 return NULL;
3200
3201         hdr = (void *)hdev->recv_event->data;
3202         offset = sizeof(*hdr);
3203
3204         if (hdr->evt != event) {
3205                 /* In case of LE metaevent check the subevent match */
3206                 if (hdr->evt == HCI_EV_LE_META) {
3207                         struct hci_ev_le_meta *ev;
3208
3209                         ev = (void *)hdev->recv_event->data + offset;
3210                         offset += sizeof(*ev);
3211                         if (ev->subevent == event)
3212                                 goto found;
3213                 }
3214                 return NULL;
3215         }
3216
3217 found:
3218         bt_dev_dbg(hdev, "event 0x%2.2x", event);
3219
3220         return hdev->recv_event->data + offset;
3221 }
3222
3223 /* Send ACL data */
3224 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3225 {
3226         struct hci_acl_hdr *hdr;
3227         int len = skb->len;
3228
3229         skb_push(skb, HCI_ACL_HDR_SIZE);
3230         skb_reset_transport_header(skb);
3231         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3232         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3233         hdr->dlen   = cpu_to_le16(len);
3234 }
3235
3236 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3237                           struct sk_buff *skb, __u16 flags)
3238 {
3239         struct hci_conn *conn = chan->conn;
3240         struct hci_dev *hdev = conn->hdev;
3241         struct sk_buff *list;
3242
3243         skb->len = skb_headlen(skb);
3244         skb->data_len = 0;
3245
3246         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3247
3248         switch (hdev->dev_type) {
3249         case HCI_PRIMARY:
3250                 hci_add_acl_hdr(skb, conn->handle, flags);
3251                 break;
3252         case HCI_AMP:
3253                 hci_add_acl_hdr(skb, chan->handle, flags);
3254                 break;
3255         default:
3256                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3257                 return;
3258         }
3259
3260         list = skb_shinfo(skb)->frag_list;
3261         if (!list) {
3262                 /* Non fragmented */
3263                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3264
3265                 skb_queue_tail(queue, skb);
3266         } else {
3267                 /* Fragmented */
3268                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3269
3270                 skb_shinfo(skb)->frag_list = NULL;
3271
3272                 /* Queue all fragments atomically. We need to use spin_lock_bh
3273                  * here because of 6LoWPAN links, as there this function is
3274                  * called from softirq and using normal spin lock could cause
3275                  * deadlocks.
3276                  */
3277                 spin_lock_bh(&queue->lock);
3278
3279                 __skb_queue_tail(queue, skb);
3280
3281                 flags &= ~ACL_START;
3282                 flags |= ACL_CONT;
3283                 do {
3284                         skb = list; list = list->next;
3285
3286                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3287                         hci_add_acl_hdr(skb, conn->handle, flags);
3288
3289                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3290
3291                         __skb_queue_tail(queue, skb);
3292                 } while (list);
3293
3294                 spin_unlock_bh(&queue->lock);
3295         }
3296 }
3297
3298 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3299 {
3300         struct hci_dev *hdev = chan->conn->hdev;
3301
3302         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3303
3304         hci_queue_acl(chan, &chan->data_q, skb, flags);
3305
3306         queue_work(hdev->workqueue, &hdev->tx_work);
3307 }
3308
3309 /* Send SCO data */
3310 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3311 {
3312         struct hci_dev *hdev = conn->hdev;
3313         struct hci_sco_hdr hdr;
3314
3315         BT_DBG("%s len %d", hdev->name, skb->len);
3316
3317         hdr.handle = cpu_to_le16(conn->handle);
3318         hdr.dlen   = skb->len;
3319
3320         skb_push(skb, HCI_SCO_HDR_SIZE);
3321         skb_reset_transport_header(skb);
3322         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3323
3324         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3325
3326         skb_queue_tail(&conn->data_q, skb);
3327         queue_work(hdev->workqueue, &hdev->tx_work);
3328 }
3329
3330 /* Send ISO data */
3331 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3332 {
3333         struct hci_iso_hdr *hdr;
3334         int len = skb->len;
3335
3336         skb_push(skb, HCI_ISO_HDR_SIZE);
3337         skb_reset_transport_header(skb);
3338         hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3339         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3340         hdr->dlen   = cpu_to_le16(len);
3341 }
3342
3343 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3344                           struct sk_buff *skb)
3345 {
3346         struct hci_dev *hdev = conn->hdev;
3347         struct sk_buff *list;
3348         __u16 flags;
3349
3350         skb->len = skb_headlen(skb);
3351         skb->data_len = 0;
3352
3353         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3354
3355         list = skb_shinfo(skb)->frag_list;
3356
3357         flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3358         hci_add_iso_hdr(skb, conn->handle, flags);
3359
3360         if (!list) {
3361                 /* Non fragmented */
3362                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3363
3364                 skb_queue_tail(queue, skb);
3365         } else {
3366                 /* Fragmented */
3367                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3368
3369                 skb_shinfo(skb)->frag_list = NULL;
3370
3371                 __skb_queue_tail(queue, skb);
3372
3373                 do {
3374                         skb = list; list = list->next;
3375
3376                         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3377                         flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3378                                                    0x00);
3379                         hci_add_iso_hdr(skb, conn->handle, flags);
3380
3381                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3382
3383                         __skb_queue_tail(queue, skb);
3384                 } while (list);
3385         }
3386 }
3387
3388 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3389 {
3390         struct hci_dev *hdev = conn->hdev;
3391
3392         BT_DBG("%s len %d", hdev->name, skb->len);
3393
3394         hci_queue_iso(conn, &conn->data_q, skb);
3395
3396         queue_work(hdev->workqueue, &hdev->tx_work);
3397 }
3398
3399 /* ---- HCI TX task (outgoing data) ---- */
3400
3401 /* HCI Connection scheduler */
3402 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3403 {
3404         struct hci_dev *hdev;
3405         int cnt, q;
3406
3407         if (!conn) {
3408                 *quote = 0;
3409                 return;
3410         }
3411
3412         hdev = conn->hdev;
3413
3414         switch (conn->type) {
3415         case ACL_LINK:
3416                 cnt = hdev->acl_cnt;
3417                 break;
3418         case AMP_LINK:
3419                 cnt = hdev->block_cnt;
3420                 break;
3421         case SCO_LINK:
3422         case ESCO_LINK:
3423                 cnt = hdev->sco_cnt;
3424                 break;
3425         case LE_LINK:
3426                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3427                 break;
3428         case ISO_LINK:
3429                 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3430                         hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3431                 break;
3432         default:
3433                 cnt = 0;
3434                 bt_dev_err(hdev, "unknown link type %d", conn->type);
3435         }
3436
3437         q = cnt / num;
3438         *quote = q ? q : 1;
3439 }
3440
3441 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3442                                      int *quote)
3443 {
3444         struct hci_conn_hash *h = &hdev->conn_hash;
3445         struct hci_conn *conn = NULL, *c;
3446         unsigned int num = 0, min = ~0;
3447
3448         /* We don't have to lock device here. Connections are always
3449          * added and removed with TX task disabled. */
3450
3451         rcu_read_lock();
3452
3453         list_for_each_entry_rcu(c, &h->list, list) {
3454                 if (c->type != type || skb_queue_empty(&c->data_q))
3455                         continue;
3456
3457                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3458                         continue;
3459
3460                 num++;
3461
3462                 if (c->sent < min) {
3463                         min  = c->sent;
3464                         conn = c;
3465                 }
3466
3467                 if (hci_conn_num(hdev, type) == num)
3468                         break;
3469         }
3470
3471         rcu_read_unlock();
3472
3473         hci_quote_sent(conn, num, quote);
3474
3475         BT_DBG("conn %p quote %d", conn, *quote);
3476         return conn;
3477 }
3478
3479 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3480 {
3481         struct hci_conn_hash *h = &hdev->conn_hash;
3482         struct hci_conn *c;
3483
3484         bt_dev_err(hdev, "link tx timeout");
3485
3486         rcu_read_lock();
3487
3488         /* Kill stalled connections */
3489         list_for_each_entry_rcu(c, &h->list, list) {
3490                 if (c->type == type && c->sent) {
3491                         bt_dev_err(hdev, "killing stalled connection %pMR",
3492                                    &c->dst);
3493                         /* hci_disconnect might sleep, so, we have to release
3494                          * the RCU read lock before calling it.
3495                          */
3496                         rcu_read_unlock();
3497                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3498                         rcu_read_lock();
3499                 }
3500         }
3501
3502         rcu_read_unlock();
3503 }
3504
3505 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3506                                       int *quote)
3507 {
3508         struct hci_conn_hash *h = &hdev->conn_hash;
3509         struct hci_chan *chan = NULL;
3510         unsigned int num = 0, min = ~0, cur_prio = 0;
3511         struct hci_conn *conn;
3512         int conn_num = 0;
3513
3514         BT_DBG("%s", hdev->name);
3515
3516         rcu_read_lock();
3517
3518         list_for_each_entry_rcu(conn, &h->list, list) {
3519                 struct hci_chan *tmp;
3520
3521                 if (conn->type != type)
3522                         continue;
3523
3524                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3525                         continue;
3526
3527                 conn_num++;
3528
3529                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3530                         struct sk_buff *skb;
3531
3532                         if (skb_queue_empty(&tmp->data_q))
3533                                 continue;
3534
3535                         skb = skb_peek(&tmp->data_q);
3536                         if (skb->priority < cur_prio)
3537                                 continue;
3538
3539                         if (skb->priority > cur_prio) {
3540                                 num = 0;
3541                                 min = ~0;
3542                                 cur_prio = skb->priority;
3543                         }
3544
3545                         num++;
3546
3547                         if (conn->sent < min) {
3548                                 min  = conn->sent;
3549                                 chan = tmp;
3550                         }
3551                 }
3552
3553                 if (hci_conn_num(hdev, type) == conn_num)
3554                         break;
3555         }
3556
3557         rcu_read_unlock();
3558
3559         if (!chan)
3560                 return NULL;
3561
3562         hci_quote_sent(chan->conn, num, quote);
3563
3564         BT_DBG("chan %p quote %d", chan, *quote);
3565         return chan;
3566 }
3567
3568 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3569 {
3570         struct hci_conn_hash *h = &hdev->conn_hash;
3571         struct hci_conn *conn;
3572         int num = 0;
3573
3574         BT_DBG("%s", hdev->name);
3575
3576         rcu_read_lock();
3577
3578         list_for_each_entry_rcu(conn, &h->list, list) {
3579                 struct hci_chan *chan;
3580
3581                 if (conn->type != type)
3582                         continue;
3583
3584                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3585                         continue;
3586
3587                 num++;
3588
3589                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3590                         struct sk_buff *skb;
3591
3592                         if (chan->sent) {
3593                                 chan->sent = 0;
3594                                 continue;
3595                         }
3596
3597                         if (skb_queue_empty(&chan->data_q))
3598                                 continue;
3599
3600                         skb = skb_peek(&chan->data_q);
3601                         if (skb->priority >= HCI_PRIO_MAX - 1)
3602                                 continue;
3603
3604                         skb->priority = HCI_PRIO_MAX - 1;
3605
3606                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3607                                skb->priority);
3608                 }
3609
3610                 if (hci_conn_num(hdev, type) == num)
3611                         break;
3612         }
3613
3614         rcu_read_unlock();
3615
3616 }
3617
3618 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3619 {
3620         /* Calculate count of blocks used by this packet */
3621         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3622 }
3623
3624 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3625 {
3626         unsigned long last_tx;
3627
3628         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3629                 return;
3630
3631         switch (type) {
3632         case LE_LINK:
3633                 last_tx = hdev->le_last_tx;
3634                 break;
3635         default:
3636                 last_tx = hdev->acl_last_tx;
3637                 break;
3638         }
3639
3640         /* tx timeout must be longer than maximum link supervision timeout
3641          * (40.9 seconds)
3642          */
3643         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3644                 hci_link_tx_to(hdev, type);
3645 }
3646
3647 /* Schedule SCO */
3648 static void hci_sched_sco(struct hci_dev *hdev)
3649 {
3650         struct hci_conn *conn;
3651         struct sk_buff *skb;
3652         int quote;
3653
3654         BT_DBG("%s", hdev->name);
3655
3656         if (!hci_conn_num(hdev, SCO_LINK))
3657                 return;
3658
3659         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3660                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3661                         BT_DBG("skb %p len %d", skb, skb->len);
3662                         hci_send_frame(hdev, skb);
3663
3664                         conn->sent++;
3665                         if (conn->sent == ~0)
3666                                 conn->sent = 0;
3667                 }
3668         }
3669 }
3670
3671 static void hci_sched_esco(struct hci_dev *hdev)
3672 {
3673         struct hci_conn *conn;
3674         struct sk_buff *skb;
3675         int quote;
3676
3677         BT_DBG("%s", hdev->name);
3678
3679         if (!hci_conn_num(hdev, ESCO_LINK))
3680                 return;
3681
3682         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3683                                                      &quote))) {
3684                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3685                         BT_DBG("skb %p len %d", skb, skb->len);
3686                         hci_send_frame(hdev, skb);
3687
3688                         conn->sent++;
3689                         if (conn->sent == ~0)
3690                                 conn->sent = 0;
3691                 }
3692         }
3693 }
3694
3695 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3696 {
3697         unsigned int cnt = hdev->acl_cnt;
3698         struct hci_chan *chan;
3699         struct sk_buff *skb;
3700         int quote;
3701
3702         __check_timeout(hdev, cnt, ACL_LINK);
3703
3704         while (hdev->acl_cnt &&
3705                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3706                 u32 priority = (skb_peek(&chan->data_q))->priority;
3707                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3708                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3709                                skb->len, skb->priority);
3710
3711                         /* Stop if priority has changed */
3712                         if (skb->priority < priority)
3713                                 break;
3714
3715                         skb = skb_dequeue(&chan->data_q);
3716
3717                         hci_conn_enter_active_mode(chan->conn,
3718                                                    bt_cb(skb)->force_active);
3719
3720                         hci_send_frame(hdev, skb);
3721                         hdev->acl_last_tx = jiffies;
3722
3723                         hdev->acl_cnt--;
3724                         chan->sent++;
3725                         chan->conn->sent++;
3726
3727                         /* Send pending SCO packets right away */
3728                         hci_sched_sco(hdev);
3729                         hci_sched_esco(hdev);
3730                 }
3731         }
3732
3733         if (cnt != hdev->acl_cnt)
3734                 hci_prio_recalculate(hdev, ACL_LINK);
3735 }
3736
3737 static void hci_sched_acl_blk(struct hci_dev *hdev)
3738 {
3739         unsigned int cnt = hdev->block_cnt;
3740         struct hci_chan *chan;
3741         struct sk_buff *skb;
3742         int quote;
3743         u8 type;
3744
3745         BT_DBG("%s", hdev->name);
3746
3747         if (hdev->dev_type == HCI_AMP)
3748                 type = AMP_LINK;
3749         else
3750                 type = ACL_LINK;
3751
3752         __check_timeout(hdev, cnt, type);
3753
3754         while (hdev->block_cnt > 0 &&
3755                (chan = hci_chan_sent(hdev, type, &quote))) {
3756                 u32 priority = (skb_peek(&chan->data_q))->priority;
3757                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3758                         int blocks;
3759
3760                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3761                                skb->len, skb->priority);
3762
3763                         /* Stop if priority has changed */
3764                         if (skb->priority < priority)
3765                                 break;
3766
3767                         skb = skb_dequeue(&chan->data_q);
3768
3769                         blocks = __get_blocks(hdev, skb);
3770                         if (blocks > hdev->block_cnt)
3771                                 return;
3772
3773                         hci_conn_enter_active_mode(chan->conn,
3774                                                    bt_cb(skb)->force_active);
3775
3776                         hci_send_frame(hdev, skb);
3777                         hdev->acl_last_tx = jiffies;
3778
3779                         hdev->block_cnt -= blocks;
3780                         quote -= blocks;
3781
3782                         chan->sent += blocks;
3783                         chan->conn->sent += blocks;
3784                 }
3785         }
3786
3787         if (cnt != hdev->block_cnt)
3788                 hci_prio_recalculate(hdev, type);
3789 }
3790
3791 static void hci_sched_acl(struct hci_dev *hdev)
3792 {
3793         BT_DBG("%s", hdev->name);
3794
3795         /* No ACL link over BR/EDR controller */
3796         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3797                 return;
3798
3799         /* No AMP link over AMP controller */
3800         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3801                 return;
3802
3803         switch (hdev->flow_ctl_mode) {
3804         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3805                 hci_sched_acl_pkt(hdev);
3806                 break;
3807
3808         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3809                 hci_sched_acl_blk(hdev);
3810                 break;
3811         }
3812 }
3813
3814 static void hci_sched_le(struct hci_dev *hdev)
3815 {
3816         struct hci_chan *chan;
3817         struct sk_buff *skb;
3818         int quote, cnt, tmp;
3819
3820         BT_DBG("%s", hdev->name);
3821
3822         if (!hci_conn_num(hdev, LE_LINK))
3823                 return;
3824
3825         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3826
3827         __check_timeout(hdev, cnt, LE_LINK);
3828
3829         tmp = cnt;
3830         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3831                 u32 priority = (skb_peek(&chan->data_q))->priority;
3832                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3833                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3834                                skb->len, skb->priority);
3835
3836                         /* Stop if priority has changed */
3837                         if (skb->priority < priority)
3838                                 break;
3839
3840                         skb = skb_dequeue(&chan->data_q);
3841
3842                         hci_send_frame(hdev, skb);
3843                         hdev->le_last_tx = jiffies;
3844
3845                         cnt--;
3846                         chan->sent++;
3847                         chan->conn->sent++;
3848
3849                         /* Send pending SCO packets right away */
3850                         hci_sched_sco(hdev);
3851                         hci_sched_esco(hdev);
3852                 }
3853         }
3854
3855         if (hdev->le_pkts)
3856                 hdev->le_cnt = cnt;
3857         else
3858                 hdev->acl_cnt = cnt;
3859
3860         if (cnt != tmp)
3861                 hci_prio_recalculate(hdev, LE_LINK);
3862 }
3863
3864 /* Schedule CIS */
3865 static void hci_sched_iso(struct hci_dev *hdev)
3866 {
3867         struct hci_conn *conn;
3868         struct sk_buff *skb;
3869         int quote, *cnt;
3870
3871         BT_DBG("%s", hdev->name);
3872
3873         if (!hci_conn_num(hdev, ISO_LINK))
3874                 return;
3875
3876         cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3877                 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3878         while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3879                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3880                         BT_DBG("skb %p len %d", skb, skb->len);
3881                         hci_send_frame(hdev, skb);
3882
3883                         conn->sent++;
3884                         if (conn->sent == ~0)
3885                                 conn->sent = 0;
3886                         (*cnt)--;
3887                 }
3888         }
3889 }
3890
3891 static void hci_tx_work(struct work_struct *work)
3892 {
3893         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3894         struct sk_buff *skb;
3895
3896         BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3897                hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3898
3899         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3900                 /* Schedule queues and send stuff to HCI driver */
3901                 hci_sched_sco(hdev);
3902                 hci_sched_esco(hdev);
3903                 hci_sched_iso(hdev);
3904                 hci_sched_acl(hdev);
3905                 hci_sched_le(hdev);
3906         }
3907
3908         /* Send next queued raw (unknown type) packet */
3909         while ((skb = skb_dequeue(&hdev->raw_q)))
3910                 hci_send_frame(hdev, skb);
3911 }
3912
3913 /* ----- HCI RX task (incoming data processing) ----- */
3914
3915 /* ACL data packet */
3916 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3917 {
3918         struct hci_acl_hdr *hdr = (void *) skb->data;
3919         struct hci_conn *conn;
3920         __u16 handle, flags;
3921
3922         skb_pull(skb, HCI_ACL_HDR_SIZE);
3923
3924         handle = __le16_to_cpu(hdr->handle);
3925         flags  = hci_flags(handle);
3926         handle = hci_handle(handle);
3927
3928         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3929                handle, flags);
3930
3931         hdev->stat.acl_rx++;
3932
3933         hci_dev_lock(hdev);
3934         conn = hci_conn_hash_lookup_handle(hdev, handle);
3935         hci_dev_unlock(hdev);
3936
3937         if (conn) {
3938                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3939
3940                 /* Send to upper protocol */
3941                 l2cap_recv_acldata(conn, skb, flags);
3942                 return;
3943         } else {
3944                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3945                            handle);
3946         }
3947
3948         kfree_skb(skb);
3949 }
3950
3951 /* SCO data packet */
3952 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3953 {
3954         struct hci_sco_hdr *hdr = (void *) skb->data;
3955         struct hci_conn *conn;
3956         __u16 handle, flags;
3957
3958         skb_pull(skb, HCI_SCO_HDR_SIZE);
3959
3960         handle = __le16_to_cpu(hdr->handle);
3961         flags  = hci_flags(handle);
3962         handle = hci_handle(handle);
3963
3964         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3965                handle, flags);
3966
3967         hdev->stat.sco_rx++;
3968
3969         hci_dev_lock(hdev);
3970         conn = hci_conn_hash_lookup_handle(hdev, handle);
3971         hci_dev_unlock(hdev);
3972
3973         if (conn) {
3974                 /* Send to upper protocol */
3975                 hci_skb_pkt_status(skb) = flags & 0x03;
3976                 sco_recv_scodata(conn, skb);
3977                 return;
3978         } else {
3979                 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3980                                        handle);
3981         }
3982
3983         kfree_skb(skb);
3984 }
3985
3986 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3987 {
3988         struct hci_iso_hdr *hdr;
3989         struct hci_conn *conn;
3990         __u16 handle, flags;
3991
3992         hdr = skb_pull_data(skb, sizeof(*hdr));
3993         if (!hdr) {
3994                 bt_dev_err(hdev, "ISO packet too small");
3995                 goto drop;
3996         }
3997
3998         handle = __le16_to_cpu(hdr->handle);
3999         flags  = hci_flags(handle);
4000         handle = hci_handle(handle);
4001
4002         bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
4003                    handle, flags);
4004
4005         hci_dev_lock(hdev);
4006         conn = hci_conn_hash_lookup_handle(hdev, handle);
4007         hci_dev_unlock(hdev);
4008
4009         if (!conn) {
4010                 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
4011                            handle);
4012                 goto drop;
4013         }
4014
4015         /* Send to upper protocol */
4016         iso_recv(conn, skb, flags);
4017         return;
4018
4019 drop:
4020         kfree_skb(skb);
4021 }
4022
4023 static bool hci_req_is_complete(struct hci_dev *hdev)
4024 {
4025         struct sk_buff *skb;
4026
4027         skb = skb_peek(&hdev->cmd_q);
4028         if (!skb)
4029                 return true;
4030
4031         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4032 }
4033
4034 static void hci_resend_last(struct hci_dev *hdev)
4035 {
4036         struct hci_command_hdr *sent;
4037         struct sk_buff *skb;
4038         u16 opcode;
4039
4040         if (!hdev->sent_cmd)
4041                 return;
4042
4043         sent = (void *) hdev->sent_cmd->data;
4044         opcode = __le16_to_cpu(sent->opcode);
4045         if (opcode == HCI_OP_RESET)
4046                 return;
4047
4048         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4049         if (!skb)
4050                 return;
4051
4052         skb_queue_head(&hdev->cmd_q, skb);
4053         queue_work(hdev->workqueue, &hdev->cmd_work);
4054 }
4055
4056 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4057                           hci_req_complete_t *req_complete,
4058                           hci_req_complete_skb_t *req_complete_skb)
4059 {
4060         struct sk_buff *skb;
4061         unsigned long flags;
4062
4063         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4064
4065         /* If the completed command doesn't match the last one that was
4066          * sent we need to do special handling of it.
4067          */
4068         if (!hci_sent_cmd_data(hdev, opcode)) {
4069                 /* Some CSR based controllers generate a spontaneous
4070                  * reset complete event during init and any pending
4071                  * command will never be completed. In such a case we
4072                  * need to resend whatever was the last sent
4073                  * command.
4074                  */
4075                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4076                         hci_resend_last(hdev);
4077
4078                 return;
4079         }
4080
4081         /* If we reach this point this event matches the last command sent */
4082         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4083
4084         /* If the command succeeded and there's still more commands in
4085          * this request the request is not yet complete.
4086          */
4087         if (!status && !hci_req_is_complete(hdev))
4088                 return;
4089
4090         /* If this was the last command in a request the complete
4091          * callback would be found in hdev->sent_cmd instead of the
4092          * command queue (hdev->cmd_q).
4093          */
4094         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4095                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4096                 return;
4097         }
4098
4099         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4100                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4101                 return;
4102         }
4103
4104         /* Remove all pending commands belonging to this request */
4105         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4106         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4107                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4108                         __skb_queue_head(&hdev->cmd_q, skb);
4109                         break;
4110                 }
4111
4112                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4113                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4114                 else
4115                         *req_complete = bt_cb(skb)->hci.req_complete;
4116                 dev_kfree_skb_irq(skb);
4117         }
4118         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4119 }
4120
4121 static void hci_rx_work(struct work_struct *work)
4122 {
4123         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4124         struct sk_buff *skb;
4125
4126         BT_DBG("%s", hdev->name);
4127
4128         /* The kcov_remote functions used for collecting packet parsing
4129          * coverage information from this background thread and associate
4130          * the coverage with the syscall's thread which originally injected
4131          * the packet. This helps fuzzing the kernel.
4132          */
4133         for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4134                 kcov_remote_start_common(skb_get_kcov_handle(skb));
4135
4136                 /* Send copy to monitor */
4137                 hci_send_to_monitor(hdev, skb);
4138
4139                 if (atomic_read(&hdev->promisc)) {
4140                         /* Send copy to the sockets */
4141                         hci_send_to_sock(hdev, skb);
4142                 }
4143
4144                 /* If the device has been opened in HCI_USER_CHANNEL,
4145                  * the userspace has exclusive access to device.
4146                  * When device is HCI_INIT, we still need to process
4147                  * the data packets to the driver in order
4148                  * to complete its setup().
4149                  */
4150                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4151                     !test_bit(HCI_INIT, &hdev->flags)) {
4152                         kfree_skb(skb);
4153                         continue;
4154                 }
4155
4156                 if (test_bit(HCI_INIT, &hdev->flags)) {
4157                         /* Don't process data packets in this states. */
4158                         switch (hci_skb_pkt_type(skb)) {
4159                         case HCI_ACLDATA_PKT:
4160                         case HCI_SCODATA_PKT:
4161                         case HCI_ISODATA_PKT:
4162                                 kfree_skb(skb);
4163                                 continue;
4164                         }
4165                 }
4166
4167                 /* Process frame */
4168                 switch (hci_skb_pkt_type(skb)) {
4169                 case HCI_EVENT_PKT:
4170                         BT_DBG("%s Event packet", hdev->name);
4171                         hci_event_packet(hdev, skb);
4172                         break;
4173
4174                 case HCI_ACLDATA_PKT:
4175                         BT_DBG("%s ACL data packet", hdev->name);
4176                         hci_acldata_packet(hdev, skb);
4177                         break;
4178
4179                 case HCI_SCODATA_PKT:
4180                         BT_DBG("%s SCO data packet", hdev->name);
4181                         hci_scodata_packet(hdev, skb);
4182                         break;
4183
4184                 case HCI_ISODATA_PKT:
4185                         BT_DBG("%s ISO data packet", hdev->name);
4186                         hci_isodata_packet(hdev, skb);
4187                         break;
4188
4189                 default:
4190                         kfree_skb(skb);
4191                         break;
4192                 }
4193         }
4194 }
4195
4196 static void hci_cmd_work(struct work_struct *work)
4197 {
4198         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4199         struct sk_buff *skb;
4200
4201         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4202                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4203
4204         /* Send queued commands */
4205         if (atomic_read(&hdev->cmd_cnt)) {
4206                 skb = skb_dequeue(&hdev->cmd_q);
4207                 if (!skb)
4208                         return;
4209
4210                 kfree_skb(hdev->sent_cmd);
4211
4212                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4213                 if (hdev->sent_cmd) {
4214                         int res;
4215                         if (hci_req_status_pend(hdev))
4216                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4217                         atomic_dec(&hdev->cmd_cnt);
4218
4219                         res = hci_send_frame(hdev, skb);
4220                         if (res < 0)
4221                                 __hci_cmd_sync_cancel(hdev, -res);
4222
4223                         rcu_read_lock();
4224                         if (test_bit(HCI_RESET, &hdev->flags) ||
4225                             hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4226                                 cancel_delayed_work(&hdev->cmd_timer);
4227                         else
4228                                 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4229                                                    HCI_CMD_TIMEOUT);
4230                         rcu_read_unlock();
4231                 } else {
4232                         skb_queue_head(&hdev->cmd_q, skb);
4233                         queue_work(hdev->workqueue, &hdev->cmd_work);
4234                 }
4235         }
4236 }