Bluetooth: Fix init sequence for some CSR based controllers
[platform/adaptation/renesas_rcar/renesas_kernel.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54
/* Delay before an automatically powered-on adapter is switched back
 * off (presumably milliseconds — confirm at the msecs_to_jiffies()
 * use site).
 */
#define AUTO_OFF_TIMEOUT 2000

/* Work handlers, defined later in this file */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
68
69 /* ---- HCI notifications ---- */
70
/* Forward a device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to the HCI
 * socket layer, the only notification sink used here.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75
76 /* ---- HCI requests ---- */
77
/* Handle completion of an HCI command. During the HCI_INIT phase a
 * completion that does not match the last queued init command is
 * ignored, except for the CSR workaround below; otherwise any waiter
 * in __hci_request() is woken with @result.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
			return;

		/* Re-queue a clone of the in-flight command so init can
		 * make progress. NOTE(review): assumes hdev->sent_cmd is
		 * non-NULL whenever a completion arrives during HCI_INIT —
		 * confirm against the command work handler.
		 */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
114
115 static void hci_req_cancel(struct hci_dev *hdev, int err)
116 {
117         BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119         if (hdev->req_status == HCI_REQ_PEND) {
120                 hdev->req_result = err;
121                 hdev->req_status = HCI_REQ_CANCELED;
122                 wake_up_interruptible(&hdev->req_wait_q);
123         }
124 }
125
126 /* Execute request and wait for completion. */
127 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
128                                         unsigned long opt, __u32 timeout)
129 {
130         DECLARE_WAITQUEUE(wait, current);
131         int err = 0;
132
133         BT_DBG("%s start", hdev->name);
134
135         hdev->req_status = HCI_REQ_PEND;
136
137         add_wait_queue(&hdev->req_wait_q, &wait);
138         set_current_state(TASK_INTERRUPTIBLE);
139
140         req(hdev, opt);
141         schedule_timeout(timeout);
142
143         remove_wait_queue(&hdev->req_wait_q, &wait);
144
145         if (signal_pending(current))
146                 return -EINTR;
147
148         switch (hdev->req_status) {
149         case HCI_REQ_DONE:
150                 err = -bt_to_errno(hdev->req_result);
151                 break;
152
153         case HCI_REQ_CANCELED:
154                 err = -hdev->req_result;
155                 break;
156
157         default:
158                 err = -ETIMEDOUT;
159                 break;
160         }
161
162         hdev->req_status = hdev->req_result = 0;
163
164         BT_DBG("%s end: err %d", hdev->name, err);
165
166         return err;
167 }
168
169 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
170                                         unsigned long opt, __u32 timeout)
171 {
172         int ret;
173
174         if (!test_bit(HCI_UP, &hdev->flags))
175                 return -ENETDOWN;
176
177         /* Serialize all requests */
178         hci_req_lock(hdev);
179         ret = __hci_request(hdev, req, opt, timeout);
180         hci_req_unlock(hdev);
181
182         return ret;
183 }
184
/* Request callback: issue an HCI reset. HCI_RESET is set so the
 * event handler knows a reset is deliberate and in progress.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
193
/* Queue the init-sequence commands for a BR/EDR controller. Runs
 * with HCI_INIT set; commands are sent one at a time by the command
 * work, so the order here is the on-air order.
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that cannot tolerate it) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all=1 wildcard) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
245
/* Queue the (much shorter) init sequence for an AMP controller:
 * reset plus local version read, using block-based flow control.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
256
/* Request callback driving device initialization: first flush any
 * driver-supplied setup commands to the command queue, then run the
 * type-specific init sequence.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
290
/* Request callback for the LE part of initialization. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
298
/* Request callback: write the scan-enable setting (@opt carries the
 * inquiry/page scan bits).
 */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
308
/* Request callback: write the authentication-enable setting. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
318
/* Request callback: write the encryption-mode setting. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
328
/* Request callback: write the default link policy (@opt is the
 * host-order policy value, converted to little endian on the wire).
 */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
338
339 /* Get HCI device by index.
340  * Device is held on return. */
341 struct hci_dev *hci_dev_get(int index)
342 {
343         struct hci_dev *hdev = NULL, *d;
344
345         BT_DBG("%d", index);
346
347         if (index < 0)
348                 return NULL;
349
350         read_lock(&hci_dev_list_lock);
351         list_for_each_entry(d, &hci_dev_list, list) {
352                 if (d->id == index) {
353                         hdev = hci_dev_hold(d);
354                         break;
355                 }
356         }
357         read_unlock(&hci_dev_list_lock);
358         return hdev;
359 }
360
361 /* ---- Inquiry support ---- */
362
363 bool hci_discovery_active(struct hci_dev *hdev)
364 {
365         struct discovery_state *discov = &hdev->discovery;
366
367         switch (discov->state) {
368         case DISCOVERY_FINDING:
369         case DISCOVERY_RESOLVING:
370                 return true;
371
372         default:
373                 return false;
374         }
375 }
376
377 void hci_discovery_set_state(struct hci_dev *hdev, int state)
378 {
379         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
380
381         if (hdev->discovery.state == state)
382                 return;
383
384         switch (state) {
385         case DISCOVERY_STOPPED:
386                 if (hdev->discovery.state != DISCOVERY_STARTING)
387                         mgmt_discovering(hdev, 0);
388                 hdev->discovery.type = 0;
389                 break;
390         case DISCOVERY_STARTING:
391                 break;
392         case DISCOVERY_FINDING:
393                 mgmt_discovering(hdev, 1);
394                 break;
395         case DISCOVERY_RESOLVING:
396                 break;
397         case DISCOVERY_STOPPING:
398                 break;
399         }
400
401         hdev->discovery.state = state;
402 }
403
/* Free every inquiry cache entry and reset the discovery state.
 * The unknown/resolve lists hold the same entries as "all", so they
 * are simply reinitialized instead of walked.
 */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
	cache->state = DISCOVERY_STOPPED;
}
418
419 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
420 {
421         struct discovery_state *cache = &hdev->discovery;
422         struct inquiry_entry *e;
423
424         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
425
426         list_for_each_entry(e, &cache->all, all) {
427                 if (!bacmp(&e->data.bdaddr, bdaddr))
428                         return e;
429         }
430
431         return NULL;
432 }
433
434 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
435                                                         bdaddr_t *bdaddr)
436 {
437         struct discovery_state *cache = &hdev->discovery;
438         struct inquiry_entry *e;
439
440         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
441
442         list_for_each_entry(e, &cache->unknown, list) {
443                 if (!bacmp(&e->data.bdaddr, bdaddr))
444                         return e;
445         }
446
447         return NULL;
448 }
449
450 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
451                                                         bdaddr_t *bdaddr,
452                                                         int state)
453 {
454         struct discovery_state *cache = &hdev->discovery;
455         struct inquiry_entry *e;
456
457         BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
458
459         list_for_each_entry(e, &cache->resolve, list) {
460                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
461                         return e;
462                 if (!bacmp(&e->data.bdaddr, bdaddr))
463                         return e;
464         }
465
466         return NULL;
467 }
468
/* Re-insert @ie into the resolve list keeping it ordered by signal
 * strength (strongest |RSSI| first), so names are resolved for the
 * closest devices first. Entries already in NAME_PENDING state stay
 * at the front and are not displaced.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
						struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until we find the first non-pending entry with RSSI no
	 * better than ours; insert after the last entry seen before it.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
487
/* Insert or refresh an inquiry result in the discovery cache.
 *
 * @name_known: caller already knows the remote name (no resolution
 *              needed for this result).
 * @ssp:        out-parameter, set true when either the new data or a
 *              cached entry indicates SSP support. May be NULL.
 *
 * Returns true when the entry's name is (now) known or pending, i.e.
 * no name resolution needs to be scheduled; false when the entry
 * still has an unknown name or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
						bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* An earlier result may have carried the SSP bit even if
		 * this one does not.
		 */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed for an entry awaiting resolution: re-sort
		 * the resolve list so stronger signals are resolved first.
		 */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whichever
	 * sub-list (unknown/resolve) it was queued on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
543
544 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
545 {
546         struct discovery_state *cache = &hdev->discovery;
547         struct inquiry_info *info = (struct inquiry_info *) buf;
548         struct inquiry_entry *e;
549         int copied = 0;
550
551         list_for_each_entry(e, &cache->all, all) {
552                 struct inquiry_data *data = &e->data;
553
554                 if (copied >= num)
555                         break;
556
557                 bacpy(&info->bdaddr, &data->bdaddr);
558                 info->pscan_rep_mode    = data->pscan_rep_mode;
559                 info->pscan_period_mode = data->pscan_period_mode;
560                 info->pscan_mode        = data->pscan_mode;
561                 memcpy(info->dev_class, data->dev_class, 3);
562                 info->clock_offset      = data->clock_offset;
563
564                 info++;
565                 copied++;
566         }
567
568         BT_DBG("cache %p, copied %d", cache, copied);
569         return copied;
570 }
571
/* Request callback: start an inquiry using the parameters packed in
 * @opt (a pointer to struct hci_inquiry_req). Skipped when an
 * inquiry is already running.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
588
/* HCIINQUIRY ioctl backend: run (or reuse a fresh cache of) an
 * inquiry and copy the results back to user space. @arg points to a
 * struct hci_inquiry_req followed by room for the results.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only go on the air when the cache is stale, empty, or the
	 * caller explicitly asked for a flush.
	 */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in inquiry-length units; 2000 ms per unit here. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
654
655 /* ---- HCI ioctl helpers ---- */
656
/* Power on the HCI device with index @dev: open the transport, run
 * the HCI init sequence (unless the device is raw), and mark it up.
 * Returns 0 on success or a negative errno; on init failure the
 * transport is closed again and all queues/works are drained.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		/* Run the core init sequence; the request lock is
		 * already held, so call __hci_request directly.
		 */
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP mgmt is told about power state later. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
743
/* Bring the device down: cancel pending work, drain queues, flush
 * the inquiry cache and connections, optionally reset the controller,
 * and close the transport. Ordering here matters: works are flushed
 * before queues are purged so nothing is re-queued behind our back.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* Devices with HCI_QUIRK_NO_RESET were not reset at open, so
	 * they are reset here on close instead.
	 */
	if (!test_bit(HCI_RAW, &hdev->flags) &&
				test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Auto-off closes are not reported to mgmt as power changes. */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open(). */
	hci_dev_put(hdev);
	return 0;
}
831
832 int hci_dev_close(__u16 dev)
833 {
834         struct hci_dev *hdev;
835         int err;
836
837         hdev = hci_dev_get(dev);
838         if (!hdev)
839                 return -ENODEV;
840
841         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
842                 cancel_delayed_work(&hdev->power_off);
843
844         err = hci_dev_do_close(hdev);
845
846         hci_dev_put(hdev);
847         return err;
848 }
849
/* ioctl helper: reset the device with index @dev while keeping it
 * up — drop all queues, flush caches/connections, zero the flow
 * control counters and issue an HCI reset (unless raw).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to do if the device is not up; returns 0. */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset command credit and per-link-type packet counters. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
888
889 int hci_dev_reset_stat(__u16 dev)
890 {
891         struct hci_dev *hdev;
892         int ret = 0;
893
894         hdev = hci_dev_get(dev);
895         if (!hdev)
896                 return -ENODEV;
897
898         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
899
900         hci_dev_put(hdev);
901
902         return ret;
903 }
904
/* Dispatch the HCISET* family of ioctls. @arg points to a
 * struct hci_dev_req whose dev_opt carries the per-command value.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Purely local setting — no controller round-trip. */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU commands dev_opt packs two __u16 values:
	 * low half = packet count, high half = MTU.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
979
/* HCIGETDEVLIST ioctl backend: copy the ids and flags of up to the
 * caller-requested number of registered devices to user space.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the allocation at two pages' worth of entries. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing devices counts as legacy (non-mgmt) usage:
		 * keep auto-off from firing and mark the device pairable.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	/* Only copy out the entries actually filled in. */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1026
/* Copy a snapshot of one controller's state (address, type, flags, MTUs,
 * link policy/mode, stats, features) to userspace.  Returns 0 on success,
 * -EFAULT on bad userspace pointers, -ENODEV when di.dev_id names no
 * registered controller. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace is taking over: stop the pending automatic power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects the device to be pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble: transport bus, high nibble: controller type */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1068
1069 /* ---- Interface to HCI drivers ---- */
1070
1071 static int hci_rfkill_set_block(void *data, bool blocked)
1072 {
1073         struct hci_dev *hdev = data;
1074
1075         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1076
1077         if (!blocked)
1078                 return 0;
1079
1080         hci_dev_do_close(hdev);
1081
1082         return 0;
1083 }
1084
/* rfkill operations: only blocking is acted upon, see hci_rfkill_set_block() */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1088
/* Allocate a zeroed struct hci_dev.  Only sysfs state and the driver_init
 * queue are set up here; the bulk of the initialization happens in
 * hci_register_dev().  Returns NULL on allocation failure. */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1104
/* Release a device allocated with hci_alloc_dev().  The struct itself is
 * not freed here; it is freed by the device release callback once the
 * last reference is dropped. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1114
/* Deferred work: power on a freshly registered controller. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	/* Bail out silently if the device cannot be opened */
	if (hci_dev_open(hdev->id) < 0)
		return;

	/* If nobody claims the device, power it back off after a grace
	 * period (AUTO_OFF_TIMEOUT) */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on completes setup: announce the new
	 * controller index to the management interface */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1131
/* Delayed work: power the controller back off (scheduled from
 * hci_power_on() when HCI_AUTO_OFF is set). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1141
/* Delayed work: end a time-limited discoverable period by restoring
 * page-scan-only mode and clearing the stored discoverable timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;	/* page scan only: connectable, not discoverable */

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1159
1160 int hci_uuids_clear(struct hci_dev *hdev)
1161 {
1162         struct list_head *p, *n;
1163
1164         list_for_each_safe(p, n, &hdev->uuids) {
1165                 struct bt_uuid *uuid;
1166
1167                 uuid = list_entry(p, struct bt_uuid, list);
1168
1169                 list_del(p);
1170                 kfree(uuid);
1171         }
1172
1173         return 0;
1174 }
1175
1176 int hci_link_keys_clear(struct hci_dev *hdev)
1177 {
1178         struct list_head *p, *n;
1179
1180         list_for_each_safe(p, n, &hdev->link_keys) {
1181                 struct link_key *key;
1182
1183                 key = list_entry(p, struct link_key, list);
1184
1185                 list_del(p);
1186                 kfree(key);
1187         }
1188
1189         return 0;
1190 }
1191
/* Remove and free every stored SMP long term key on @hdev.  Always
 * returns 0.  Caller is expected to hold the device lock. */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1203
1204 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1205 {
1206         struct link_key *k;
1207
1208         list_for_each_entry(k, &hdev->link_keys, list)
1209                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1210                         return k;
1211
1212         return NULL;
1213 }
1214
/* Decide whether a newly created link key should be stored persistently.
 * Returns 1 to store the key across power cycles, 0 to keep it only for
 * the current session.  @old_key_type is 0xff when there was no previous
 * key for this peer. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1250
1251 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1252 {
1253         struct smp_ltk *k;
1254
1255         list_for_each_entry(k, &hdev->long_term_keys, list) {
1256                 if (k->ediv != ediv ||
1257                                 memcmp(rand, k->rand, sizeof(k->rand)))
1258                         continue;
1259
1260                 return k;
1261         }
1262
1263         return NULL;
1264 }
1265 EXPORT_SYMBOL(hci_find_ltk);
1266
1267 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1268                                                                 u8 addr_type)
1269 {
1270         struct smp_ltk *k;
1271
1272         list_for_each_entry(k, &hdev->long_term_keys, list)
1273                 if (addr_type == k->bdaddr_type &&
1274                                         bacmp(bdaddr, &k->bdaddr) == 0)
1275                         return k;
1276
1277         return NULL;
1278 }
1279 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1280
/* Store (or update) the BR/EDR link key for @bdaddr.  @new_key is non-zero
 * when the controller reported this as a freshly generated key, in which
 * case the management interface is notified and non-persistent keys are
 * dropped again after notification.  Returns 0 or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse the existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type so the
	 * persistence decision reflects the original pairing */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Session-only keys are dropped right after userspace was told
	 * about them */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1335
/* Store (or update) an SMP key for @bdaddr/@addr_type.  Only STK and LTK
 * type keys are accepted; anything else is silently ignored.  When
 * @new_key is non-zero and the key is an LTK, the management interface
 * is notified.  Returns 0 or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse the existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1372
1373 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1374 {
1375         struct link_key *key;
1376
1377         key = hci_find_link_key(hdev, bdaddr);
1378         if (!key)
1379                 return -ENOENT;
1380
1381         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1382
1383         list_del(&key->list);
1384         kfree(key);
1385
1386         return 0;
1387 }
1388
1389 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1390 {
1391         struct smp_ltk *k, *tmp;
1392
1393         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1394                 if (bacmp(bdaddr, &k->bdaddr))
1395                         continue;
1396
1397                 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1398
1399                 list_del(&k->list);
1400                 kfree(k);
1401         }
1402
1403         return 0;
1404 }
1405
/* HCI command timer function: fires when the controller failed to answer
 * an HCI command in time.  Restores the command credit so the command
 * queue does not stall forever, and kicks the command worker. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1415
1416 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1417                                                         bdaddr_t *bdaddr)
1418 {
1419         struct oob_data *data;
1420
1421         list_for_each_entry(data, &hdev->remote_oob_data, list)
1422                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1423                         return data;
1424
1425         return NULL;
1426 }
1427
1428 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1429 {
1430         struct oob_data *data;
1431
1432         data = hci_find_remote_oob_data(hdev, bdaddr);
1433         if (!data)
1434                 return -ENOENT;
1435
1436         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1437
1438         list_del(&data->list);
1439         kfree(data);
1440
1441         return 0;
1442 }
1443
1444 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1445 {
1446         struct oob_data *data, *n;
1447
1448         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1449                 list_del(&data->list);
1450                 kfree(data);
1451         }
1452
1453         return 0;
1454 }
1455
/* Store (or update) remote out-of-band pairing data (hash + randomizer)
 * for @bdaddr.  Returns 0 or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	/* No entry yet for this address: allocate and link a new one.
	 * All fields are assigned below, so plain kmalloc suffices. */
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1479
1480 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1481                                                 bdaddr_t *bdaddr)
1482 {
1483         struct bdaddr_list *b;
1484
1485         list_for_each_entry(b, &hdev->blacklist, list)
1486                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1487                         return b;
1488
1489         return NULL;
1490 }
1491
1492 int hci_blacklist_clear(struct hci_dev *hdev)
1493 {
1494         struct list_head *p, *n;
1495
1496         list_for_each_safe(p, n, &hdev->blacklist) {
1497                 struct bdaddr_list *b;
1498
1499                 b = list_entry(p, struct bdaddr_list, list);
1500
1501                 list_del(p);
1502                 kfree(b);
1503         }
1504
1505         return 0;
1506 }
1507
1508 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1509 {
1510         struct bdaddr_list *entry;
1511
1512         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1513                 return -EBADF;
1514
1515         if (hci_blacklist_lookup(hdev, bdaddr))
1516                 return -EEXIST;
1517
1518         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1519         if (!entry)
1520                 return -ENOMEM;
1521
1522         bacpy(&entry->bdaddr, bdaddr);
1523
1524         list_add(&entry->list, &hdev->blacklist);
1525
1526         return mgmt_device_blocked(hdev, bdaddr, type);
1527 }
1528
1529 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1530 {
1531         struct bdaddr_list *entry;
1532
1533         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1534                 return hci_blacklist_clear(hdev);
1535
1536         entry = hci_blacklist_lookup(hdev, bdaddr);
1537         if (!entry)
1538                 return -ENOENT;
1539
1540         list_del(&entry->list);
1541         kfree(entry);
1542
1543         return mgmt_device_unblocked(hdev, bdaddr, type);
1544 }
1545
/* Delayed work: drop the cached LE advertising entries (scheduled
 * elsewhere as adv_work). */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1557
1558 int hci_adv_entries_clear(struct hci_dev *hdev)
1559 {
1560         struct adv_entry *entry, *tmp;
1561
1562         list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1563                 list_del(&entry->list);
1564                 kfree(entry);
1565         }
1566
1567         BT_DBG("%s adv cache cleared", hdev->name);
1568
1569         return 0;
1570 }
1571
1572 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1573 {
1574         struct adv_entry *entry;
1575
1576         list_for_each_entry(entry, &hdev->adv_entries, list)
1577                 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1578                         return entry;
1579
1580         return NULL;
1581 }
1582
1583 static inline int is_connectable_adv(u8 evt_type)
1584 {
1585         if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1586                 return 1;
1587
1588         return 0;
1589 }
1590
/* Cache the sender of a connectable LE advertising report.  Returns 0
 * (including when the address was already cached), -EINVAL for
 * non-connectable advertising types, -ENOMEM on allocation failure. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1618
/* HCI request callback: send LE Set Scan Parameters with the values
 * packed into @opt (a pointer to struct le_scan_params in disguise). */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param =  (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1631
/* HCI request callback: send LE Set Scan Enable with enable = 1
 * (the other fields stay zeroed). */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1641
/* Start an LE scan synchronously: set the scan parameters, enable
 * scanning, then schedule delayed work to disable it again after
 * @timeout ms.  Returns -EINPROGRESS when a scan is already running,
 * or the error from the underlying HCI requests.  Each request gets a
 * fixed 3 s completion timeout. */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
									timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Automatically stop scanning after the caller-supplied timeout */
	schedule_delayed_work(&hdev->le_scan_disable,
						msecs_to_jiffies(timeout));

	return 0;
}
1675
/* Delayed work: stop an ongoing LE scan.  The command parameters are
 * only memset to zero — enable = 0 is exactly what disables scanning. */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1688
/* Work item: run the LE scan whose parameters were stashed in
 * hdev->le_scan_params by hci_le_scan(). */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval,
					param->window, param->timeout);
}
1699
/* Kick off an LE scan asynchronously: record the parameters on the
 * device and queue le_scan_work on the long-running system workqueue.
 * Returns -EINPROGRESS when a scan work item is already pending or
 * running, 0 otherwise. */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
								int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* system_long_wq: the scan requests may block for seconds */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1719
/* Register HCI device.
 *
 * Assigns the first free device id (hci%d), initializes all per-device
 * state (locks, work items, queues, timers, key/blacklist/adv lists),
 * creates the per-device workqueue and sysfs/rfkill entries, and finally
 * schedules the asynchronous power-on.  Returns the assigned id on
 * success or a negative errno; on failure the device is unlinked again. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must provide at least open() and close() */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Sensible BR/EDR defaults until the controller is interrogated */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for unanswered HCI commands, see hci_cmd_timer() */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high priority: packet processing must not be
	 * starved and may be needed for memory reclaim */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power on asynchronously; HCI_SETUP is cleared and mgmt notified
	 * once the device is actually up (see hci_power_on()) */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1851
/* Unregister HCI device.
 *
 * Tears down everything hci_register_dev() set up: unlinks the device,
 * closes it, notifies mgmt (unless still in init/setup), removes
 * rfkill/sysfs, flushes pending work, frees all stored keys and cached
 * data, and drops the registration reference. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Devices still in init/setup were never announced to mgmt, so
	 * there is no index-removed event to send for them */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1904
/* Suspend HCI device: only notifies registered listeners; always 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1912
/* Resume HCI device: only notifies registered listeners; always 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1920
/* Receive frame from HCI drivers.  Queues the skb for the RX worker.
 * Frames are dropped with -ENXIO unless the device is up or still in
 * its init phase.  Takes ownership of @skb in all cases. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1943
/* Reassemble one HCI packet of @type from a driver-supplied byte stream.
 *
 * In-progress packets are parked in hdev->reassembly[index].  The skb's
 * control block tracks how many bytes are still expected; once the fixed
 * header is complete the expectation is widened to the payload length it
 * announces, and a finished packet is handed to hci_recv_frame().
 *
 * Returns the number of bytes of @data left unconsumed (>= 0), -EILSEQ
 * for a bad type or index, or -ENOMEM when allocation fails or a header
 * announces more payload than the skb can hold.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate for the worst-case size
		 * and expect just the fixed header for now. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy no more than is still expected for this packet. */
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If the fixed header just completed, learn the payload
		 * length it announces and check it fits the skb. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2052
2053 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2054 {
2055         int rem = 0;
2056
2057         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2058                 return -EILSEQ;
2059
2060         while (count) {
2061                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2062                 if (rem < 0)
2063                         return rem;
2064
2065                 data += (count - rem);
2066                 count = rem;
2067         }
2068
2069         return rem;
2070 }
2071 EXPORT_SYMBOL(hci_recv_fragment);
2072
2073 #define STREAM_REASSEMBLY 0
2074
2075 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2076 {
2077         int type;
2078         int rem = 0;
2079
2080         while (count) {
2081                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2082
2083                 if (!skb) {
2084                         struct { char type; } *pkt;
2085
2086                         /* Start of the frame */
2087                         pkt = data;
2088                         type = pkt->type;
2089
2090                         data++;
2091                         count--;
2092                 } else
2093                         type = bt_cb(skb)->pkt_type;
2094
2095                 rem = hci_reassembly(hdev, type, data, count,
2096                                                         STREAM_REASSEMBLY);
2097                 if (rem < 0)
2098                         return rem;
2099
2100                 data += (count - rem);
2101                 count = rem;
2102         }
2103
2104         return rem;
2105 }
2106 EXPORT_SYMBOL(hci_recv_stream_fragment);
2107
2108 /* ---- Interface to upper protocols ---- */
2109
/* Register an upper-layer callback block on the global hci_cb_list.
 * Always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2121
/* Remove a previously registered callback block from hci_cb_list.
 * Always succeeds. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2133
2134 static int hci_send_frame(struct sk_buff *skb)
2135 {
2136         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2137
2138         if (!hdev) {
2139                 kfree_skb(skb);
2140                 return -ENODEV;
2141         }
2142
2143         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2144
2145         /* Time stamp */
2146         __net_timestamp(skb);
2147
2148         /* Send copy to monitor */
2149         hci_send_to_monitor(hdev, skb);
2150
2151         if (atomic_read(&hdev->promisc)) {
2152                 /* Send copy to the sockets */
2153                 hci_send_to_sock(hdev, skb);
2154         }
2155
2156         /* Get rid of skb owner, prior to sending to the driver. */
2157         skb_orphan(skb);
2158
2159         return hdev->send(skb);
2160 }
2161
2162 /* Send HCI command */
2163 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2164 {
2165         int len = HCI_COMMAND_HDR_SIZE + plen;
2166         struct hci_command_hdr *hdr;
2167         struct sk_buff *skb;
2168
2169         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2170
2171         skb = bt_skb_alloc(len, GFP_ATOMIC);
2172         if (!skb) {
2173                 BT_ERR("%s no memory for command", hdev->name);
2174                 return -ENOMEM;
2175         }
2176
2177         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2178         hdr->opcode = cpu_to_le16(opcode);
2179         hdr->plen   = plen;
2180
2181         if (plen)
2182                 memcpy(skb_put(skb, plen), param, plen);
2183
2184         BT_DBG("skb len %d", skb->len);
2185
2186         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2187         skb->dev = (void *) hdev;
2188
2189         if (test_bit(HCI_INIT, &hdev->flags))
2190                 hdev->init_last_cmd = opcode;
2191
2192         skb_queue_tail(&hdev->cmd_q, skb);
2193         queue_work(hdev->workqueue, &hdev->cmd_work);
2194
2195         return 0;
2196 }
2197
2198 /* Get data from the previously sent command */
2199 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2200 {
2201         struct hci_command_hdr *hdr;
2202
2203         if (!hdev->sent_cmd)
2204                 return NULL;
2205
2206         hdr = (void *) hdev->sent_cmd->data;
2207
2208         if (hdr->opcode != cpu_to_le16(opcode))
2209                 return NULL;
2210
2211         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2212
2213         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2214 }
2215
2216 /* Send ACL data */
2217 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2218 {
2219         struct hci_acl_hdr *hdr;
2220         int len = skb->len;
2221
2222         skb_push(skb, HCI_ACL_HDR_SIZE);
2223         skb_reset_transport_header(skb);
2224         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2225         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2226         hdr->dlen   = cpu_to_le16(len);
2227 }
2228
/* Queue an ACL frame, together with any fragments chained on its
 * frag_list, for transmission.  Fragments after the first are re-marked
 * ACL_CONT, and the whole run is queued under the queue lock so another
 * writer's fragments can never be interleaved. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; each fragment is queued on its own. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Only the first fragment carries ACL_START. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2269
2270 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2271 {
2272         struct hci_conn *conn = chan->conn;
2273         struct hci_dev *hdev = conn->hdev;
2274
2275         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2276
2277         skb->dev = (void *) hdev;
2278         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2279         hci_add_acl_hdr(skb, conn->handle, flags);
2280
2281         hci_queue_acl(conn, &chan->data_q, skb, flags);
2282
2283         queue_work(hdev->workqueue, &hdev->tx_work);
2284 }
2285 EXPORT_SYMBOL(hci_send_acl);
2286
2287 /* Send SCO data */
2288 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2289 {
2290         struct hci_dev *hdev = conn->hdev;
2291         struct hci_sco_hdr hdr;
2292
2293         BT_DBG("%s len %d", hdev->name, skb->len);
2294
2295         hdr.handle = cpu_to_le16(conn->handle);
2296         hdr.dlen   = skb->len;
2297
2298         skb_push(skb, HCI_SCO_HDR_SIZE);
2299         skb_reset_transport_header(skb);
2300         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2301
2302         skb->dev = (void *) hdev;
2303         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2304
2305         skb_queue_tail(&conn->data_q, skb);
2306         queue_work(hdev->workqueue, &hdev->tx_work);
2307 }
2308 EXPORT_SYMBOL(hci_send_sco);
2309
2310 /* ---- HCI TX task (outgoing data) ---- */
2311
/* HCI Connection scheduler.
 *
 * Pick the connection of @type that should transmit next: among all
 * established connections of that type with queued data, the one with
 * the fewest packets currently in flight wins.  *quote is set to the
 * winner's fair share of the free controller buffers (at least 1), or 0
 * when no eligible connection exists. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only the right link type with pending data competes. */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets. */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* All connections of this type seen - stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free buffer credits for this link type. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares the ACL pool when the controller
			 * reported no dedicated LE buffers. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2371
2372 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2373 {
2374         struct hci_conn_hash *h = &hdev->conn_hash;
2375         struct hci_conn *c;
2376
2377         BT_ERR("%s link tx timeout", hdev->name);
2378
2379         rcu_read_lock();
2380
2381         /* Kill stalled connections */
2382         list_for_each_entry_rcu(c, &h->list, list) {
2383                 if (c->type == type && c->sent) {
2384                         BT_ERR("%s killing stalled connection %s",
2385                                 hdev->name, batostr(&c->dst));
2386                         hci_acl_disconn(c, 0x13);
2387                 }
2388         }
2389
2390         rcu_read_unlock();
2391 }
2392
/* Pick the channel of @type that should transmit next.
 *
 * Channel-aware variant of hci_low_sent(): among channels on
 * established connections of this type with queued data, only those
 * whose head skb carries the highest pending priority compete, and of
 * those the one on the connection with the fewest unacked packets wins.
 * *quote receives the winner's fair share of free buffers (at least 1).
 * Returns NULL when nothing is eligible. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Lower-priority channels never pre-empt higher. */
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New highest priority seen: restart the
				 * least-sent selection among its peers. */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen - stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free buffer credits for this link type. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2471
/* Starvation avoidance: after a scheduling round, any channel of @type
 * that sent nothing gets its queued head skb promoted to
 * HCI_PRIO_MAX - 1 so busier channels cannot starve it forever. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: just reset its
			 * counter, no promotion needed. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Already at (or above) the promotion level. */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type seen - stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2521
2522 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2523 {
2524         /* Calculate count of blocks used by this packet */
2525         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2526 }
2527
2528 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2529 {
2530         if (!test_bit(HCI_RAW, &hdev->flags)) {
2531                 /* ACL tx timeout must be longer than maximum
2532                  * link supervision timeout (40.9 seconds) */
2533                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2534                                         msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2535                         hci_link_tx_to(hdev, ACL_LINK);
2536         }
2537 }
2538
/* Packet-based ACL scheduler: while per-packet buffer credits remain,
 * let the best channel (per hci_chan_sent) send up to its quota. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head frame when the quota was granted. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2576
/* Block-based ACL scheduler: like hci_sched_acl_pkt() but credits are
 * counted in controller data blocks rather than whole packets. */
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head frame when the quota was granted. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
						skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): the packet was already dequeued; on
			 * this early return it is neither freed nor requeued,
			 * which looks like an skb leak - confirm upstream. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Charge block credits, not packet credits. */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance channel priorities. */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2622
2623 static inline void hci_sched_acl(struct hci_dev *hdev)
2624 {
2625         BT_DBG("%s", hdev->name);
2626
2627         if (!hci_conn_num(hdev, ACL_LINK))
2628                 return;
2629
2630         switch (hdev->flow_ctl_mode) {
2631         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2632                 hci_sched_acl_pkt(hdev);
2633                 break;
2634
2635         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2636                 hci_sched_acl_blk(hdev);
2637                 break;
2638         }
2639 }
2640
2641 /* Schedule SCO */
2642 static inline void hci_sched_sco(struct hci_dev *hdev)
2643 {
2644         struct hci_conn *conn;
2645         struct sk_buff *skb;
2646         int quote;
2647
2648         BT_DBG("%s", hdev->name);
2649
2650         if (!hci_conn_num(hdev, SCO_LINK))
2651                 return;
2652
2653         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2654                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2655                         BT_DBG("skb %p len %d", skb, skb->len);
2656                         hci_send_frame(skb);
2657
2658                         conn->sent++;
2659                         if (conn->sent == ~0)
2660                                 conn->sent = 0;
2661                 }
2662         }
2663 }
2664
2665 static inline void hci_sched_esco(struct hci_dev *hdev)
2666 {
2667         struct hci_conn *conn;
2668         struct sk_buff *skb;
2669         int quote;
2670
2671         BT_DBG("%s", hdev->name);
2672
2673         if (!hci_conn_num(hdev, ESCO_LINK))
2674                 return;
2675
2676         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2677                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2678                         BT_DBG("skb %p len %d", skb, skb->len);
2679                         hci_send_frame(skb);
2680
2681                         conn->sent++;
2682                         if (conn->sent == ~0)
2683                                 conn->sent = 0;
2684                 }
2685         }
2686 }
2687
/* LE scheduler: like hci_sched_acl_pkt() but drawing on the LE buffer
 * pool, or on the shared ACL pool when the controller reported no
 * dedicated LE buffers. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool; remember the start value in tmp so we can
	 * tell whether anything was sent this round. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2738
2739 static void hci_tx_work(struct work_struct *work)
2740 {
2741         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2742         struct sk_buff *skb;
2743
2744         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2745                 hdev->sco_cnt, hdev->le_cnt);
2746
2747         /* Schedule queues and send stuff to HCI driver */
2748
2749         hci_sched_acl(hdev);
2750
2751         hci_sched_sco(hdev);
2752
2753         hci_sched_esco(hdev);
2754
2755         hci_sched_le(hdev);
2756
2757         /* Send next queued raw (unknown type) packet */
2758         while ((skb = skb_dequeue(&hdev->raw_q)))
2759                 hci_send_frame(skb);
2760 }
2761
2762 /* ----- HCI RX task (incoming data processing) ----- */
2763
2764 /* ACL data packet */
2765 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2766 {
2767         struct hci_acl_hdr *hdr = (void *) skb->data;
2768         struct hci_conn *conn;
2769         __u16 handle, flags;
2770
2771         skb_pull(skb, HCI_ACL_HDR_SIZE);
2772
2773         handle = __le16_to_cpu(hdr->handle);
2774         flags  = hci_flags(handle);
2775         handle = hci_handle(handle);
2776
2777         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2778
2779         hdev->stat.acl_rx++;
2780
2781         hci_dev_lock(hdev);
2782         conn = hci_conn_hash_lookup_handle(hdev, handle);
2783         hci_dev_unlock(hdev);
2784
2785         if (conn) {
2786                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2787
2788                 /* Send to upper protocol */
2789                 l2cap_recv_acldata(conn, skb, flags);
2790                 return;
2791         } else {
2792                 BT_ERR("%s ACL packet for unknown connection handle %d",
2793                         hdev->name, handle);
2794         }
2795
2796         kfree_skb(skb);
2797 }
2798
2799 /* SCO data packet */
2800 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2801 {
2802         struct hci_sco_hdr *hdr = (void *) skb->data;
2803         struct hci_conn *conn;
2804         __u16 handle;
2805
2806         skb_pull(skb, HCI_SCO_HDR_SIZE);
2807
2808         handle = __le16_to_cpu(hdr->handle);
2809
2810         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2811
2812         hdev->stat.sco_rx++;
2813
2814         hci_dev_lock(hdev);
2815         conn = hci_conn_hash_lookup_handle(hdev, handle);
2816         hci_dev_unlock(hdev);
2817
2818         if (conn) {
2819                 /* Send to upper protocol */
2820                 sco_recv_scodata(conn, skb);
2821                 return;
2822         } else {
2823                 BT_ERR("%s SCO packet for unknown connection handle %d",
2824                         hdev->name, handle);
2825         }
2826
2827         kfree_skb(skb);
2828 }
2829
/* RX worker: drain hdev->rx_q, mirror each frame to monitor/sockets,
 * then dispatch it to the per-type handler. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode the stack does not process frames itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it. */
			kfree_skb(skb);
			break;
		}
	}
}
2884
/* Command worker: send the next queued HCI command when the controller
 * has a command credit available. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent_cmd; a clone of the new command
		 * is kept so hci_sent_cmd_data() can look it up later. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No response timeout while a reset is pending. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2915
2916 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2917 {
2918         /* General inquiry access code (GIAC) */
2919         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2920         struct hci_cp_inquiry cp;
2921
2922         BT_DBG("%s", hdev->name);
2923
2924         if (test_bit(HCI_INQUIRY, &hdev->flags))
2925                 return -EINPROGRESS;
2926
2927         inquiry_cache_flush(hdev);
2928
2929         memset(&cp, 0, sizeof(cp));
2930         memcpy(&cp.lap, lap, sizeof(cp.lap));
2931         cp.length  = length;
2932
2933         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2934 }
2935
/* Abort an inquiry started with hci_do_inquiry().  Returns -EPERM when
 * no inquiry is active, otherwise the result of sending
 * HCI_OP_INQUIRY_CANCEL. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}