/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000	/* ms */

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

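/* Record the result of a completed command and wake up any request
 * waiting synchronously in __hci_request(). */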
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase check if the completed command matches
         * the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. The caller must hold the
 * request lock; use hci_request() for the locked variant. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}


static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

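/* Send the mandatory and optional HCI commands that bring a newly opened
 * controller into a usable state. Runs as a request under HCI_INIT. */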
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_delete_stored_link_key cp;
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (!ie) {
                /* Entry not in the cache. Add new one. */
                ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
                if (!ie)
                        return;

                ie->next = cache->list;
                cache->list = ie;
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;
}

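/* Copy up to num cached inquiry entries into buf as struct inquiry_info
 * records; returns the number of entries copied. Runs with the device
 * lock held and therefore must not sleep. */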
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

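/* HCIINQUIRY ioctl handler: run an inquiry (or reuse a fresh cache) and
 * copy the results back to user space. */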
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so dump into a temporary buffer
         * and copy it to user space afterwards.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

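/* Bring a controller up: open the driver, run the HCI init sequence unless
 * the device is raw, and set HCI_UP on success. */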
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->flags))
                        mgmt_powered(hdev->id, 1);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

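/* Common teardown for close, rfkill block and unregister: kill the
 * tasklets, flush queues and connections, reset the controller and call
 * the driver's close hook. */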
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        mgmt_powered(hdev->id, 0);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;
        hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

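/* Handler for the HCISET* ioctls that tweak a single device option. */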
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

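/* HCIGETDEVLIST ioctl helper: snapshot the id and flags of up to dev_num
 * registered devices into a user-supplied buffer. */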
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;

                hdev = list_entry(p, struct hci_dev, list);

                hci_del_off_timer(hdev);

                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_del_off_timer(hdev);

        if (!test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_PAIRABLE, &hdev->flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->flags))
                mod_timer(&hdev->off_timer,
                                jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

        BT_DBG("%s", hdev->name);

        hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
        struct hci_dev *hdev = (struct hci_dev *) data;

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);

        queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);
        del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct list_head *p;

        list_for_each(p, &hdev->link_keys) {
                struct link_key *k;

                k = list_entry(p, struct link_key, list);

                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;
        }

        return NULL;
}

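/* Insert or update the link key for bdaddr. When new_key is set, notify
 * the management interface of the change. */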
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
                                                u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->type = type;
        key->pin_len = pin_len;

        if (new_key)
                mgmt_new_key(hdev->id, key, old_key_type);

        /* 0x06 is Changed Combination Key; keep the previous key type */
        if (type == 0x06)
                key->type = old_key_type;

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        clear_bit(HCI_RESET, &hdev->flags);
        tasklet_schedule(&hdev->cmd_task);
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p;
                id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_LIST_HEAD(&hdev->uuids);

        INIT_LIST_HEAD(&hdev->link_keys);

        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_WORK(&hdev->power_off, hci_power_off);
        setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->flags);
        set_bit(HCI_SETUP, &hdev->flags);
        queue_work(hdev->workqueue, &hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
                                        !test_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_removed(hdev->id);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        hci_del_off_timer(hdev);

        destroy_workqueue(hdev->workqueue);

        hci_dev_lock_bh(hdev);
        hci_blacklist_clear(hdev);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_dev_unlock_bh(hdev);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

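/* Reassemble a packet of the given type from driver-provided fragments,
 * using the per-device reassembly slot index; returns the number of input
 * bytes left over, or a negative error. */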
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index, gfp_t gfp_mask)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, gfp_mask);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count,
                                                type - 1, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

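/* Reassemble HCI frames from a raw byte stream where each frame is
 * prefixed with its packet type. */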
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data,
                                        count, STREAM_REASSEMBLY, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

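/* Hand one frame to the driver, mirroring it to raw sockets first when a
 * promiscuous listener is present. */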
static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb, NULL);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        if (test_bit(HCI_INIT, &hdev->flags))
                hdev->init_last_cmd = opcode;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags);

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list;
                        list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt, q;

                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}

static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s link tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == type && c->sent) {
                        BT_ERR("%s killing stalled connection %s",
                                hdev->name, batostr(&c->dst));
                        /* 0x13: Remote User Terminated Connection */
                        hci_acl_disconn(c, 0x13);
                }
        }
}
1751
1752 static inline void hci_sched_acl(struct hci_dev *hdev)
1753 {
1754         struct hci_conn *conn;
1755         struct sk_buff *skb;
1756         int quote;
1757
1758         BT_DBG("%s", hdev->name);
1759
1760         if (!test_bit(HCI_RAW, &hdev->flags)) {
1761                 /* ACL tx timeout must be longer than maximum
1762                  * link supervision timeout (40.9 seconds) */
1763                 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1764                         hci_link_tx_to(hdev, ACL_LINK);
1765         }
1766
1767         while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1768                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1769                         BT_DBG("skb %p len %d", skb, skb->len);
1770
1771                         hci_conn_enter_active_mode(conn);
1772
1773                         hci_send_frame(skb);
1774                         hdev->acl_last_tx = jiffies;
1775
1776                         hdev->acl_cnt--;
1777                         conn->sent++;
1778                 }
1779         }
1780 }
1781
1782 /* Schedule SCO: drain queued SCO frames while controller credits remain */
1783 static inline void hci_sched_sco(struct hci_dev *hdev)
1784 {
1785         struct hci_conn *conn;
1786         struct sk_buff *skb;
1787         int quote;
1788
1789         BT_DBG("%s", hdev->name);
1790
1791         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1792                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1793                         BT_DBG("skb %p len %d", skb, skb->len);
1794                         hci_send_frame(skb);
1795
1796                         conn->sent++;
1797                         if (conn->sent == ~0)
1798                                 conn->sent = 0;
1799                 }
1800         }
1801 }
1802
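/* Schedule eSCO: same algorithm as the SCO path but keyed on ESCO_LINK
 * connections; both link types share the controller's SCO buffer count. */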
1803 static inline void hci_sched_esco(struct hci_dev *hdev)
1804 {
1805         struct hci_conn *conn;
1806         struct sk_buff *skb;
1807         int quote;
1808
1809         BT_DBG("%s", hdev->name);
1810
1811         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1812                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1813                         BT_DBG("skb %p len %d", skb, skb->len);
1814                         hci_send_frame(skb);
1815
1816                         conn->sent++;
1817                         if (conn->sent == ~0)
1818                                 conn->sent = 0;
1819                 }
1820         }
1821 }
1822
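/* Schedule LE: controllers with a dedicated LE buffer pool advertise
 * le_pkts; otherwise LE traffic borrows ACL credits, so the working count
 * is read from, and written back to, whichever pool is in use. */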
1823 static inline void hci_sched_le(struct hci_dev *hdev)
1824 {
1825         struct hci_conn *conn;
1826         struct sk_buff *skb;
1827         int quote, cnt;
1828
1829         BT_DBG("%s", hdev->name);
1830
1831         if (!test_bit(HCI_RAW, &hdev->flags)) {
1832                 /* As for ACL, the LE tx timeout must be longer than the
1833                  * maximum link supervision timeout (40.9 seconds) */
1834                 if (!hdev->le_cnt && hdev->le_pkts &&
1835                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
1836                         hci_link_tx_to(hdev, LE_LINK);
1837         }
1838
1839         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1840         while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
1841                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1842                         BT_DBG("skb %p len %d", skb, skb->len);
1843
1844                         hci_send_frame(skb);
1845                         hdev->le_last_tx = jiffies;
1846
1847                         cnt--;
1848                         conn->sent++;
1849                 }
1850         }
1851         if (hdev->le_pkts)
1852                 hdev->le_cnt = cnt;
1853         else
1854                 hdev->acl_cnt = cnt;
1855 }
1856
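/* TX tasklet body: hci_task_lock is taken for reading so scheduler state
 * stays stable while each link type is serviced in turn, after which any
 * raw packets queued by user space are flushed. */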
1857 static void hci_tx_task(unsigned long arg)
1858 {
1859         struct hci_dev *hdev = (struct hci_dev *) arg;
1860         struct sk_buff *skb;
1861
1862         read_lock(&hci_task_lock);
1863
1864         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1865                 hdev->sco_cnt, hdev->le_cnt);
1866
1867         /* Run the schedulers and push pending frames to the HCI driver */
1868
1869         hci_sched_acl(hdev);
1870
1871         hci_sched_sco(hdev);
1872
1873         hci_sched_esco(hdev);
1874
1875         hci_sched_le(hdev);
1876
1877         /* Send any queued raw (unknown type) packets */
1878         while ((skb = skb_dequeue(&hdev->raw_q)))
1879                 hci_send_frame(skb);
1880
1881         read_unlock(&hci_task_lock);
1882 }
1883
1884 /* ----- HCI RX task (incoming data processing) ----- */
1885
1886 /* ACL data packet */
1887 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1888 {
1889         struct hci_acl_hdr *hdr = (void *) skb->data;
1890         struct hci_conn *conn;
1891         __u16 handle, flags;
1892
1893         skb_pull(skb, HCI_ACL_HDR_SIZE);
1894
1895         handle = __le16_to_cpu(hdr->handle);
1896         flags  = hci_flags(handle);
1897         handle = hci_handle(handle);
1898
1899         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1900
1901         hdev->stat.acl_rx++;
1902
1903         hci_dev_lock(hdev);
1904         conn = hci_conn_hash_lookup_handle(hdev, handle);
1905         hci_dev_unlock(hdev);
1906
1907         if (conn) {
1908                 struct hci_proto *hp;
1909
1910                 hci_conn_enter_active_mode(conn);
1911
1912                 /* Send to upper protocol */
1913                 hp = hci_proto[HCI_PROTO_L2CAP];
1914                 if (hp && hp->recv_acldata) {
1915                         hp->recv_acldata(conn, skb, flags);
1916                         return;
1917                 }
1918         } else {
1919                 BT_ERR("%s ACL packet for unknown connection handle %d",
1920                         hdev->name, handle);
1921         }
1922
1923         kfree_skb(skb);
1924 }
1925
1926 /* SCO data packet */
1927 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1928 {
1929         struct hci_sco_hdr *hdr = (void *) skb->data;
1930         struct hci_conn *conn;
1931         __u16 handle;
1932
1933         skb_pull(skb, HCI_SCO_HDR_SIZE);
1934
1935         handle = __le16_to_cpu(hdr->handle);
1936
1937         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1938
1939         hdev->stat.sco_rx++;
1940
1941         hci_dev_lock(hdev);
1942         conn = hci_conn_hash_lookup_handle(hdev, handle);
1943         hci_dev_unlock(hdev);
1944
1945         if (conn) {
1946                 struct hci_proto *hp;
1947
1948                 /* Send to upper protocol */
1949                 hp = hci_proto[HCI_PROTO_SCO];
1950                 if (hp && hp->recv_scodata) {
1951                         hp->recv_scodata(conn, skb);
1952                         return;
1953                 }
1954         } else {
1955                 BT_ERR("%s SCO packet for unknown connection handle %d",
1956                         hdev->name, handle);
1957         }
1958
1959         kfree_skb(skb);
1960 }
1961
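/* RX tasklet body: each dequeued frame is first mirrored to promiscuous
 * sockets, then either dropped (raw mode, or data frames while HCI_INIT
 * is set) or dispatched to the event/ACL/SCO handlers by packet type. */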
1962 static void hci_rx_task(unsigned long arg)
1963 {
1964         struct hci_dev *hdev = (struct hci_dev *) arg;
1965         struct sk_buff *skb;
1966
1967         BT_DBG("%s", hdev->name);
1968
1969         read_lock(&hci_task_lock);
1970
1971         while ((skb = skb_dequeue(&hdev->rx_q))) {
1972                 if (atomic_read(&hdev->promisc)) {
1973                         /* Send a copy to the sockets */
1974                         hci_send_to_sock(hdev, skb, NULL);
1975                 }
1976
1977                 if (test_bit(HCI_RAW, &hdev->flags)) {
1978                         kfree_skb(skb);
1979                         continue;
1980                 }
1981
1982                 if (test_bit(HCI_INIT, &hdev->flags)) {
1983                         /* Don't process data packets in this state. */
1984                         switch (bt_cb(skb)->pkt_type) {
1985                         case HCI_ACLDATA_PKT:
1986                         case HCI_SCODATA_PKT:
1987                                 kfree_skb(skb);
1988                                 continue;
1989                         }
1990                 }
1991
1992                 /* Process frame */
1993                 switch (bt_cb(skb)->pkt_type) {
1994                 case HCI_EVENT_PKT:
1995                         hci_event_packet(hdev, skb);
1996                         break;
1997
1998                 case HCI_ACLDATA_PKT:
1999                         BT_DBG("%s ACL data packet", hdev->name);
2000                         hci_acldata_packet(hdev, skb);
2001                         break;
2002
2003                 case HCI_SCODATA_PKT:
2004                         BT_DBG("%s SCO data packet", hdev->name);
2005                         hci_scodata_packet(hdev, skb);
2006                         break;
2007
2008                 default:
2009                         kfree_skb(skb);
2010                         break;
2011                 }
2012         }
2013
2014         read_unlock(&hci_task_lock);
2015 }
2016
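/* Command tasklet: HCI flow control permits only cmd_cnt outstanding
 * commands. A clone of each sent command is kept in sent_cmd so the
 * matching Command Complete/Status event can be paired with it, and
 * cmd_timer catches controllers that never answer. */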
2017 static void hci_cmd_task(unsigned long arg)
2018 {
2019         struct hci_dev *hdev = (struct hci_dev *) arg;
2020         struct sk_buff *skb;
2021
2022         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2023
2024         /* Send queued commands */
2025         if (atomic_read(&hdev->cmd_cnt)) {
2026                 skb = skb_dequeue(&hdev->cmd_q);
2027                 if (!skb)
2028                         return;
2029
2030                 kfree_skb(hdev->sent_cmd);
2031
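                /* Keep a clone for the event handler; the driver consumes
                 * the original. On allocation failure, requeue the command
                 * and retry from the next tasklet run. */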
2032                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2033                 if (hdev->sent_cmd) {
2034                         atomic_dec(&hdev->cmd_cnt);
2035                         hci_send_frame(skb);
2036                         mod_timer(&hdev->cmd_timer,
2037                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2038                 } else {
2039                         skb_queue_head(&hdev->cmd_q, skb);
2040                         tasklet_schedule(&hdev->cmd_task);
2041                 }
2042         }
2043 }