net/bluetooth/hci_core.c (kernel/linux-2.6.36, Tizen 1.0 source)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */
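/*
 * An HCI "request" runs a callback that queues one or more commands and
 * then sleeps on hdev->req_wait_q until hci_req_complete() posts the
 * result from the event path (or the request is cancelled or times out).
 */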

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

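/*
 * Bring-up sequence run under HCI_INIT: first push any commands the driver
 * pre-queued on driver_init, then the mandatory reset and capability reads,
 * and finally optional defaults (event filter, page/accept timeouts).
 */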
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs (0x8000 slots * 0.625 ms = 20.48 s) */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms = 20 s) */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
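/*
 * The inquiry cache is a simple singly-linked list of discovered devices,
 * guarded by the hdev lock and aged via inquiry_cache_age() so a fresh
 * inquiry is only forced when the cached results have gone stale.
 */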
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

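/*
 * HCIINQUIRY ioctl backend: flush and re-run the inquiry when the cache is
 * stale, empty, or the caller asked for a flush, then copy the cached
 * results back to userspace. The wait allows 2 s per requested length
 * unit, comfortably above the 1.28 s per unit the controller actually uses.
 */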
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
         * and copy it to user space afterwards.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kzalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */
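/*
 * A transport driver allocates an hci_dev, fills in its callbacks, and
 * registers it. Minimal sketch (callback names are hypothetical; the core
 * requires open, close and destruct, and calls send to transmit):
 *
 *      hdev = hci_alloc_dev();
 *      hdev->bus      = HCI_USB;       // or whichever bus the driver uses
 *      hdev->open     = my_open;
 *      hdev->close    = my_close;
 *      hdev->send     = my_send;
 *      hdev->destruct = my_destruct;
 *      if (hci_register_dev(hdev) < 0)
 *              hci_free_dev(hdev);
 */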

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->blacklist);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        destroy_workqueue(hdev->workqueue);

#ifdef FEATURE_DELAYED_HCI_UNREGISTER
        hdev->workqueue = NULL;
#endif
        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

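/*
 * Reassemble a packet from arbitrarily sized driver chunks: allocate an skb
 * sized for the packet type, copy the header first to learn the expected
 * payload length, then keep copying until scb->expect drops to zero and the
 * complete frame can be handed to hci_recv_frame(). Returns the number of
 * input bytes left unconsumed, or a negative error.
 */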
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index, gfp_t gfp_mask)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, gfp_mask);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}

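/*
 * For drivers whose transport already tags each chunk with its packet type:
 * each type gets its own reassembly slot (type - 1 maps ACL/SCO/EVENT onto
 * the per-type indexes).
 */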
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count,
                                                type - 1, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

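/*
 * For pure byte-stream transports (e.g. UART-style drivers) where the packet
 * type is carried in-band as the first byte of every frame: peel off the
 * type byte when starting a new frame, then reuse the shared
 * STREAM_REASSEMBLY slot.
 */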
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data,
                                        count, STREAM_REASSEMBLY, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

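/*
 * The first buffer of a frame is tagged ACL_START; any skbs chained on
 * frag_list are re-tagged ACL_CONT and queued atomically behind it so the
 * scheduler never interleaves another frame between the fragments.
 */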
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
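/*
 * Pick the connection of the given type with the fewest packets in flight
 * (c->sent) and grant it an even share of the free controller buffers:
 * quote = available buffer credits / number of ready connections, with a
 * minimum of one so progress is always made.
 */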
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

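/*
 * hdev->cmd_cnt is the command flow-control credit from the controller; it
 * is replenished by Command Complete/Status events (handled in hci_event.c).
 * The last command is kept cloned in hdev->sent_cmd so hci_sent_cmd_data()
 * can retrieve its parameters when the matching event arrives. If the
 * credit is stuck at zero for over a second, assume the response was lost
 * and reset it.
 */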
static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}