Bluetooth: Really fix registering hci with duplicate name
net/bluetooth/hci_core.c (platform/adaptation/renesas_rcar/renesas_kernel.git)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase check if the completed command matches
         * the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);
                struct sk_buff *skb;

                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */

                if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
                        return;

                skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
                if (skb) {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }

                return;
        }

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
                         void (*req)(struct hci_dev *hdev, unsigned long opt),
                         unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_request(struct hci_dev *hdev,
                       void (*req)(struct hci_dev *hdev, unsigned long opt),
                       unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
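
/* Example (illustrative only, not part of the original file): a synchronous
 * caller built on hci_request(). The callback fires the HCI command, and
 * hci_request() sleeps until hci_req_complete() reports the result.
 * SCAN_PAGE and HCI_INIT_TIMEOUT are used elsewhere in this file;
 * hci_scan_req() is defined further below. The same pattern drives all of
 * the *_req() callbacks that follow.
 *
 *	static int example_enable_page_scan(struct hci_dev *hdev)
 *	{
 *		return hci_request(hdev, hci_scan_req, SCAN_PAGE,
 *				   msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *	}
 */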
171
172 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
173 {
174         BT_DBG("%s %ld", hdev->name, opt);
175
176         /* Reset device */
177         set_bit(HCI_RESET, &hdev->flags);
178         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
179 }
180
181 static void bredr_init(struct hci_dev *hdev)
182 {
183         struct hci_cp_delete_stored_link_key cp;
184         __le16 param;
185         __u8 flt_type;
186
187         hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
188
189         /* Mandatory initialization */
190
191         /* Reset */
192         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
193                 set_bit(HCI_RESET, &hdev->flags);
194                 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
195         }
196
197         /* Read Local Supported Features */
198         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
199
200         /* Read Local Version */
201         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
202
203         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
204         hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
205
206         /* Read BD Address */
207         hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
208
209         /* Read Class of Device */
210         hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
211
212         /* Read Local Name */
213         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
214
215         /* Read Voice Setting */
216         hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
217
218         /* Optional initialization */
219
220         /* Clear Event Filters */
221         flt_type = HCI_FLT_CLEAR_ALL;
222         hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
223
224         /* Connection accept timeout ~20 secs */
225         param = __constant_cpu_to_le16(0x7d00);
226         hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
227
228         bacpy(&cp.bdaddr, BDADDR_ANY);
229         cp.delete_all = 1;
230         hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
231 }
232
233 static void amp_init(struct hci_dev *hdev)
234 {
235         hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
236
237         /* Reset */
238         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
239
240         /* Read Local Version */
241         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
242
243         /* Read Local AMP Info */
244         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
245 }
246
247 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
248 {
249         struct sk_buff *skb;
250
251         BT_DBG("%s %ld", hdev->name, opt);
252
253         /* Driver initialization */
254
255         /* Special commands */
256         while ((skb = skb_dequeue(&hdev->driver_init))) {
257                 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
258                 skb->dev = (void *) hdev;
259
260                 skb_queue_tail(&hdev->cmd_q, skb);
261                 queue_work(hdev->workqueue, &hdev->cmd_work);
262         }
263         skb_queue_purge(&hdev->driver_init);
264
265         switch (hdev->dev_type) {
266         case HCI_BREDR:
267                 bredr_init(hdev);
268                 break;
269
270         case HCI_AMP:
271                 amp_init(hdev);
272                 break;
273
274         default:
275                 BT_ERR("Unknown device type %d", hdev->dev_type);
276                 break;
277         }
278
279 }
280
281 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
282 {
283         BT_DBG("%s", hdev->name);
284
285         /* Read LE buffer size */
286         hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
287 }
288
289 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
290 {
291         __u8 scan = opt;
292
293         BT_DBG("%s %x", hdev->name, scan);
294
295         /* Inquiry and Page scans */
296         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
297 }
298
299 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
300 {
301         __u8 auth = opt;
302
303         BT_DBG("%s %x", hdev->name, auth);
304
305         /* Authentication */
306         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
307 }
308
309 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
310 {
311         __u8 encrypt = opt;
312
313         BT_DBG("%s %x", hdev->name, encrypt);
314
315         /* Encryption */
316         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
317 }
318
319 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
320 {
321         __le16 policy = cpu_to_le16(opt);
322
323         BT_DBG("%s %x", hdev->name, policy);
324
325         /* Default link policy */
326         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
327 }
328
329 /* Get HCI device by index.
330  * Device is held on return. */
331 struct hci_dev *hci_dev_get(int index)
332 {
333         struct hci_dev *hdev = NULL, *d;
334
335         BT_DBG("%d", index);
336
337         if (index < 0)
338                 return NULL;
339
340         read_lock(&hci_dev_list_lock);
341         list_for_each_entry(d, &hci_dev_list, list) {
342                 if (d->id == index) {
343                         hdev = hci_dev_hold(d);
344                         break;
345                 }
346         }
347         read_unlock(&hci_dev_list_lock);
348         return hdev;
349 }
350
351 /* ---- Inquiry support ---- */
352
353 bool hci_discovery_active(struct hci_dev *hdev)
354 {
355         struct discovery_state *discov = &hdev->discovery;
356
357         switch (discov->state) {
358         case DISCOVERY_FINDING:
359         case DISCOVERY_RESOLVING:
360                 return true;
361
362         default:
363                 return false;
364         }
365 }
366
367 void hci_discovery_set_state(struct hci_dev *hdev, int state)
368 {
369         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
370
371         if (hdev->discovery.state == state)
372                 return;
373
374         switch (state) {
375         case DISCOVERY_STOPPED:
376                 if (hdev->discovery.state != DISCOVERY_STARTING)
377                         mgmt_discovering(hdev, 0);
378                 break;
379         case DISCOVERY_STARTING:
380                 break;
381         case DISCOVERY_FINDING:
382                 mgmt_discovering(hdev, 1);
383                 break;
384         case DISCOVERY_RESOLVING:
385                 break;
386         case DISCOVERY_STOPPING:
387                 break;
388         }
389
390         hdev->discovery.state = state;
391 }
392
393 static void inquiry_cache_flush(struct hci_dev *hdev)
394 {
395         struct discovery_state *cache = &hdev->discovery;
396         struct inquiry_entry *p, *n;
397
398         list_for_each_entry_safe(p, n, &cache->all, all) {
399                 list_del(&p->all);
400                 kfree(p);
401         }
402
403         INIT_LIST_HEAD(&cache->unknown);
404         INIT_LIST_HEAD(&cache->resolve);
405 }
406
407 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
408                                                bdaddr_t *bdaddr)
409 {
410         struct discovery_state *cache = &hdev->discovery;
411         struct inquiry_entry *e;
412
413         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
414
415         list_for_each_entry(e, &cache->all, all) {
416                 if (!bacmp(&e->data.bdaddr, bdaddr))
417                         return e;
418         }
419
420         return NULL;
421 }
422
423 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
424                                                        bdaddr_t *bdaddr)
425 {
426         struct discovery_state *cache = &hdev->discovery;
427         struct inquiry_entry *e;
428
429         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
430
431         list_for_each_entry(e, &cache->unknown, list) {
432                 if (!bacmp(&e->data.bdaddr, bdaddr))
433                         return e;
434         }
435
436         return NULL;
437 }
438
439 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
440                                                        bdaddr_t *bdaddr,
441                                                        int state)
442 {
443         struct discovery_state *cache = &hdev->discovery;
444         struct inquiry_entry *e;
445
446         BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
447
448         list_for_each_entry(e, &cache->resolve, list) {
449                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
450                         return e;
451                 if (!bacmp(&e->data.bdaddr, bdaddr))
452                         return e;
453         }
454
455         return NULL;
456 }
457
458 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
459                                       struct inquiry_entry *ie)
460 {
461         struct discovery_state *cache = &hdev->discovery;
462         struct list_head *pos = &cache->resolve;
463         struct inquiry_entry *p;
464
465         list_del(&ie->list);
466
467         list_for_each_entry(p, &cache->resolve, list) {
468                 if (p->name_state != NAME_PENDING &&
469                     abs(p->data.rssi) >= abs(ie->data.rssi))
470                         break;
471                 pos = &p->list;
472         }
473
474         list_add(&ie->list, pos);
475 }
476
477 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
478                               bool name_known, bool *ssp)
479 {
480         struct discovery_state *cache = &hdev->discovery;
481         struct inquiry_entry *ie;
482
483         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
484
485         if (ssp)
486                 *ssp = data->ssp_mode;
487
488         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
489         if (ie) {
490                 if (ie->data.ssp_mode && ssp)
491                         *ssp = true;
492
493                 if (ie->name_state == NAME_NEEDED &&
494                     data->rssi != ie->data.rssi) {
495                         ie->data.rssi = data->rssi;
496                         hci_inquiry_cache_update_resolve(hdev, ie);
497                 }
498
499                 goto update;
500         }
501
502         /* Entry not in the cache. Add new one. */
503         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
504         if (!ie)
505                 return false;
506
507         list_add(&ie->all, &cache->all);
508
509         if (name_known) {
510                 ie->name_state = NAME_KNOWN;
511         } else {
512                 ie->name_state = NAME_NOT_KNOWN;
513                 list_add(&ie->list, &cache->unknown);
514         }
515
516 update:
517         if (name_known && ie->name_state != NAME_KNOWN &&
518             ie->name_state != NAME_PENDING) {
519                 ie->name_state = NAME_KNOWN;
520                 list_del(&ie->list);
521         }
522
523         memcpy(&ie->data, data, sizeof(*data));
524         ie->timestamp = jiffies;
525         cache->timestamp = jiffies;
526
527         if (ie->name_state == NAME_NOT_KNOWN)
528                 return false;
529
530         return true;
531 }
532
533 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
534 {
535         struct discovery_state *cache = &hdev->discovery;
536         struct inquiry_info *info = (struct inquiry_info *) buf;
537         struct inquiry_entry *e;
538         int copied = 0;
539
540         list_for_each_entry(e, &cache->all, all) {
541                 struct inquiry_data *data = &e->data;
542
543                 if (copied >= num)
544                         break;
545
546                 bacpy(&info->bdaddr, &data->bdaddr);
547                 info->pscan_rep_mode    = data->pscan_rep_mode;
548                 info->pscan_period_mode = data->pscan_period_mode;
549                 info->pscan_mode        = data->pscan_mode;
550                 memcpy(info->dev_class, data->dev_class, 3);
551                 info->clock_offset      = data->clock_offset;
552
553                 info++;
554                 copied++;
555         }
556
557         BT_DBG("cache %p, copied %d", cache, copied);
558         return copied;
559 }
560
561 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
562 {
563         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
564         struct hci_cp_inquiry cp;
565
566         BT_DBG("%s", hdev->name);
567
568         if (test_bit(HCI_INQUIRY, &hdev->flags))
569                 return;
570
571         /* Start Inquiry */
572         memcpy(&cp.lap, &ir->lap, 3);
573         cp.length  = ir->length;
574         cp.num_rsp = ir->num_rsp;
575         hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
576 }
577
578 int hci_inquiry(void __user *arg)
579 {
580         __u8 __user *ptr = arg;
581         struct hci_inquiry_req ir;
582         struct hci_dev *hdev;
583         int err = 0, do_inquiry = 0, max_rsp;
584         long timeo;
585         __u8 *buf;
586
587         if (copy_from_user(&ir, ptr, sizeof(ir)))
588                 return -EFAULT;
589
590         hdev = hci_dev_get(ir.dev_id);
591         if (!hdev)
592                 return -ENODEV;
593
594         hci_dev_lock(hdev);
595         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
596             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
597                 inquiry_cache_flush(hdev);
598                 do_inquiry = 1;
599         }
600         hci_dev_unlock(hdev);
601
602         timeo = ir.length * msecs_to_jiffies(2000);
603
604         if (do_inquiry) {
605                 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
606                 if (err < 0)
607                         goto done;
608         }
609
610         /* for unlimited number of responses we will use buffer with
611          * 255 entries
612          */
613         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
614
615         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
616          * copy it to the user space.
617          */
618         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
619         if (!buf) {
620                 err = -ENOMEM;
621                 goto done;
622         }
623
624         hci_dev_lock(hdev);
625         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
626         hci_dev_unlock(hdev);
627
628         BT_DBG("num_rsp %d", ir.num_rsp);
629
630         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
631                 ptr += sizeof(ir);
632                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
633                                  ir.num_rsp))
634                         err = -EFAULT;
635         } else
636                 err = -EFAULT;
637
638         kfree(buf);
639
640 done:
641         hci_dev_put(hdev);
642         return err;
643 }
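
/* Example (user space, illustrative only): exercising hci_inquiry() above
 * through the HCIINQUIRY ioctl on a raw HCI socket. The argument is one
 * buffer holding a struct hci_inquiry_req immediately followed by space
 * for the returned inquiry_info entries; dev_id 0 means hci0, the LAP
 * 0x9e8b33 is the General Inquiry Access Code, length is in 1.28 s units
 * and IREQ_CACHE_FLUSH forces a fresh inquiry. Error handling is omitted;
 * libbluetooth's hci_inquiry() helper wraps this same call.
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	int num_rsp = 8;
 *	void *buf = malloc(sizeof(struct hci_inquiry_req) +
 *			   num_rsp * sizeof(inquiry_info));
 *	struct hci_inquiry_req *ir = buf;
 *	inquiry_info *info = buf + sizeof(*ir);
 *
 *	memset(ir, 0, sizeof(*ir));
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;
 *	ir->num_rsp = num_rsp;
 *
 *	if (ioctl(dd, HCIINQUIRY, (unsigned long) buf) >= 0) {
 *		int i;
 *		for (i = 0; i < ir->num_rsp; i++) {
 *			char addr[18];
 *			ba2str(&info[i].bdaddr, addr);
 *			printf("%s\n", addr);
 *		}
 *	}
 */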

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices if
           enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                    msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_host_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                            msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                              msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Clear flags */
        hdev->flags = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                    msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                          msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                schedule_delayed_work(&hdev->power_off,
                                      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}
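
/* For reference (values from the Bluetooth core specification, not defined
 * in this file): the auth_type / remote_auth codes tested above are
 * 0x00 No Bonding, 0x01 No Bonding (MITM), 0x02 Dedicated Bonding,
 * 0x03 Dedicated Bonding (MITM), 0x04 General Bonding and 0x05 General
 * Bonding (MITM). So "> 0x01" means some form of bonding was requested,
 * and the 0x02/0x03 checks single out dedicated bonding.
 */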

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
                __le16 ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                            u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
        struct le_scan_params *param = (struct le_scan_params *) opt;
        struct hci_cp_le_set_scan_param cp;

        memset(&cp, 0, sizeof(cp));
        cp.type = param->type;
        cp.interval = cpu_to_le16(param->interval);
        cp.window = cpu_to_le16(param->window);

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = 1;

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
                          u16 window, int timeout)
{
        long timeo = msecs_to_jiffies(3000);
        struct le_scan_params param;
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                return -EINPROGRESS;

        param.type = type;
        param.interval = interval;
        param.window = window;

        hci_req_lock(hdev);

        err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
                            timeo);
        if (!err)
                err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

        hci_req_unlock(hdev);

        if (err < 0)
                return err;

        schedule_delayed_work(&hdev->le_scan_disable,
                              msecs_to_jiffies(timeout));

        return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                return -EALREADY;

        if (cancel_delayed_work(&hdev->le_scan_disable)) {
                struct hci_cp_le_set_scan_enable cp;

                /* Send HCI command to disable LE Scan */
                memset(&cp, 0, sizeof(cp));
                hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_cp_le_set_scan_enable cp;

        BT_DBG("%s", hdev->name);

        memset(&cp, 0, sizeof(cp));

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        hci_do_le_scan(hdev, param->type, param->interval, param->window,
                       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
                int timeout)
{
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        if (work_busy(&hdev->le_scan))
                return -EINPROGRESS;

        param->type = type;
        param->interval = interval;
        param->window = window;
        param->timeout = timeout;

        queue_work(system_long_wq, &hdev->le_scan);

        return 0;
}
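
/* Example (illustrative only): how a caller such as mgmt would kick off a
 * deferred LE scan through hci_le_scan(). 0x01 selects active scanning;
 * interval and window are in 0.625 ms units (0x0012 = 11.25 ms) and the
 * timeout is in milliseconds. These particular values are sample numbers,
 * not mandated by this file.
 *
 *	err = hci_le_scan(hdev, 0x01, 0x0012, 0x0012, 10240);
 */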

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);

        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->blacklist);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_WORK(&hdev->le_scan, le_scan_work);

        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

        skb_queue_head_init(&hdev->driver_init);
        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        hci_init_sysfs(hdev);
        discovery_init(hdev);
        hci_conn_hash_init(hdev);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        int id, error;

        if (!hdev->open || !hdev->close)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        switch (hdev->dev_type) {
        case HCI_BREDR:
                id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
                break;
        case HCI_AMP:
                id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
                break;
        default:
                return -EINVAL;
        }

        if (id < 0)
                return id;

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock(&hci_dev_list_lock);
        list_add(&hdev->list, &hci_dev_list);
        write_unlock(&hci_dev_list_lock);

        hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
                                          WQ_MEM_RECLAIM, 1);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        error = hci_add_sysfs(hdev);
        if (error < 0)
                goto err_wqueue;

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
                                    hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
        set_bit(HCI_SETUP, &hdev->dev_flags);
        schedule_work(&hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
err:
        ida_simple_remove(&hci_index_ida, hdev->id);
        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);
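
/* Example (illustrative only): the minimal driver-side sequence for
 * registering a controller with the API above. The my_* callbacks are
 * hypothetical stand-ins for a real transport; in this kernel the send()
 * hook receives the skb with skb->dev already pointing at the hdev.
 *
 *	static int my_open(struct hci_dev *hdev) { return 0; }
 *	static int my_close(struct hci_dev *hdev) { return 0; }
 *	static int my_send(struct sk_buff *skb) { kfree_skb(skb); return 0; }
 *
 *	static int my_probe(void)
 *	{
 *		struct hci_dev *hdev = hci_alloc_dev();
 *		int err;
 *
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		hdev->bus = HCI_VIRTUAL;
 *		hdev->dev_type = HCI_BREDR;
 *		hdev->open  = my_open;
 *		hdev->close = my_close;
 *		hdev->send  = my_send;
 *
 *		err = hci_register_dev(hdev);
 *		if (err < 0) {
 *			hci_free_dev(hdev);
 *			return err;
 *		}
 *
 *		return 0;
 *	}
 */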
1768
1769 /* Unregister HCI device */
1770 void hci_unregister_dev(struct hci_dev *hdev)
1771 {
1772         int i, id;
1773
1774         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1775
1776         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1777
1778         id = hdev->id;
1779
1780         write_lock(&hci_dev_list_lock);
1781         list_del(&hdev->list);
1782         write_unlock(&hci_dev_list_lock);
1783
1784         hci_dev_do_close(hdev);
1785
1786         for (i = 0; i < NUM_REASSEMBLY; i++)
1787                 kfree_skb(hdev->reassembly[i]);
1788
1789         if (!test_bit(HCI_INIT, &hdev->flags) &&
1790             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1791                 hci_dev_lock(hdev);
1792                 mgmt_index_removed(hdev);
1793                 hci_dev_unlock(hdev);
1794         }
1795
1796         /* mgmt_index_removed should take care of emptying the
1797          * pending list */
1798         BUG_ON(!list_empty(&hdev->mgmt_pending));
1799
1800         hci_notify(hdev, HCI_DEV_UNREG);
1801
1802         if (hdev->rfkill) {
1803                 rfkill_unregister(hdev->rfkill);
1804                 rfkill_destroy(hdev->rfkill);
1805         }
1806
1807         hci_del_sysfs(hdev);
1808
1809         destroy_workqueue(hdev->workqueue);
1810
1811         hci_dev_lock(hdev);
1812         hci_blacklist_clear(hdev);
1813         hci_uuids_clear(hdev);
1814         hci_link_keys_clear(hdev);
1815         hci_smp_ltks_clear(hdev);
1816         hci_remote_oob_data_clear(hdev);
1817         hci_dev_unlock(hdev);
1818
1819         hci_dev_put(hdev);
1820
1821         ida_simple_remove(&hci_index_ida, id);
1822 }
1823 EXPORT_SYMBOL(hci_unregister_dev);
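
/* Example sketch (hypothetical, matching the registration sketch above):
 * hci_unregister_dev() drops the reference taken at register time, and
 * hci_free_dev() releases the device once the last holder is gone.
 */
static void foo_remove(struct hci_dev *hdev)
{
        hci_unregister_dev(hdev);
        hci_free_dev(hdev);
}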
1824
1825 /* Suspend HCI device */
1826 int hci_suspend_dev(struct hci_dev *hdev)
1827 {
1828         hci_notify(hdev, HCI_DEV_SUSPEND);
1829         return 0;
1830 }
1831 EXPORT_SYMBOL(hci_suspend_dev);
1832
1833 /* Resume HCI device */
1834 int hci_resume_dev(struct hci_dev *hdev)
1835 {
1836         hci_notify(hdev, HCI_DEV_RESUME);
1837         return 0;
1838 }
1839 EXPORT_SYMBOL(hci_resume_dev);
1840
1841 /* Receive frame from HCI drivers */
1842 int hci_recv_frame(struct sk_buff *skb)
1843 {
1844         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1845         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
1846                          !test_bit(HCI_INIT, &hdev->flags))) {
1847                 kfree_skb(skb);
1848                 return -ENXIO;
1849         }
1850
1851         /* Incoming skb */
1852         bt_cb(skb)->incoming = 1;
1853
1854         /* Time stamp */
1855         __net_timestamp(skb);
1856
1857         skb_queue_tail(&hdev->rx_q, skb);
1858         queue_work(hdev->workqueue, &hdev->rx_work);
1859
1860         return 0;
1861 }
1862 EXPORT_SYMBOL(hci_recv_frame);
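
/* Example sketch (hypothetical helper, not from the original file): how a
 * driver feeds a complete frame to the core. The skb must carry its packet
 * type in the control buffer and the owning hdev in skb->dev before
 * hci_recv_frame() is called.
 */
static int foo_deliver(struct hci_dev *hdev, const void *buf, int len,
                       __u8 type)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        memcpy(skb_put(skb, len), buf, len);
        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = type;

        return hci_recv_frame(skb);
}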
1863
1864 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1865                           int count, __u8 index)
1866 {
1867         int len = 0;
1868         int hlen = 0;
1869         int remain = count;
1870         struct sk_buff *skb;
1871         struct bt_skb_cb *scb;
1872
1873         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1874             index >= NUM_REASSEMBLY)
1875                 return -EILSEQ;
1876
1877         skb = hdev->reassembly[index];
1878
1879         if (!skb) {
1880                 switch (type) {
1881                 case HCI_ACLDATA_PKT:
1882                         len = HCI_MAX_FRAME_SIZE;
1883                         hlen = HCI_ACL_HDR_SIZE;
1884                         break;
1885                 case HCI_EVENT_PKT:
1886                         len = HCI_MAX_EVENT_SIZE;
1887                         hlen = HCI_EVENT_HDR_SIZE;
1888                         break;
1889                 case HCI_SCODATA_PKT:
1890                         len = HCI_MAX_SCO_SIZE;
1891                         hlen = HCI_SCO_HDR_SIZE;
1892                         break;
1893                 }
1894
1895                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1896                 if (!skb)
1897                         return -ENOMEM;
1898
1899                 scb = (void *) skb->cb;
1900                 scb->expect = hlen;
1901                 scb->pkt_type = type;
1902
1903                 skb->dev = (void *) hdev;
1904                 hdev->reassembly[index] = skb;
1905         }
1906
1907         while (count) {
1908                 scb = (void *) skb->cb;
1909                 len = min_t(uint, scb->expect, count);
1910
1911                 memcpy(skb_put(skb, len), data, len);
1912
1913                 count -= len;
1914                 data += len;
1915                 scb->expect -= len;
1916                 remain = count;
1917
1918                 switch (type) {
1919                 case HCI_EVENT_PKT:
1920                         if (skb->len == HCI_EVENT_HDR_SIZE) {
1921                                 struct hci_event_hdr *h = hci_event_hdr(skb);
1922                                 scb->expect = h->plen;
1923
1924                                 if (skb_tailroom(skb) < scb->expect) {
1925                                         kfree_skb(skb);
1926                                         hdev->reassembly[index] = NULL;
1927                                         return -ENOMEM;
1928                                 }
1929                         }
1930                         break;
1931
1932                 case HCI_ACLDATA_PKT:
1933                         if (skb->len == HCI_ACL_HDR_SIZE) {
1934                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1935                                 scb->expect = __le16_to_cpu(h->dlen);
1936
1937                                 if (skb_tailroom(skb) < scb->expect) {
1938                                         kfree_skb(skb);
1939                                         hdev->reassembly[index] = NULL;
1940                                         return -ENOMEM;
1941                                 }
1942                         }
1943                         break;
1944
1945                 case HCI_SCODATA_PKT:
1946                         if (skb->len == HCI_SCO_HDR_SIZE) {
1947                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1948                                 scb->expect = h->dlen;
1949
1950                                 if (skb_tailroom(skb) < scb->expect) {
1951                                         kfree_skb(skb);
1952                                         hdev->reassembly[index] = NULL;
1953                                         return -ENOMEM;
1954                                 }
1955                         }
1956                         break;
1957                 }
1958
1959                 if (scb->expect == 0) {
1960                         /* Complete frame */
1961
1962                         bt_cb(skb)->pkt_type = type;
1963                         hci_recv_frame(skb);
1964
1965                         hdev->reassembly[index] = NULL;
1966                         return remain;
1967                 }
1968         }
1969
1970         return remain;
1971 }
1972
1973 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1974 {
1975         int rem = 0;
1976
1977         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1978                 return -EILSEQ;
1979
1980         while (count) {
1981                 rem = hci_reassembly(hdev, type, data, count, type - 1);
1982                 if (rem < 0)
1983                         return rem;
1984
1985                 data += (count - rem);
1986                 count = rem;
1987         }
1988
1989         return rem;
1990 }
1991 EXPORT_SYMBOL(hci_recv_fragment);
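
/* Example sketch (hypothetical): a driver that already knows the packet
 * type can push arbitrarily sized chunks through hci_recv_fragment(); the
 * core buffers partial headers and payloads per type and emits complete
 * frames via hci_recv_frame().
 */
static int foo_recv_event_chunk(struct hci_dev *hdev, void *data, int count)
{
        int err;

        err = hci_recv_fragment(hdev, HCI_EVENT_PKT, data, count);
        if (err < 0)
                BT_ERR("%s frame reassembly failed (%d)", hdev->name, err);

        return err;
}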
1992
1993 #define STREAM_REASSEMBLY 0
1994
1995 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1996 {
1997         int type;
1998         int rem = 0;
1999
2000         while (count) {
2001                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2002
2003                 if (!skb) {
2004                         struct { char type; } *pkt;
2005
2006                         /* Start of the frame */
2007                         pkt = data;
2008                         type = pkt->type;
2009
2010                         data++;
2011                         count--;
2012                 } else
2013                         type = bt_cb(skb)->pkt_type;
2014
2015                 rem = hci_reassembly(hdev, type, data, count,
2016                                      STREAM_REASSEMBLY);
2017                 if (rem < 0)
2018                         return rem;
2019
2020                 data += (count - rem);
2021                 count = rem;
2022         }
2023
2024         return rem;
2025 }
2026 EXPORT_SYMBOL(hci_recv_stream_fragment);
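
/* Example sketch (hypothetical): UART-style transports deliver a byte
 * stream in which every frame starts with a one-byte packet type indicator
 * (H:4 framing); hci_recv_stream_fragment() strips that byte and reuses
 * the reassembly machinery above.
 */
static int foo_recv_bytes(struct hci_dev *hdev, void *data, int count)
{
        return hci_recv_stream_fragment(hdev, data, count);
}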
2027
2028 /* ---- Interface to upper protocols ---- */
2029
2030 int hci_register_cb(struct hci_cb *cb)
2031 {
2032         BT_DBG("%p name %s", cb, cb->name);
2033
2034         write_lock(&hci_cb_list_lock);
2035         list_add(&cb->list, &hci_cb_list);
2036         write_unlock(&hci_cb_list_lock);
2037
2038         return 0;
2039 }
2040 EXPORT_SYMBOL(hci_register_cb);
2041
2042 int hci_unregister_cb(struct hci_cb *cb)
2043 {
2044         BT_DBG("%p name %s", cb, cb->name);
2045
2046         write_lock(&hci_cb_list_lock);
2047         list_del(&cb->list);
2048         write_unlock(&hci_cb_list_lock);
2049
2050         return 0;
2051 }
2052 EXPORT_SYMBOL(hci_unregister_cb);
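
/* Example sketch (hypothetical handler; field names follow this kernel's
 * definition of struct hci_cb): an upper layer hooks connection security
 * events by registering a callback block.
 */
static void foo_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
        BT_DBG("conn %p status %u encrypt %u", conn, status, encrypt);
}

static struct hci_cb foo_cb = {
        .name           = "foo",
        .security_cfm   = foo_security_cfm,
};

static int __init foo_init(void)
{
        return hci_register_cb(&foo_cb);
}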
2053
2054 static int hci_send_frame(struct sk_buff *skb)
2055 {
2056         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2057
2058         if (!hdev) {
2059                 kfree_skb(skb);
2060                 return -ENODEV;
2061         }
2062
2063         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2064
2065         /* Time stamp */
2066         __net_timestamp(skb);
2067
2068         /* Send copy to monitor */
2069         hci_send_to_monitor(hdev, skb);
2070
2071         if (atomic_read(&hdev->promisc)) {
2072                 /* Send copy to the sockets */
2073                 hci_send_to_sock(hdev, skb);
2074         }
2075
2076         /* Get rid of skb owner, prior to sending to the driver. */
2077         skb_orphan(skb);
2078
2079         return hdev->send(skb);
2080 }
2081
2082 /* Send HCI command */
2083 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2084 {
2085         int len = HCI_COMMAND_HDR_SIZE + plen;
2086         struct hci_command_hdr *hdr;
2087         struct sk_buff *skb;
2088
2089         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2090
2091         skb = bt_skb_alloc(len, GFP_ATOMIC);
2092         if (!skb) {
2093                 BT_ERR("%s no memory for command", hdev->name);
2094                 return -ENOMEM;
2095         }
2096
2097         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2098         hdr->opcode = cpu_to_le16(opcode);
2099         hdr->plen   = plen;
2100
2101         if (plen)
2102                 memcpy(skb_put(skb, plen), param, plen);
2103
2104         BT_DBG("skb len %d", skb->len);
2105
2106         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2107         skb->dev = (void *) hdev;
2108
2109         if (test_bit(HCI_INIT, &hdev->flags))
2110                 hdev->init_last_cmd = opcode;
2111
2112         skb_queue_tail(&hdev->cmd_q, skb);
2113         queue_work(hdev->workqueue, &hdev->cmd_work);
2114
2115         return 0;
2116 }
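
/* Example sketch (hypothetical caller): queueing a simple command from
 * elsewhere in the stack. SCAN_PAGE, SCAN_INQUIRY and
 * HCI_OP_WRITE_SCAN_ENABLE come from hci.h; this mirrors how mgmt builds
 * its commands.
 */
static int foo_enable_scanning(struct hci_dev *hdev)
{
        __u8 scan = SCAN_PAGE | SCAN_INQUIRY;

        return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}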
2117
2118 /* Get data from the previously sent command */
2119 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2120 {
2121         struct hci_command_hdr *hdr;
2122
2123         if (!hdev->sent_cmd)
2124                 return NULL;
2125
2126         hdr = (void *) hdev->sent_cmd->data;
2127
2128         if (hdr->opcode != cpu_to_le16(opcode))
2129                 return NULL;
2130
2131         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2132
2133         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2134 }
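
/* Example sketch (hypothetical): a command-complete handler can recover
 * the parameters it originally sent, as the handlers in hci_event.c do.
 */
static void foo_cc_write_scan_enable(struct hci_dev *hdev)
{
        __u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

        if (!sent)
                return;

        BT_DBG("%s scan was set to 0x%2.2x", hdev->name, *sent);
}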
2135
2136 /* Send ACL data */
2137 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2138 {
2139         struct hci_acl_hdr *hdr;
2140         int len = skb->len;
2141
2142         skb_push(skb, HCI_ACL_HDR_SIZE);
2143         skb_reset_transport_header(skb);
2144         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2145         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2146         hdr->dlen   = cpu_to_le16(len);
2147 }
2148
2149 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2150                           struct sk_buff *skb, __u16 flags)
2151 {
2152         struct hci_dev *hdev = conn->hdev;
2153         struct sk_buff *list;
2154
2155         skb->len = skb_headlen(skb);
2156         skb->data_len = 0;
2157
2158         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2159         hci_add_acl_hdr(skb, conn->handle, flags);
2160
2161         list = skb_shinfo(skb)->frag_list;
2162         if (!list) {
2163                 /* Non-fragmented */
2164                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2165
2166                 skb_queue_tail(queue, skb);
2167         } else {
2168                 /* Fragmented */
2169                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2170
2171                 skb_shinfo(skb)->frag_list = NULL;
2172
2173                 /* Queue all fragments atomically */
2174                 spin_lock(&queue->lock);
2175
2176                 __skb_queue_tail(queue, skb);
2177
2178                 flags &= ~ACL_START;
2179                 flags |= ACL_CONT;
2180                 do {
2181                         skb = list; list = list->next;
2182
2183                         skb->dev = (void *) hdev;
2184                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2185                         hci_add_acl_hdr(skb, conn->handle, flags);
2186
2187                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2188
2189                         __skb_queue_tail(queue, skb);
2190                 } while (list);
2191
2192                 spin_unlock(&queue->lock);
2193         }
2194 }
2195
2196 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2197 {
2198         struct hci_conn *conn = chan->conn;
2199         struct hci_dev *hdev = conn->hdev;
2200
2201         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2202
2203         skb->dev = (void *) hdev;
2204
2205         hci_queue_acl(conn, &chan->data_q, skb, flags);
2206
2207         queue_work(hdev->workqueue, &hdev->tx_work);
2208 }
2209
2210 /* Send SCO data */
2211 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2212 {
2213         struct hci_dev *hdev = conn->hdev;
2214         struct hci_sco_hdr hdr;
2215
2216         BT_DBG("%s len %d", hdev->name, skb->len);
2217
2218         hdr.handle = cpu_to_le16(conn->handle);
2219         hdr.dlen   = skb->len;
2220
2221         skb_push(skb, HCI_SCO_HDR_SIZE);
2222         skb_reset_transport_header(skb);
2223         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2224
2225         skb->dev = (void *) hdev;
2226         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2227
2228         skb_queue_tail(&conn->data_q, skb);
2229         queue_work(hdev->workqueue, &hdev->tx_work);
2230 }
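
/* Example sketch (hypothetical): SCO audio is submitted per connection;
 * the core prepends the SCO header and kicks tx_work. bt_skb_alloc()
 * reserves enough headroom for the skb_push() above.
 */
static void foo_send_audio(struct hci_conn *conn, const void *buf, int len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return;

        memcpy(skb_put(skb, len), buf, len);
        hci_send_sco(conn, skb);
}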
2231
2232 /* ---- HCI TX task (outgoing data) ---- */
2233
2234 /* HCI Connection scheduler */
2235 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2236                                      int *quote)
2237 {
2238         struct hci_conn_hash *h = &hdev->conn_hash;
2239         struct hci_conn *conn = NULL, *c;
2240         unsigned int num = 0, min = ~0;
2241
2242         /* We don't have to lock device here. Connections are always
2243          * added and removed with TX task disabled. */
2244
2245         rcu_read_lock();
2246
2247         list_for_each_entry_rcu(c, &h->list, list) {
2248                 if (c->type != type || skb_queue_empty(&c->data_q))
2249                         continue;
2250
2251                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2252                         continue;
2253
2254                 num++;
2255
2256                 if (c->sent < min) {
2257                         min  = c->sent;
2258                         conn = c;
2259                 }
2260
2261                 if (hci_conn_num(hdev, type) == num)
2262                         break;
2263         }
2264
2265         rcu_read_unlock();
2266
2267         if (conn) {
2268                 int cnt, q;
2269
2270                 switch (conn->type) {
2271                 case ACL_LINK:
2272                         cnt = hdev->acl_cnt;
2273                         break;
2274                 case SCO_LINK:
2275                 case ESCO_LINK:
2276                         cnt = hdev->sco_cnt;
2277                         break;
2278                 case LE_LINK:
2279                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2280                         break;
2281                 default:
2282                         cnt = 0;
2283                         BT_ERR("Unknown link type");
2284                 }
2285
2286                 q = cnt / num;
2287                 *quote = q ? q : 1;
2288         } else
2289                 *quote = 0;
2290
2291         BT_DBG("conn %p quote %d", conn, *quote);
2292         return conn;
2293 }
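
/* Worked example (editorial): with hdev->sco_cnt == 6 and three SCO
 * connections that all have queued data, hci_low_sent() returns the
 * connection with the fewest in-flight packets and a quote of 6 / 3 = 2
 * frames; a zero quotient is rounded up to 1 so progress is always made.
 */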
2294
2295 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2296 {
2297         struct hci_conn_hash *h = &hdev->conn_hash;
2298         struct hci_conn *c;
2299
2300         BT_ERR("%s link tx timeout", hdev->name);
2301
2302         rcu_read_lock();
2303
2304         /* Kill stalled connections */
2305         list_for_each_entry_rcu(c, &h->list, list) {
2306                 if (c->type == type && c->sent) {
2307                         BT_ERR("%s killing stalled connection %s",
2308                                hdev->name, batostr(&c->dst));
2309                         hci_acl_disconn(c, 0x13);
2310                 }
2311         }
2312
2313         rcu_read_unlock();
2314 }
2315
2316 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2317                                       int *quote)
2318 {
2319         struct hci_conn_hash *h = &hdev->conn_hash;
2320         struct hci_chan *chan = NULL;
2321         unsigned int num = 0, min = ~0, cur_prio = 0;
2322         struct hci_conn *conn;
2323         int cnt, q, conn_num = 0;
2324
2325         BT_DBG("%s", hdev->name);
2326
2327         rcu_read_lock();
2328
2329         list_for_each_entry_rcu(conn, &h->list, list) {
2330                 struct hci_chan *tmp;
2331
2332                 if (conn->type != type)
2333                         continue;
2334
2335                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2336                         continue;
2337
2338                 conn_num++;
2339
2340                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2341                         struct sk_buff *skb;
2342
2343                         if (skb_queue_empty(&tmp->data_q))
2344                                 continue;
2345
2346                         skb = skb_peek(&tmp->data_q);
2347                         if (skb->priority < cur_prio)
2348                                 continue;
2349
2350                         if (skb->priority > cur_prio) {
2351                                 num = 0;
2352                                 min = ~0;
2353                                 cur_prio = skb->priority;
2354                         }
2355
2356                         num++;
2357
2358                         if (conn->sent < min) {
2359                                 min  = conn->sent;
2360                                 chan = tmp;
2361                         }
2362                 }
2363
2364                 if (hci_conn_num(hdev, type) == conn_num)
2365                         break;
2366         }
2367
2368         rcu_read_unlock();
2369
2370         if (!chan)
2371                 return NULL;
2372
2373         switch (chan->conn->type) {
2374         case ACL_LINK:
2375                 cnt = hdev->acl_cnt;
2376                 break;
2377         case SCO_LINK:
2378         case ESCO_LINK:
2379                 cnt = hdev->sco_cnt;
2380                 break;
2381         case LE_LINK:
2382                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2383                 break;
2384         default:
2385                 cnt = 0;
2386                 BT_ERR("Unknown link type");
2387         }
2388
2389         q = cnt / num;
2390         *quote = q ? q : 1;
2391         BT_DBG("chan %p quote %d", chan, *quote);
2392         return chan;
2393 }
2394
2395 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2396 {
2397         struct hci_conn_hash *h = &hdev->conn_hash;
2398         struct hci_conn *conn;
2399         int num = 0;
2400
2401         BT_DBG("%s", hdev->name);
2402
2403         rcu_read_lock();
2404
2405         list_for_each_entry_rcu(conn, &h->list, list) {
2406                 struct hci_chan *chan;
2407
2408                 if (conn->type != type)
2409                         continue;
2410
2411                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2412                         continue;
2413
2414                 num++;
2415
2416                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2417                         struct sk_buff *skb;
2418
2419                         if (chan->sent) {
2420                                 chan->sent = 0;
2421                                 continue;
2422                         }
2423
2424                         if (skb_queue_empty(&chan->data_q))
2425                                 continue;
2426
2427                         skb = skb_peek(&chan->data_q);
2428                         if (skb->priority >= HCI_PRIO_MAX - 1)
2429                                 continue;
2430
2431                         skb->priority = HCI_PRIO_MAX - 1;
2432
2433                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2434                                skb->priority);
2435                 }
2436
2437                 if (hci_conn_num(hdev, type) == num)
2438                         break;
2439         }
2440
2441         rcu_read_unlock();
2443 }
2444
2445 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2446 {
2447         /* Calculate count of blocks used by this packet */
2448         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2449 }
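
/* Worked example (editorial): with hdev->block_len == 64 and a 304 byte
 * skb (4 byte ACL header plus 300 bytes of payload), the packet occupies
 * DIV_ROUND_UP(300, 64) = 5 controller buffer blocks.
 */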
2450
2451 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2452 {
2453         if (!test_bit(HCI_RAW, &hdev->flags)) {
2454                 /* ACL tx timeout must be longer than maximum
2455                  * link supervision timeout (40.9 seconds) */
2456                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2457                                        msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2458                         hci_link_tx_to(hdev, ACL_LINK);
2459         }
2460 }
2461
2462 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2463 {
2464         unsigned int cnt = hdev->acl_cnt;
2465         struct hci_chan *chan;
2466         struct sk_buff *skb;
2467         int quote;
2468
2469         __check_timeout(hdev, cnt);
2470
2471         while (hdev->acl_cnt &&
2472                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2473                 u32 priority = (skb_peek(&chan->data_q))->priority;
2474                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2475                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2476                                skb->len, skb->priority);
2477
2478                         /* Stop if priority has changed */
2479                         if (skb->priority < priority)
2480                                 break;
2481
2482                         skb = skb_dequeue(&chan->data_q);
2483
2484                         hci_conn_enter_active_mode(chan->conn,
2485                                                    bt_cb(skb)->force_active);
2486
2487                         hci_send_frame(skb);
2488                         hdev->acl_last_tx = jiffies;
2489
2490                         hdev->acl_cnt--;
2491                         chan->sent++;
2492                         chan->conn->sent++;
2493                 }
2494         }
2495
2496         if (cnt != hdev->acl_cnt)
2497                 hci_prio_recalculate(hdev, ACL_LINK);
2498 }
2499
2500 static void hci_sched_acl_blk(struct hci_dev *hdev)
2501 {
2502         unsigned int cnt = hdev->block_cnt;
2503         struct hci_chan *chan;
2504         struct sk_buff *skb;
2505         int quote;
2506
2507         __check_timeout(hdev, cnt);
2508
2509         while (hdev->block_cnt > 0 &&
2510                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2511                 u32 priority = (skb_peek(&chan->data_q))->priority;
2512                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2513                         int blocks;
2514
2515                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2516                                skb->len, skb->priority);
2517
2518                         /* Stop if priority has changed */
2519                         if (skb->priority < priority)
2520                                 break;
2521
2522                         skb = skb_dequeue(&chan->data_q);
2523
2524                         blocks = __get_blocks(hdev, skb);
2525                         if (blocks > hdev->block_cnt)
2526                                 return;
2527
2528                         hci_conn_enter_active_mode(chan->conn,
2529                                                    bt_cb(skb)->force_active);
2530
2531                         hci_send_frame(skb);
2532                         hdev->acl_last_tx = jiffies;
2533
2534                         hdev->block_cnt -= blocks;
2535                         quote -= blocks;
2536
2537                         chan->sent += blocks;
2538                         chan->conn->sent += blocks;
2539                 }
2540         }
2541
2542         if (cnt != hdev->block_cnt)
2543                 hci_prio_recalculate(hdev, ACL_LINK);
2544 }
2545
2546 static void hci_sched_acl(struct hci_dev *hdev)
2547 {
2548         BT_DBG("%s", hdev->name);
2549
2550         if (!hci_conn_num(hdev, ACL_LINK))
2551                 return;
2552
2553         switch (hdev->flow_ctl_mode) {
2554         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2555                 hci_sched_acl_pkt(hdev);
2556                 break;
2557
2558         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2559                 hci_sched_acl_blk(hdev);
2560                 break;
2561         }
2562 }
2563
2564 /* Schedule SCO */
2565 static void hci_sched_sco(struct hci_dev *hdev)
2566 {
2567         struct hci_conn *conn;
2568         struct sk_buff *skb;
2569         int quote;
2570
2571         BT_DBG("%s", hdev->name);
2572
2573         if (!hci_conn_num(hdev, SCO_LINK))
2574                 return;
2575
2576         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2577                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2578                         BT_DBG("skb %p len %d", skb, skb->len);
2579                         hci_send_frame(skb);
2580
2581                         conn->sent++;
2582                         if (conn->sent == ~0)
2583                                 conn->sent = 0;
2584                 }
2585         }
2586 }
2587
2588 static void hci_sched_esco(struct hci_dev *hdev)
2589 {
2590         struct hci_conn *conn;
2591         struct sk_buff *skb;
2592         int quote;
2593
2594         BT_DBG("%s", hdev->name);
2595
2596         if (!hci_conn_num(hdev, ESCO_LINK))
2597                 return;
2598
2599         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2600                                                      &quote))) {
2601                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2602                         BT_DBG("skb %p len %d", skb, skb->len);
2603                         hci_send_frame(skb);
2604
2605                         conn->sent++;
2606                         if (conn->sent == ~0)
2607                                 conn->sent = 0;
2608                 }
2609         }
2610 }
2611
2612 static void hci_sched_le(struct hci_dev *hdev)
2613 {
2614         struct hci_chan *chan;
2615         struct sk_buff *skb;
2616         int quote, cnt, tmp;
2617
2618         BT_DBG("%s", hdev->name);
2619
2620         if (!hci_conn_num(hdev, LE_LINK))
2621                 return;
2622
2623         if (!test_bit(HCI_RAW, &hdev->flags)) {
2624                 /* LE tx timeout must be longer than maximum
2625                  * link supervision timeout (40.9 seconds) */
2626                 if (!hdev->le_cnt && hdev->le_pkts &&
2627                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
2628                         hci_link_tx_to(hdev, LE_LINK);
2629         }
2630
2631         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2632         tmp = cnt;
2633         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2634                 u32 priority = (skb_peek(&chan->data_q))->priority;
2635                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2636                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2637                                skb->len, skb->priority);
2638
2639                         /* Stop if priority has changed */
2640                         if (skb->priority < priority)
2641                                 break;
2642
2643                         skb = skb_dequeue(&chan->data_q);
2644
2645                         hci_send_frame(skb);
2646                         hdev->le_last_tx = jiffies;
2647
2648                         cnt--;
2649                         chan->sent++;
2650                         chan->conn->sent++;
2651                 }
2652         }
2653
2654         if (hdev->le_pkts)
2655                 hdev->le_cnt = cnt;
2656         else
2657                 hdev->acl_cnt = cnt;
2658
2659         if (cnt != tmp)
2660                 hci_prio_recalculate(hdev, LE_LINK);
2661 }
2662
2663 static void hci_tx_work(struct work_struct *work)
2664 {
2665         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2666         struct sk_buff *skb;
2667
2668         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2669                hdev->sco_cnt, hdev->le_cnt);
2670
2671         /* Schedule queues and send pending frames to the HCI driver */
2672
2673         hci_sched_acl(hdev);
2674
2675         hci_sched_sco(hdev);
2676
2677         hci_sched_esco(hdev);
2678
2679         hci_sched_le(hdev);
2680
2681         /* Send next queued raw (unknown type) packet */
2682         while ((skb = skb_dequeue(&hdev->raw_q)))
2683                 hci_send_frame(skb);
2684 }
2685
2686 /* ----- HCI RX task (incoming data processing) ----- */
2687
2688 /* ACL data packet */
2689 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2690 {
2691         struct hci_acl_hdr *hdr = (void *) skb->data;
2692         struct hci_conn *conn;
2693         __u16 handle, flags;
2694
2695         skb_pull(skb, HCI_ACL_HDR_SIZE);
2696
2697         handle = __le16_to_cpu(hdr->handle);
2698         flags  = hci_flags(handle);
2699         handle = hci_handle(handle);
2700
2701         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
2702                handle, flags);
2703
2704         hdev->stat.acl_rx++;
2705
2706         hci_dev_lock(hdev);
2707         conn = hci_conn_hash_lookup_handle(hdev, handle);
2708         hci_dev_unlock(hdev);
2709
2710         if (conn) {
2711                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2712
2713                 hci_dev_lock(hdev);
2714                 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2715                     !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2716                         mgmt_device_connected(hdev, &conn->dst, conn->type,
2717                                               conn->dst_type, 0, NULL, 0,
2718                                               conn->dev_class);
2719                 hci_dev_unlock(hdev);
2720
2721                 /* Send to upper protocol */
2722                 l2cap_recv_acldata(conn, skb, flags);
2723                 return;
2724         } else {
2725                 BT_ERR("%s ACL packet for unknown connection handle %d",
2726                        hdev->name, handle);
2727         }
2728
2729         kfree_skb(skb);
2730 }
2731
2732 /* SCO data packet */
2733 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2734 {
2735         struct hci_sco_hdr *hdr = (void *) skb->data;
2736         struct hci_conn *conn;
2737         __u16 handle;
2738
2739         skb_pull(skb, HCI_SCO_HDR_SIZE);
2740
2741         handle = __le16_to_cpu(hdr->handle);
2742
2743         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2744
2745         hdev->stat.sco_rx++;
2746
2747         hci_dev_lock(hdev);
2748         conn = hci_conn_hash_lookup_handle(hdev, handle);
2749         hci_dev_unlock(hdev);
2750
2751         if (conn) {
2752                 /* Send to upper protocol */
2753                 sco_recv_scodata(conn, skb);
2754                 return;
2755         } else {
2756                 BT_ERR("%s SCO packet for unknown connection handle %d",
2757                        hdev->name, handle);
2758         }
2759
2760         kfree_skb(skb);
2761 }
2762
2763 static void hci_rx_work(struct work_struct *work)
2764 {
2765         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2766         struct sk_buff *skb;
2767
2768         BT_DBG("%s", hdev->name);
2769
2770         while ((skb = skb_dequeue(&hdev->rx_q))) {
2771                 /* Send copy to monitor */
2772                 hci_send_to_monitor(hdev, skb);
2773
2774                 if (atomic_read(&hdev->promisc)) {
2775                         /* Send copy to the sockets */
2776                         hci_send_to_sock(hdev, skb);
2777                 }
2778
2779                 if (test_bit(HCI_RAW, &hdev->flags)) {
2780                         kfree_skb(skb);
2781                         continue;
2782                 }
2783
2784                 if (test_bit(HCI_INIT, &hdev->flags)) {
2785                         /* Don't process data packets in these states. */
2786                         switch (bt_cb(skb)->pkt_type) {
2787                         case HCI_ACLDATA_PKT:
2788                         case HCI_SCODATA_PKT:
2789                                 kfree_skb(skb);
2790                                 continue;
2791                         }
2792                 }
2793
2794                 /* Process frame */
2795                 switch (bt_cb(skb)->pkt_type) {
2796                 case HCI_EVENT_PKT:
2797                         BT_DBG("%s Event packet", hdev->name);
2798                         hci_event_packet(hdev, skb);
2799                         break;
2800
2801                 case HCI_ACLDATA_PKT:
2802                         BT_DBG("%s ACL data packet", hdev->name);
2803                         hci_acldata_packet(hdev, skb);
2804                         break;
2805
2806                 case HCI_SCODATA_PKT:
2807                         BT_DBG("%s SCO data packet", hdev->name);
2808                         hci_scodata_packet(hdev, skb);
2809                         break;
2810
2811                 default:
2812                         kfree_skb(skb);
2813                         break;
2814                 }
2815         }
2816 }
2817
2818 static void hci_cmd_work(struct work_struct *work)
2819 {
2820         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2821         struct sk_buff *skb;
2822
2823         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2824
2825         /* Send queued commands */
2826         if (atomic_read(&hdev->cmd_cnt)) {
2827                 skb = skb_dequeue(&hdev->cmd_q);
2828                 if (!skb)
2829                         return;
2830
2831                 kfree_skb(hdev->sent_cmd);
2832
2833                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2834                 if (hdev->sent_cmd) {
2835                         atomic_dec(&hdev->cmd_cnt);
2836                         hci_send_frame(skb);
2837                         if (test_bit(HCI_RESET, &hdev->flags))
2838                                 del_timer(&hdev->cmd_timer);
2839                         else
2840                                 mod_timer(&hdev->cmd_timer,
2841                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2842                 } else {
2843                         skb_queue_head(&hdev->cmd_q, skb);
2844                         queue_work(hdev->workqueue, &hdev->cmd_work);
2845                 }
2846         }
2847 }
2848
2849 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2850 {
2851         /* General inquiry access code (GIAC) */
2852         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2853         struct hci_cp_inquiry cp;
2854
2855         BT_DBG("%s", hdev->name);
2856
2857         if (test_bit(HCI_INQUIRY, &hdev->flags))
2858                 return -EINPROGRESS;
2859
2860         inquiry_cache_flush(hdev);
2861
2862         memset(&cp, 0, sizeof(cp));
2863         memcpy(&cp.lap, lap, sizeof(cp.lap));
2864         cp.length  = length;
2865
2866         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2867 }
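
/* Example sketch (hypothetical caller): discovery code starts an inquiry
 * like this; the length is in units of 1.28 s, so 0x08 gives roughly
 * 10.24 s, matching the value mgmt uses for BR/EDR discovery.
 */
static int foo_start_discovery(struct hci_dev *hdev)
{
        return hci_do_inquiry(hdev, 0x08);
}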
2868
2869 int hci_cancel_inquiry(struct hci_dev *hdev)
2870 {
2871         BT_DBG("%s", hdev->name);
2872
2873         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2874                 return -EALREADY;
2875
2876         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2877 }
2878
2879 u8 bdaddr_to_le(u8 bdaddr_type)
2880 {
2881         switch (bdaddr_type) {
2882         case BDADDR_LE_PUBLIC:
2883                 return ADDR_LE_DEV_PUBLIC;
2884
2885         default:
2886                 /* Fallback to LE Random address type */
2887                 return ADDR_LE_DEV_RANDOM;
2888         }
2889 }