Bluetooth: Add synchronization train parameters reading support
net/bluetooth/hci_core.c
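
This revision adds a fourth controller-init stage: when the controller advertises the Synchronization Train feature (bit 0x04 in byte 0 of extended feature page 2), hci_init4_req() below issues HCI_OP_READ_SYNC_TRAIN_PARAMS so the train parameters are read during initialization.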
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
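
/* Illustrative sketch (not part of this file, compiled out): how a
 * driver might use __hci_cmd_sync() to issue a command and consume the
 * Command Complete return parameters. The 0xfc01 vendor opcode is a
 * made-up placeholder.
 */
#if 0
static int example_vendor_cmd(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, 0xfc01, 0, NULL, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* skb->data now points at the command's return parameters */
        kfree_skb(skb);
        return 0;
}
#endif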

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
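
/* Illustrative sketch (not part of this file, compiled out): the
 * request builders below all follow this shape -- queue commands with
 * hci_req_add() and let hci_req_sync() run the queue and wait for the
 * final completion.
 */
#if 0
static void example_req(struct hci_request *req, unsigned long opt)
{
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
}

/* err = hci_req_sync(hdev, example_req, 0, HCI_INIT_TIMEOUT); */
#endif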

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
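        /* Inquiry Mode values as used by Write Inquiry Mode: 0x00 is
         * standard, 0x01 is with RSSI, 0x02 is with RSSI or extended
         * inquiry result. The manufacturer/revision checks below
         * whitelist controllers known to deliver RSSI results despite
         * not advertising the corresponding LMP feature.
         */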
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

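        /* Each byte of events[] carries eight event mask bits: event
         * bit N lives at events[N / 8] & (1 << (N % 8)), matching the
         * little-endian layout of the Set Event Mask parameter.
         */
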
        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Check for Synchronization Train support */
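        /* (Synchronization Train is bit 0x04 of feature page 2, byte 0,
         * introduced alongside Connectionless Slave Broadcast.)
         */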
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, BR/EDR, and dual-mode
         * BR/EDR/LE controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
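
/* Illustrative sketch (not part of this file, compiled out):
 * hci_dev_get() returns a held reference, so every successful lookup
 * must be balanced with hci_dev_put(), as the ioctl helpers below do.
 */
#if 0
struct hci_dev *hdev = hci_dev_get(0);
if (hdev) {
        /* ... use hdev ... */
        hci_dev_put(hdev);
}
#endif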

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and then
         * copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
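
/* Sketch of the userspace side (an assumption about typical callers,
 * not part of this file): hci_inquiry() backs the HCIINQUIRY ioctl,
 * which expects a struct hci_inquiry_req immediately followed by room
 * for the inquiry_info results in one buffer, roughly:
 *
 *      buf = malloc(sizeof(struct hci_inquiry_req) +
 *                   max_rsp * sizeof(struct inquiry_info));
 *      ir = (struct hci_inquiry_req *) buf;
 *      ir->dev_id = dev_id;
 *      ir->num_rsp = max_rsp;
 *      ...
 *      ioctl(dd, HCIINQUIRY, (unsigned long) buf);
 */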

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

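        /* Advertising data is a sequence of [length][AD type][payload]
         * elements; e.g. 02 01 06 is the Flags element with
         * LE General Discoverable and BR/EDR Not Supported set.
         */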
        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non-BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
1192                 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1193                         set_bit(HCI_RAW, &hdev->flags);
1194
1195                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1196                         set_bit(HCI_RAW, &hdev->flags);
1197
1198                 if (!test_bit(HCI_RAW, &hdev->flags) &&
1199                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1200                         ret = __hci_init(hdev);
1201         }
1202
1203         clear_bit(HCI_INIT, &hdev->flags);
1204
1205         if (!ret) {
1206                 hci_dev_hold(hdev);
1207                 set_bit(HCI_UP, &hdev->flags);
1208                 hci_notify(hdev, HCI_DEV_UP);
1209                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1210                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1211                     mgmt_valid_hdev(hdev)) {
1212                         hci_dev_lock(hdev);
1213                         mgmt_powered(hdev, 1);
1214                         hci_dev_unlock(hdev);
1215                 }
1216         } else {
1217                 /* Init failed, cleanup */
1218                 flush_work(&hdev->tx_work);
1219                 flush_work(&hdev->cmd_work);
1220                 flush_work(&hdev->rx_work);
1221
1222                 skb_queue_purge(&hdev->cmd_q);
1223                 skb_queue_purge(&hdev->rx_q);
1224
1225                 if (hdev->flush)
1226                         hdev->flush(hdev);
1227
1228                 if (hdev->sent_cmd) {
1229                         kfree_skb(hdev->sent_cmd);
1230                         hdev->sent_cmd = NULL;
1231                 }
1232
1233                 hdev->close(hdev);
1234                 hdev->flags = 0;
1235         }
1236
1237 done:
1238         hci_req_unlock(hdev);
1239         hci_dev_put(hdev);
1240         return ret;
1241 }
1242
1243 static int hci_dev_do_close(struct hci_dev *hdev)
1244 {
1245         BT_DBG("%s %p", hdev->name, hdev);
1246
1247         cancel_delayed_work(&hdev->power_off);
1248
1249         hci_req_cancel(hdev, ENODEV);
1250         hci_req_lock(hdev);
1251
1252         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1253                 del_timer_sync(&hdev->cmd_timer);
1254                 hci_req_unlock(hdev);
1255                 return 0;
1256         }
1257
1258         /* Flush RX and TX works */
1259         flush_work(&hdev->tx_work);
1260         flush_work(&hdev->rx_work);
1261
1262         if (hdev->discov_timeout > 0) {
1263                 cancel_delayed_work(&hdev->discov_off);
1264                 hdev->discov_timeout = 0;
1265                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1266         }
1267
1268         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1269                 cancel_delayed_work(&hdev->service_cache);
1270
1271         cancel_delayed_work_sync(&hdev->le_scan_disable);
1272
1273         hci_dev_lock(hdev);
1274         hci_inquiry_cache_flush(hdev);
1275         hci_conn_hash_flush(hdev);
1276         hci_dev_unlock(hdev);
1277
1278         hci_notify(hdev, HCI_DEV_DOWN);
1279
1280         if (hdev->flush)
1281                 hdev->flush(hdev);
1282
1283         /* Reset device */
1284         skb_queue_purge(&hdev->cmd_q);
1285         atomic_set(&hdev->cmd_cnt, 1);
1286         if (!test_bit(HCI_RAW, &hdev->flags) &&
1287             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1288                 set_bit(HCI_INIT, &hdev->flags);
1289                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1290                 clear_bit(HCI_INIT, &hdev->flags);
1291         }
1292
1293         /* flush cmd  work */
1294         flush_work(&hdev->cmd_work);
1295
1296         /* Drop queues */
1297         skb_queue_purge(&hdev->rx_q);
1298         skb_queue_purge(&hdev->cmd_q);
1299         skb_queue_purge(&hdev->raw_q);
1300
1301         /* Drop last sent command */
1302         if (hdev->sent_cmd) {
1303                 del_timer_sync(&hdev->cmd_timer);
1304                 kfree_skb(hdev->sent_cmd);
1305                 hdev->sent_cmd = NULL;
1306         }
1307
1308         kfree_skb(hdev->recv_evt);
1309         hdev->recv_evt = NULL;
1310
1311         /* After this point our queues are empty
1312          * and no tasks are scheduled. */
1313         hdev->close(hdev);
1314
1315         /* Clear flags */
1316         hdev->flags = 0;
1317         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1318
1319         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1320             mgmt_valid_hdev(hdev)) {
1321                 hci_dev_lock(hdev);
1322                 mgmt_powered(hdev, 0);
1323                 hci_dev_unlock(hdev);
1324         }
1325
1326         /* Controller radio is available but is currently powered down */
1327         hdev->amp_status = 0;
1328
1329         memset(hdev->eir, 0, sizeof(hdev->eir));
1330         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1331
1332         hci_req_unlock(hdev);
1333
1334         hci_dev_put(hdev);
1335         return 0;
1336 }
1337
1338 int hci_dev_close(__u16 dev)
1339 {
1340         struct hci_dev *hdev;
1341         int err;
1342
1343         hdev = hci_dev_get(dev);
1344         if (!hdev)
1345                 return -ENODEV;
1346
1347         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1348                 err = -EBUSY;
1349                 goto done;
1350         }
1351
1352         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1353                 cancel_delayed_work(&hdev->power_off);
1354
1355         err = hci_dev_do_close(hdev);
1356
1357 done:
1358         hci_dev_put(hdev);
1359         return err;
1360 }
1361
1362 int hci_dev_reset(__u16 dev)
1363 {
1364         struct hci_dev *hdev;
1365         int ret = 0;
1366
1367         hdev = hci_dev_get(dev);
1368         if (!hdev)
1369                 return -ENODEV;
1370
1371         hci_req_lock(hdev);
1372
1373         if (!test_bit(HCI_UP, &hdev->flags)) {
1374                 ret = -ENETDOWN;
1375                 goto done;
1376         }
1377
1378         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1379                 ret = -EBUSY;
1380                 goto done;
1381         }
1382
1383         /* Drop queues */
1384         skb_queue_purge(&hdev->rx_q);
1385         skb_queue_purge(&hdev->cmd_q);
1386
1387         hci_dev_lock(hdev);
1388         hci_inquiry_cache_flush(hdev);
1389         hci_conn_hash_flush(hdev);
1390         hci_dev_unlock(hdev);
1391
1392         if (hdev->flush)
1393                 hdev->flush(hdev);
1394
1395         atomic_set(&hdev->cmd_cnt, 1);
1396         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1397
1398         if (!test_bit(HCI_RAW, &hdev->flags))
1399                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1400
1401 done:
1402         hci_req_unlock(hdev);
1403         hci_dev_put(hdev);
1404         return ret;
1405 }
1406
1407 int hci_dev_reset_stat(__u16 dev)
1408 {
1409         struct hci_dev *hdev;
1410         int ret = 0;
1411
1412         hdev = hci_dev_get(dev);
1413         if (!hdev)
1414                 return -ENODEV;
1415
1416         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1417                 ret = -EBUSY;
1418                 goto done;
1419         }
1420
1421         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1422
1423 done:
1424         hci_dev_put(hdev);
1425         return ret;
1426 }
1427
1428 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1429 {
1430         struct hci_dev *hdev;
1431         struct hci_dev_req dr;
1432         int err = 0;
1433
1434         if (copy_from_user(&dr, arg, sizeof(dr)))
1435                 return -EFAULT;
1436
1437         hdev = hci_dev_get(dr.dev_id);
1438         if (!hdev)
1439                 return -ENODEV;
1440
1441         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1442                 err = -EBUSY;
1443                 goto done;
1444         }
1445
1446         switch (cmd) {
1447         case HCISETAUTH:
1448                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1449                                    HCI_INIT_TIMEOUT);
1450                 break;
1451
1452         case HCISETENCRYPT:
1453                 if (!lmp_encrypt_capable(hdev)) {
1454                         err = -EOPNOTSUPP;
1455                         break;
1456                 }
1457
1458                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1459                         /* Auth must be enabled first */
1460                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1461                                            HCI_INIT_TIMEOUT);
1462                         if (err)
1463                                 break;
1464                 }
1465
1466                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1467                                    HCI_INIT_TIMEOUT);
1468                 break;
1469
1470         case HCISETSCAN:
1471                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1472                                    HCI_INIT_TIMEOUT);
1473                 break;
1474
1475         case HCISETLINKPOL:
1476                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1477                                    HCI_INIT_TIMEOUT);
1478                 break;
1479
1480         case HCISETLINKMODE:
1481                 hdev->link_mode = ((__u16) dr.dev_opt) &
1482                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1483                 break;
1484
1485         case HCISETPTYPE:
1486                 hdev->pkt_type = (__u16) dr.dev_opt;
1487                 break;
1488
1489         case HCISETACLMTU:
1490                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1491                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1492                 break;
1493
1494         case HCISETSCOMTU:
1495                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1496                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1497                 break;
1498
1499         default:
1500                 err = -EINVAL;
1501                 break;
1502         }
1503
1504 done:
1505         hci_dev_put(hdev);
1506         return err;
1507 }
1508
1509 int hci_get_dev_list(void __user *arg)
1510 {
1511         struct hci_dev *hdev;
1512         struct hci_dev_list_req *dl;
1513         struct hci_dev_req *dr;
1514         int n = 0, size, err;
1515         __u16 dev_num;
1516
1517         if (get_user(dev_num, (__u16 __user *) arg))
1518                 return -EFAULT;
1519
1520         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1521                 return -EINVAL;
1522
1523         size = sizeof(*dl) + dev_num * sizeof(*dr);
1524
1525         dl = kzalloc(size, GFP_KERNEL);
1526         if (!dl)
1527                 return -ENOMEM;
1528
1529         dr = dl->dev_req;
1530
1531         read_lock(&hci_dev_list_lock);
1532         list_for_each_entry(hdev, &hci_dev_list, list) {
1533                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1534                         cancel_delayed_work(&hdev->power_off);
1535
1536                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1537                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1538
1539                 (dr + n)->dev_id  = hdev->id;
1540                 (dr + n)->dev_opt = hdev->flags;
1541
1542                 if (++n >= dev_num)
1543                         break;
1544         }
1545         read_unlock(&hci_dev_list_lock);
1546
1547         dl->dev_num = n;
1548         size = sizeof(*dl) + n * sizeof(*dr);
1549
1550         err = copy_to_user(arg, dl, size);
1551         kfree(dl);
1552
1553         return err ? -EFAULT : 0;
1554 }
1555
1556 int hci_get_dev_info(void __user *arg)
1557 {
1558         struct hci_dev *hdev;
1559         struct hci_dev_info di;
1560         int err = 0;
1561
1562         if (copy_from_user(&di, arg, sizeof(di)))
1563                 return -EFAULT;
1564
1565         hdev = hci_dev_get(di.dev_id);
1566         if (!hdev)
1567                 return -ENODEV;
1568
1569         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1570                 cancel_delayed_work_sync(&hdev->power_off);
1571
1572         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1573                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1574
1575         strcpy(di.name, hdev->name);
1576         di.bdaddr   = hdev->bdaddr;
1577         di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1578         di.flags    = hdev->flags;
1579         di.pkt_type = hdev->pkt_type;
1580         if (lmp_bredr_capable(hdev)) {
1581                 di.acl_mtu  = hdev->acl_mtu;
1582                 di.acl_pkts = hdev->acl_pkts;
1583                 di.sco_mtu  = hdev->sco_mtu;
1584                 di.sco_pkts = hdev->sco_pkts;
1585         } else {
1586                 di.acl_mtu  = hdev->le_mtu;
1587                 di.acl_pkts = hdev->le_pkts;
1588                 di.sco_mtu  = 0;
1589                 di.sco_pkts = 0;
1590         }
1591         di.link_policy = hdev->link_policy;
1592         di.link_mode   = hdev->link_mode;
1593
1594         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1595         memcpy(&di.features, &hdev->features, sizeof(di.features));
1596
1597         if (copy_to_user(arg, &di, sizeof(di)))
1598                 err = -EFAULT;
1599
1600         hci_dev_put(hdev);
1601
1602         return err;
1603 }
1604
1605 /* ---- Interface to HCI drivers ---- */
1606
1607 static int hci_rfkill_set_block(void *data, bool blocked)
1608 {
1609         struct hci_dev *hdev = data;
1610
1611         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1612
1613         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1614                 return -EBUSY;
1615
1616         if (!blocked)
1617                 return 0;
1618
1619         hci_dev_do_close(hdev);
1620
1621         return 0;
1622 }
1623
1624 static const struct rfkill_ops hci_rfkill_ops = {
1625         .set_block = hci_rfkill_set_block,
1626 };
1627
1628 static void hci_power_on(struct work_struct *work)
1629 {
1630         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1631         int err;
1632
1633         BT_DBG("%s", hdev->name);
1634
1635         err = hci_dev_open(hdev->id);
1636         if (err < 0) {
1637                 mgmt_set_powered_failed(hdev, err);
1638                 return;
1639         }
1640
1641         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1642                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1643                                    HCI_AUTO_OFF_TIMEOUT);
1644
1645         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1646                 mgmt_index_added(hdev);
1647 }
1648
1649 static void hci_power_off(struct work_struct *work)
1650 {
1651         struct hci_dev *hdev = container_of(work, struct hci_dev,
1652                                             power_off.work);
1653
1654         BT_DBG("%s", hdev->name);
1655
1656         hci_dev_do_close(hdev);
1657 }
1658
1659 static void hci_discov_off(struct work_struct *work)
1660 {
1661         struct hci_dev *hdev;
1662         u8 scan = SCAN_PAGE;
1663
1664         hdev = container_of(work, struct hci_dev, discov_off.work);
1665
1666         BT_DBG("%s", hdev->name);
1667
1668         hci_dev_lock(hdev);
1669
1670         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1671
1672         hdev->discov_timeout = 0;
1673
1674         hci_dev_unlock(hdev);
1675 }
1676
1677 int hci_uuids_clear(struct hci_dev *hdev)
1678 {
1679         struct bt_uuid *uuid, *tmp;
1680
1681         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1682                 list_del(&uuid->list);
1683                 kfree(uuid);
1684         }
1685
1686         return 0;
1687 }
1688
1689 int hci_link_keys_clear(struct hci_dev *hdev)
1690 {
1691         struct list_head *p, *n;
1692
1693         list_for_each_safe(p, n, &hdev->link_keys) {
1694                 struct link_key *key;
1695
1696                 key = list_entry(p, struct link_key, list);
1697
1698                 list_del(p);
1699                 kfree(key);
1700         }
1701
1702         return 0;
1703 }
1704
1705 int hci_smp_ltks_clear(struct hci_dev *hdev)
1706 {
1707         struct smp_ltk *k, *tmp;
1708
1709         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1710                 list_del(&k->list);
1711                 kfree(k);
1712         }
1713
1714         return 0;
1715 }
1716
1717 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1718 {
1719         struct link_key *k;
1720
1721         list_for_each_entry(k, &hdev->link_keys, list)
1722                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1723                         return k;
1724
1725         return NULL;
1726 }
1727
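/* Decide whether a link key should be stored persistently. The
 * numeric checks follow the values defined by the Bluetooth core
 * specification: key types below 0x03 stem from legacy (pre-SSP)
 * pairing, an authentication requirement above 0x01 means some form
 * of bonding was requested, and 0x02/0x03 denote dedicated bonding.
 */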
1728 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1729                                u8 key_type, u8 old_key_type)
1730 {
1731         /* Legacy key */
1732         if (key_type < 0x03)
1733                 return true;
1734
1735         /* Debug keys are insecure so don't store them persistently */
1736         if (key_type == HCI_LK_DEBUG_COMBINATION)
1737                 return false;
1738
1739         /* Changed combination key and there's no previous one */
1740         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1741                 return false;
1742
1743         /* Security mode 3 case */
1744         if (!conn)
1745                 return true;
1746
1747         /* Neither the local nor the remote side requested no-bonding */
1748         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1749                 return true;
1750
1751         /* Local side had dedicated bonding as requirement */
1752         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1753                 return true;
1754
1755         /* Remote side had dedicated bonding as requirement */
1756         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1757                 return true;
1758
1759         /* If none of the above criteria match, then don't store the key
1760          * persistently */
1761         return false;
1762 }
1763
1764 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1765 {
1766         struct smp_ltk *k;
1767
1768         list_for_each_entry(k, &hdev->long_term_keys, list) {
1769                 if (k->ediv != ediv ||
1770                     memcmp(rand, k->rand, sizeof(k->rand)))
1771                         continue;
1772
1773                 return k;
1774         }
1775
1776         return NULL;
1777 }
1778
1779 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1780                                      u8 addr_type)
1781 {
1782         struct smp_ltk *k;
1783
1784         list_for_each_entry(k, &hdev->long_term_keys, list)
1785                 if (addr_type == k->bdaddr_type &&
1786                     bacmp(bdaddr, &k->bdaddr) == 0)
1787                         return k;
1788
1789         return NULL;
1790 }
1791
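/* Create or update the stored link key for @bdaddr. When @new_key is
 * set the key was freshly generated by the controller: user space is
 * told about it via mgmt, and non-persistent keys are marked to be
 * flushed once the connection goes down.
 */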
1792 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1793                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1794 {
1795         struct link_key *key, *old_key;
1796         u8 old_key_type;
1797         bool persistent;
1798
1799         old_key = hci_find_link_key(hdev, bdaddr);
1800         if (old_key) {
1801                 old_key_type = old_key->type;
1802                 key = old_key;
1803         } else {
1804                 old_key_type = conn ? conn->key_type : 0xff;
1805                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1806                 if (!key)
1807                         return -ENOMEM;
1808                 list_add(&key->list, &hdev->link_keys);
1809         }
1810
1811         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1812
1813         /* Some buggy controller combinations generate a changed
1814          * combination key for legacy pairing even when there's no
1815          * previous key */
1816         if (type == HCI_LK_CHANGED_COMBINATION &&
1817             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1818                 type = HCI_LK_COMBINATION;
1819                 if (conn)
1820                         conn->key_type = type;
1821         }
1822
1823         bacpy(&key->bdaddr, bdaddr);
1824         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1825         key->pin_len = pin_len;
1826
1827         if (type == HCI_LK_CHANGED_COMBINATION)
1828                 key->type = old_key_type;
1829         else
1830                 key->type = type;
1831
1832         if (!new_key)
1833                 return 0;
1834
1835         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1836
1837         mgmt_new_link_key(hdev, key, persistent);
1838
1839         if (conn)
1840                 conn->flush_key = !persistent;
1841
1842         return 0;
1843 }
1844
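/* Create or update a stored SMP key for @bdaddr. Both Short Term
 * Keys and Long Term Keys are kept in the same list, but only proper
 * LTKs are reported to user space via mgmt.
 */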
1845 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1846                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
1847                 __le16 ediv, u8 rand[8])
1848 {
1849         struct smp_ltk *key, *old_key;
1850
1851         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1852                 return 0;
1853
1854         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1855         if (old_key)
1856                 key = old_key;
1857         else {
1858                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1859                 if (!key)
1860                         return -ENOMEM;
1861                 list_add(&key->list, &hdev->long_term_keys);
1862         }
1863
1864         bacpy(&key->bdaddr, bdaddr);
1865         key->bdaddr_type = addr_type;
1866         memcpy(key->val, tk, sizeof(key->val));
1867         key->authenticated = authenticated;
1868         key->ediv = ediv;
1869         key->enc_size = enc_size;
1870         key->type = type;
1871         memcpy(key->rand, rand, sizeof(key->rand));
1872
1873         if (!new_key)
1874                 return 0;
1875
1876         if (type & HCI_SMP_LTK)
1877                 mgmt_new_ltk(hdev, key, 1);
1878
1879         return 0;
1880 }
1881
1882 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1883 {
1884         struct link_key *key;
1885
1886         key = hci_find_link_key(hdev, bdaddr);
1887         if (!key)
1888                 return -ENOENT;
1889
1890         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1891
1892         list_del(&key->list);
1893         kfree(key);
1894
1895         return 0;
1896 }
1897
1898 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1899 {
1900         struct smp_ltk *k, *tmp;
1901
1902         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1903                 if (bacmp(bdaddr, &k->bdaddr))
1904                         continue;
1905
1906                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1907
1908                 list_del(&k->list);
1909                 kfree(k);
1910         }
1911
1912         return 0;
1913 }
1914
1915 /* HCI command timer function: recover from a command tx timeout */
1916 static void hci_cmd_timeout(unsigned long arg)
1917 {
1918         struct hci_dev *hdev = (void *) arg;
1919
1920         if (hdev->sent_cmd) {
1921                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1922                 u16 opcode = __le16_to_cpu(sent->opcode);
1923
1924                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1925         } else {
1926                 BT_ERR("%s command tx timeout", hdev->name);
1927         }
1928
1929         atomic_set(&hdev->cmd_cnt, 1);
1930         queue_work(hdev->workqueue, &hdev->cmd_work);
1931 }
1932
1933 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1934                                           bdaddr_t *bdaddr)
1935 {
1936         struct oob_data *data;
1937
1938         list_for_each_entry(data, &hdev->remote_oob_data, list)
1939                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1940                         return data;
1941
1942         return NULL;
1943 }
1944
1945 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1946 {
1947         struct oob_data *data;
1948
1949         data = hci_find_remote_oob_data(hdev, bdaddr);
1950         if (!data)
1951                 return -ENOENT;
1952
1953         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1954
1955         list_del(&data->list);
1956         kfree(data);
1957
1958         return 0;
1959 }
1960
1961 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1962 {
1963         struct oob_data *data, *n;
1964
1965         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1966                 list_del(&data->list);
1967                 kfree(data);
1968         }
1969
1970         return 0;
1971 }
1972
1973 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1974                             u8 *randomizer)
1975 {
1976         struct oob_data *data;
1977
1978         data = hci_find_remote_oob_data(hdev, bdaddr);
1979
1980         if (!data) {
1981                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1982                 if (!data)
1983                         return -ENOMEM;
1984
1985                 bacpy(&data->bdaddr, bdaddr);
1986                 list_add(&data->list, &hdev->remote_oob_data);
1987         }
1988
1989         memcpy(data->hash, hash, sizeof(data->hash));
1990         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1991
1992         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1993
1994         return 0;
1995 }
1996
1997 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1998 {
1999         struct bdaddr_list *b;
2000
2001         list_for_each_entry(b, &hdev->blacklist, list)
2002                 if (bacmp(bdaddr, &b->bdaddr) == 0)
2003                         return b;
2004
2005         return NULL;
2006 }
2007
2008 int hci_blacklist_clear(struct hci_dev *hdev)
2009 {
2010         struct list_head *p, *n;
2011
2012         list_for_each_safe(p, n, &hdev->blacklist) {
2013                 struct bdaddr_list *b;
2014
2015                 b = list_entry(p, struct bdaddr_list, list);
2016
2017                 list_del(p);
2018                 kfree(b);
2019         }
2020
2021         return 0;
2022 }
2023
2024 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2025 {
2026         struct bdaddr_list *entry;
2027
2028         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2029                 return -EBADF;
2030
2031         if (hci_blacklist_lookup(hdev, bdaddr))
2032                 return -EEXIST;
2033
2034         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2035         if (!entry)
2036                 return -ENOMEM;
2037
2038         bacpy(&entry->bdaddr, bdaddr);
2039
2040         list_add(&entry->list, &hdev->blacklist);
2041
2042         return mgmt_device_blocked(hdev, bdaddr, type);
2043 }
2044
2045 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2046 {
2047         struct bdaddr_list *entry;
2048
2049         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2050                 return hci_blacklist_clear(hdev);
2051
2052         entry = hci_blacklist_lookup(hdev, bdaddr);
2053         if (!entry)
2054                 return -ENOENT;
2055
2056         list_del(&entry->list);
2057         kfree(entry);
2058
2059         return mgmt_device_unblocked(hdev, bdaddr, type);
2060 }
2061
2062 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2063 {
2064         if (status) {
2065                 BT_ERR("Failed to start inquiry: status %d", status);
2066
2067                 hci_dev_lock(hdev);
2068                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2069                 hci_dev_unlock(hdev);
2070                 return;
2071         }
2072 }
2073
2074 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2075 {
2076         /* General inquiry access code (GIAC) */
2077         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2078         struct hci_request req;
2079         struct hci_cp_inquiry cp;
2080         int err;
2081
2082         if (status) {
2083                 BT_ERR("Failed to disable LE scanning: status %d", status);
2084                 return;
2085         }
2086
2087         switch (hdev->discovery.type) {
2088         case DISCOV_TYPE_LE:
2089                 hci_dev_lock(hdev);
2090                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2091                 hci_dev_unlock(hdev);
2092                 break;
2093
2094         case DISCOV_TYPE_INTERLEAVED:
2095                 hci_req_init(&req, hdev);
2096
2097                 memset(&cp, 0, sizeof(cp));
2098                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2099                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2100                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2101
2102                 hci_dev_lock(hdev);
2103
2104                 hci_inquiry_cache_flush(hdev);
2105
2106                 err = hci_req_run(&req, inquiry_complete);
2107                 if (err) {
2108                         BT_ERR("Inquiry request failed: err %d", err);
2109                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2110                 }
2111
2112                 hci_dev_unlock(hdev);
2113                 break;
2114         }
2115 }
2116
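/* Delayed work that turns LE scanning back off when the discovery
 * timeout expires. The completion handler above then either marks
 * discovery as stopped (LE-only) or chains a BR/EDR inquiry for
 * interleaved discovery.
 */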
2117 static void le_scan_disable_work(struct work_struct *work)
2118 {
2119         struct hci_dev *hdev = container_of(work, struct hci_dev,
2120                                             le_scan_disable.work);
2121         struct hci_cp_le_set_scan_enable cp;
2122         struct hci_request req;
2123         int err;
2124
2125         BT_DBG("%s", hdev->name);
2126
2127         hci_req_init(&req, hdev);
2128
2129         memset(&cp, 0, sizeof(cp));
2130         cp.enable = LE_SCAN_DISABLE;
2131         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2132
2133         err = hci_req_run(&req, le_scan_disable_work_complete);
2134         if (err)
2135                 BT_ERR("Disable LE scanning request failed: err %d", err);
2136 }
2137
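/* The functions below make up the driver-facing lifecycle:
 * hci_alloc_dev() -> fill in callbacks -> hci_register_dev(), and on
 * teardown hci_unregister_dev() followed by hci_free_dev(). A
 * minimal, illustrative sketch (my_open/my_close/my_send are
 * hypothetical driver callbacks, not taken from any in-tree driver):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */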
2138 /* Alloc HCI device */
2139 struct hci_dev *hci_alloc_dev(void)
2140 {
2141         struct hci_dev *hdev;
2142
2143         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2144         if (!hdev)
2145                 return NULL;
2146
2147         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2148         hdev->esco_type = (ESCO_HV1);
2149         hdev->link_mode = (HCI_LM_ACCEPT);
2150         hdev->io_capability = 0x03; /* No Input No Output */
2151         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2152         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2153
2154         hdev->sniff_max_interval = 800;
2155         hdev->sniff_min_interval = 80;
2156
2157         mutex_init(&hdev->lock);
2158         mutex_init(&hdev->req_lock);
2159
2160         INIT_LIST_HEAD(&hdev->mgmt_pending);
2161         INIT_LIST_HEAD(&hdev->blacklist);
2162         INIT_LIST_HEAD(&hdev->uuids);
2163         INIT_LIST_HEAD(&hdev->link_keys);
2164         INIT_LIST_HEAD(&hdev->long_term_keys);
2165         INIT_LIST_HEAD(&hdev->remote_oob_data);
2166         INIT_LIST_HEAD(&hdev->conn_hash.list);
2167
2168         INIT_WORK(&hdev->rx_work, hci_rx_work);
2169         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2170         INIT_WORK(&hdev->tx_work, hci_tx_work);
2171         INIT_WORK(&hdev->power_on, hci_power_on);
2172
2173         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2174         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2175         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2176
2177         skb_queue_head_init(&hdev->rx_q);
2178         skb_queue_head_init(&hdev->cmd_q);
2179         skb_queue_head_init(&hdev->raw_q);
2180
2181         init_waitqueue_head(&hdev->req_wait_q);
2182
2183         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2184
2185         hci_init_sysfs(hdev);
2186         discovery_init(hdev);
2187
2188         return hdev;
2189 }
2190 EXPORT_SYMBOL(hci_alloc_dev);
2191
2192 /* Free HCI device */
2193 void hci_free_dev(struct hci_dev *hdev)
2194 {
2195         /* will free via device release */
2196         put_device(&hdev->dev);
2197 }
2198 EXPORT_SYMBOL(hci_free_dev);
2199
2200 /* Register HCI device */
2201 int hci_register_dev(struct hci_dev *hdev)
2202 {
2203         int id, error;
2204
2205         if (!hdev->open || !hdev->close)
2206                 return -EINVAL;
2207
2208         /* Do not allow HCI_AMP devices to register at index 0,
2209          * so the index can be used as the AMP controller ID.
2210          */
2211         switch (hdev->dev_type) {
2212         case HCI_BREDR:
2213                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2214                 break;
2215         case HCI_AMP:
2216                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2217                 break;
2218         default:
2219                 return -EINVAL;
2220         }
2221
2222         if (id < 0)
2223                 return id;
2224
2225         sprintf(hdev->name, "hci%d", id);
2226         hdev->id = id;
2227
2228         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2229
2230         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2231                                           WQ_MEM_RECLAIM, 1, hdev->name);
2232         if (!hdev->workqueue) {
2233                 error = -ENOMEM;
2234                 goto err;
2235         }
2236
2237         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2238                                               WQ_MEM_RECLAIM, 1, hdev->name);
2239         if (!hdev->req_workqueue) {
2240                 destroy_workqueue(hdev->workqueue);
2241                 error = -ENOMEM;
2242                 goto err;
2243         }
2244
2245         error = hci_add_sysfs(hdev);
2246         if (error < 0)
2247                 goto err_wqueue;
2248
2249         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2250                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2251                                     hdev);
2252         if (hdev->rfkill) {
2253                 if (rfkill_register(hdev->rfkill) < 0) {
2254                         rfkill_destroy(hdev->rfkill);
2255                         hdev->rfkill = NULL;
2256                 }
2257         }
2258
2259         set_bit(HCI_SETUP, &hdev->dev_flags);
2260
2261         if (hdev->dev_type != HCI_AMP)
2262                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2263
2264         write_lock(&hci_dev_list_lock);
2265         list_add(&hdev->list, &hci_dev_list);
2266         write_unlock(&hci_dev_list_lock);
2267
2268         hci_notify(hdev, HCI_DEV_REG);
2269         hci_dev_hold(hdev);
2270
2271         queue_work(hdev->req_workqueue, &hdev->power_on);
2272
2273         return id;
2274
2275 err_wqueue:
2276         destroy_workqueue(hdev->workqueue);
2277         destroy_workqueue(hdev->req_workqueue);
2278 err:
2279         ida_simple_remove(&hci_index_ida, hdev->id);
2280
2281         return error;
2282 }
2283 EXPORT_SYMBOL(hci_register_dev);
2284
2285 /* Unregister HCI device */
2286 void hci_unregister_dev(struct hci_dev *hdev)
2287 {
2288         int i, id;
2289
2290         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2291
2292         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2293
2294         id = hdev->id;
2295
2296         write_lock(&hci_dev_list_lock);
2297         list_del(&hdev->list);
2298         write_unlock(&hci_dev_list_lock);
2299
2300         hci_dev_do_close(hdev);
2301
2302         for (i = 0; i < NUM_REASSEMBLY; i++)
2303                 kfree_skb(hdev->reassembly[i]);
2304
2305         cancel_work_sync(&hdev->power_on);
2306
2307         if (!test_bit(HCI_INIT, &hdev->flags) &&
2308             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2309                 hci_dev_lock(hdev);
2310                 mgmt_index_removed(hdev);
2311                 hci_dev_unlock(hdev);
2312         }
2313
2314         /* mgmt_index_removed should take care of emptying the
2315          * pending list */
2316         BUG_ON(!list_empty(&hdev->mgmt_pending));
2317
2318         hci_notify(hdev, HCI_DEV_UNREG);
2319
2320         if (hdev->rfkill) {
2321                 rfkill_unregister(hdev->rfkill);
2322                 rfkill_destroy(hdev->rfkill);
2323         }
2324
2325         hci_del_sysfs(hdev);
2326
2327         destroy_workqueue(hdev->workqueue);
2328         destroy_workqueue(hdev->req_workqueue);
2329
2330         hci_dev_lock(hdev);
2331         hci_blacklist_clear(hdev);
2332         hci_uuids_clear(hdev);
2333         hci_link_keys_clear(hdev);
2334         hci_smp_ltks_clear(hdev);
2335         hci_remote_oob_data_clear(hdev);
2336         hci_dev_unlock(hdev);
2337
2338         hci_dev_put(hdev);
2339
2340         ida_simple_remove(&hci_index_ida, id);
2341 }
2342 EXPORT_SYMBOL(hci_unregister_dev);
2343
2344 /* Suspend HCI device */
2345 int hci_suspend_dev(struct hci_dev *hdev)
2346 {
2347         hci_notify(hdev, HCI_DEV_SUSPEND);
2348         return 0;
2349 }
2350 EXPORT_SYMBOL(hci_suspend_dev);
2351
2352 /* Resume HCI device */
2353 int hci_resume_dev(struct hci_dev *hdev)
2354 {
2355         hci_notify(hdev, HCI_DEV_RESUME);
2356         return 0;
2357 }
2358 EXPORT_SYMBOL(hci_resume_dev);
2359
2360 /* Receive frame from HCI drivers */
2361 int hci_recv_frame(struct sk_buff *skb)
2362 {
2363         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2364         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2365                       && !test_bit(HCI_INIT, &hdev->flags))) {
2366                 kfree_skb(skb);
2367                 return -ENXIO;
2368         }
2369
2370         /* Incoming skb */
2371         bt_cb(skb)->incoming = 1;
2372
2373         /* Time stamp */
2374         __net_timestamp(skb);
2375
2376         skb_queue_tail(&hdev->rx_q, skb);
2377         queue_work(hdev->workqueue, &hdev->rx_work);
2378
2379         return 0;
2380 }
2381 EXPORT_SYMBOL(hci_recv_frame);
2382
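/* Copy up to @count bytes of @data into the per-type reassembly
 * buffer hdev->reassembly[@index]. scb->expect tracks how many bytes
 * are still missing; once a full header has arrived, the expected
 * payload length is taken from the header itself. A finished frame
 * is handed to hci_recv_frame(). Returns the number of unconsumed
 * input bytes, or a negative error.
 */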
2383 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2384                           int count, __u8 index)
2385 {
2386         int len = 0;
2387         int hlen = 0;
2388         int remain = count;
2389         struct sk_buff *skb;
2390         struct bt_skb_cb *scb;
2391
2392         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2393             index >= NUM_REASSEMBLY)
2394                 return -EILSEQ;
2395
2396         skb = hdev->reassembly[index];
2397
2398         if (!skb) {
2399                 switch (type) {
2400                 case HCI_ACLDATA_PKT:
2401                         len = HCI_MAX_FRAME_SIZE;
2402                         hlen = HCI_ACL_HDR_SIZE;
2403                         break;
2404                 case HCI_EVENT_PKT:
2405                         len = HCI_MAX_EVENT_SIZE;
2406                         hlen = HCI_EVENT_HDR_SIZE;
2407                         break;
2408                 case HCI_SCODATA_PKT:
2409                         len = HCI_MAX_SCO_SIZE;
2410                         hlen = HCI_SCO_HDR_SIZE;
2411                         break;
2412                 }
2413
2414                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2415                 if (!skb)
2416                         return -ENOMEM;
2417
2418                 scb = (void *) skb->cb;
2419                 scb->expect = hlen;
2420                 scb->pkt_type = type;
2421
2422                 skb->dev = (void *) hdev;
2423                 hdev->reassembly[index] = skb;
2424         }
2425
2426         while (count) {
2427                 scb = (void *) skb->cb;
2428                 len = min_t(uint, scb->expect, count);
2429
2430                 memcpy(skb_put(skb, len), data, len);
2431
2432                 count -= len;
2433                 data += len;
2434                 scb->expect -= len;
2435                 remain = count;
2436
2437                 switch (type) {
2438                 case HCI_EVENT_PKT:
2439                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2440                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2441                                 scb->expect = h->plen;
2442
2443                                 if (skb_tailroom(skb) < scb->expect) {
2444                                         kfree_skb(skb);
2445                                         hdev->reassembly[index] = NULL;
2446                                         return -ENOMEM;
2447                                 }
2448                         }
2449                         break;
2450
2451                 case HCI_ACLDATA_PKT:
2452                         if (skb->len == HCI_ACL_HDR_SIZE) {
2453                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2454                                 scb->expect = __le16_to_cpu(h->dlen);
2455
2456                                 if (skb_tailroom(skb) < scb->expect) {
2457                                         kfree_skb(skb);
2458                                         hdev->reassembly[index] = NULL;
2459                                         return -ENOMEM;
2460                                 }
2461                         }
2462                         break;
2463
2464                 case HCI_SCODATA_PKT:
2465                         if (skb->len == HCI_SCO_HDR_SIZE) {
2466                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2467                                 scb->expect = h->dlen;
2468
2469                                 if (skb_tailroom(skb) < scb->expect) {
2470                                         kfree_skb(skb);
2471                                         hdev->reassembly[index] = NULL;
2472                                         return -ENOMEM;
2473                                 }
2474                         }
2475                         break;
2476                 }
2477
2478                 if (scb->expect == 0) {
2479                         /* Complete frame */
2480
2481                         bt_cb(skb)->pkt_type = type;
2482                         hci_recv_frame(skb);
2483
2484                         hdev->reassembly[index] = NULL;
2485                         return remain;
2486                 }
2487         }
2488
2489         return remain;
2490 }
2491
2492 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2493 {
2494         int rem = 0;
2495
2496         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2497                 return -EILSEQ;
2498
2499         while (count) {
2500                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2501                 if (rem < 0)
2502                         return rem;
2503
2504                 data += (count - rem);
2505                 count = rem;
2506         }
2507
2508         return rem;
2509 }
2510 EXPORT_SYMBOL(hci_recv_fragment);
2511
2512 #define STREAM_REASSEMBLY 0
2513
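/* Reassembly for stream based transports (e.g. UART H:4 framing)
 * where the packet type is carried in-band as the first byte of each
 * frame. A single slot suffices because the stream is strictly
 * sequential.
 */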
2514 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2515 {
2516         int type;
2517         int rem = 0;
2518
2519         while (count) {
2520                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2521
2522                 if (!skb) {
2523                         struct { char type; } *pkt;
2524
2525                         /* Start of the frame */
2526                         pkt = data;
2527                         type = pkt->type;
2528
2529                         data++;
2530                         count--;
2531                 } else
2532                         type = bt_cb(skb)->pkt_type;
2533
2534                 rem = hci_reassembly(hdev, type, data, count,
2535                                      STREAM_REASSEMBLY);
2536                 if (rem < 0)
2537                         return rem;
2538
2539                 data += (count - rem);
2540                 count = rem;
2541         }
2542
2543         return rem;
2544 }
2545 EXPORT_SYMBOL(hci_recv_stream_fragment);
2546
2547 /* ---- Interface to upper protocols ---- */
2548
2549 int hci_register_cb(struct hci_cb *cb)
2550 {
2551         BT_DBG("%p name %s", cb, cb->name);
2552
2553         write_lock(&hci_cb_list_lock);
2554         list_add(&cb->list, &hci_cb_list);
2555         write_unlock(&hci_cb_list_lock);
2556
2557         return 0;
2558 }
2559 EXPORT_SYMBOL(hci_register_cb);
2560
2561 int hci_unregister_cb(struct hci_cb *cb)
2562 {
2563         BT_DBG("%p name %s", cb, cb->name);
2564
2565         write_lock(&hci_cb_list_lock);
2566         list_del(&cb->list);
2567         write_unlock(&hci_cb_list_lock);
2568
2569         return 0;
2570 }
2571 EXPORT_SYMBOL(hci_unregister_cb);
2572
2573 static int hci_send_frame(struct sk_buff *skb)
2574 {
2575         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2576
2577         if (!hdev) {
2578                 kfree_skb(skb);
2579                 return -ENODEV;
2580         }
2581
2582         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2583
2584         /* Time stamp */
2585         __net_timestamp(skb);
2586
2587         /* Send copy to monitor */
2588         hci_send_to_monitor(hdev, skb);
2589
2590         if (atomic_read(&hdev->promisc)) {
2591                 /* Send copy to the sockets */
2592                 hci_send_to_sock(hdev, skb);
2593         }
2594
2595         /* Get rid of skb owner, prior to sending to the driver. */
2596         skb_orphan(skb);
2597
2598         return hdev->send(skb);
2599 }
2600
2601 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2602 {
2603         skb_queue_head_init(&req->cmd_q);
2604         req->hdev = hdev;
2605         req->err = 0;
2606 }
2607
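/* Submit all commands queued on @req to the controller. Only the
 * last command of the request carries the @complete callback, which
 * is what lets hci_req_cmd_complete() recognize the end of the
 * request.
 */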
2608 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2609 {
2610         struct hci_dev *hdev = req->hdev;
2611         struct sk_buff *skb;
2612         unsigned long flags;
2613
2614         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2615
2616         /* If an error occurred during request building, remove all HCI
2617          * commands queued on the HCI request queue.
2618          */
2619         if (req->err) {
2620                 skb_queue_purge(&req->cmd_q);
2621                 return req->err;
2622         }
2623
2624         /* Do not allow empty requests */
2625         if (skb_queue_empty(&req->cmd_q))
2626                 return -ENODATA;
2627
2628         skb = skb_peek_tail(&req->cmd_q);
2629         bt_cb(skb)->req.complete = complete;
2630
2631         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2632         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2633         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2634
2635         queue_work(hdev->workqueue, &hdev->cmd_work);
2636
2637         return 0;
2638 }
2639
2640 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2641                                        u32 plen, const void *param)
2642 {
2643         int len = HCI_COMMAND_HDR_SIZE + plen;
2644         struct hci_command_hdr *hdr;
2645         struct sk_buff *skb;
2646
2647         skb = bt_skb_alloc(len, GFP_ATOMIC);
2648         if (!skb)
2649                 return NULL;
2650
2651         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2652         hdr->opcode = cpu_to_le16(opcode);
2653         hdr->plen   = plen;
2654
2655         if (plen)
2656                 memcpy(skb_put(skb, plen), param, plen);
2657
2658         BT_DBG("skb len %d", skb->len);
2659
2660         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2661         skb->dev = (void *) hdev;
2662
2663         return skb;
2664 }
2665
2666 /* Send HCI command */
2667 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2668                  const void *param)
2669 {
2670         struct sk_buff *skb;
2671
2672         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2673
2674         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2675         if (!skb) {
2676                 BT_ERR("%s no memory for command", hdev->name);
2677                 return -ENOMEM;
2678         }
2679
2680         /* Stand-alone HCI commands must be flagged as
2681          * single-command requests.
2682          */
2683         bt_cb(skb)->req.start = true;
2684
2685         skb_queue_tail(&hdev->cmd_q, skb);
2686         queue_work(hdev->workqueue, &hdev->cmd_work);
2687
2688         return 0;
2689 }
2690
2691 /* Queue a command to an asynchronous HCI request */
2692 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2693                     const void *param, u8 event)
2694 {
2695         struct hci_dev *hdev = req->hdev;
2696         struct sk_buff *skb;
2697
2698         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2699
2700         /* If an error occurred during request building, there is no point in
2701          * queueing the HCI command. We can simply return.
2702          */
2703         if (req->err)
2704                 return;
2705
2706         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2707         if (!skb) {
2708                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2709                        hdev->name, opcode);
2710                 req->err = -ENOMEM;
2711                 return;
2712         }
2713
2714         if (skb_queue_empty(&req->cmd_q))
2715                 bt_cb(skb)->req.start = true;
2716
2717         bt_cb(skb)->req.event = event;
2718
2719         skb_queue_tail(&req->cmd_q, skb);
2720 }
2721
2722 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2723                  const void *param)
2724 {
2725         hci_req_add_ev(req, opcode, plen, param, 0);
2726 }
2727
2728 /* Get data from the previously sent command */
2729 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2730 {
2731         struct hci_command_hdr *hdr;
2732
2733         if (!hdev->sent_cmd)
2734                 return NULL;
2735
2736         hdr = (void *) hdev->sent_cmd->data;
2737
2738         if (hdr->opcode != cpu_to_le16(opcode))
2739                 return NULL;
2740
2741         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2742
2743         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2744 }
2745
2746 /* Send ACL data */
2747 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2748 {
2749         struct hci_acl_hdr *hdr;
2750         int len = skb->len;
2751
2752         skb_push(skb, HCI_ACL_HDR_SIZE);
2753         skb_reset_transport_header(skb);
2754         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2755         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2756         hdr->dlen   = cpu_to_le16(len);
2757 }
2758
2759 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2760                           struct sk_buff *skb, __u16 flags)
2761 {
2762         struct hci_conn *conn = chan->conn;
2763         struct hci_dev *hdev = conn->hdev;
2764         struct sk_buff *list;
2765
2766         skb->len = skb_headlen(skb);
2767         skb->data_len = 0;
2768
2769         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2770
2771         switch (hdev->dev_type) {
2772         case HCI_BREDR:
2773                 hci_add_acl_hdr(skb, conn->handle, flags);
2774                 break;
2775         case HCI_AMP:
2776                 hci_add_acl_hdr(skb, chan->handle, flags);
2777                 break;
2778         default:
2779                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2780                 return;
2781         }
2782
2783         list = skb_shinfo(skb)->frag_list;
2784         if (!list) {
2785                 /* Non fragmented */
2786                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2787
2788                 skb_queue_tail(queue, skb);
2789         } else {
2790                 /* Fragmented */
2791                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2792
2793                 skb_shinfo(skb)->frag_list = NULL;
2794
2795                 /* Queue all fragments atomically */
2796                 spin_lock(&queue->lock);
2797
2798                 __skb_queue_tail(queue, skb);
2799
2800                 flags &= ~ACL_START;
2801                 flags |= ACL_CONT;
2802                 do {
2803                         skb = list; list = list->next;
2804
2805                         skb->dev = (void *) hdev;
2806                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2807                         hci_add_acl_hdr(skb, conn->handle, flags);
2808
2809                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2810
2811                         __skb_queue_tail(queue, skb);
2812                 } while (list);
2813
2814                 spin_unlock(&queue->lock);
2815         }
2816 }
2817
2818 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2819 {
2820         struct hci_dev *hdev = chan->conn->hdev;
2821
2822         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2823
2824         skb->dev = (void *) hdev;
2825
2826         hci_queue_acl(chan, &chan->data_q, skb, flags);
2827
2828         queue_work(hdev->workqueue, &hdev->tx_work);
2829 }
2830
2831 /* Send SCO data */
2832 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2833 {
2834         struct hci_dev *hdev = conn->hdev;
2835         struct hci_sco_hdr hdr;
2836
2837         BT_DBG("%s len %d", hdev->name, skb->len);
2838
2839         hdr.handle = cpu_to_le16(conn->handle);
2840         hdr.dlen   = skb->len;
2841
2842         skb_push(skb, HCI_SCO_HDR_SIZE);
2843         skb_reset_transport_header(skb);
2844         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2845
2846         skb->dev = (void *) hdev;
2847         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2848
2849         skb_queue_tail(&conn->data_q, skb);
2850         queue_work(hdev->workqueue, &hdev->tx_work);
2851 }
2852
2853 /* ---- HCI TX task (outgoing data) ---- */
2854
2855 /* HCI Connection scheduler */
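/* Pick the connection of the given link type with the least data
 * outstanding and grant it a fair share of the free controller
 * buffers: quote = free slots / active connections, minimum one.
 */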
2856 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2857                                      int *quote)
2858 {
2859         struct hci_conn_hash *h = &hdev->conn_hash;
2860         struct hci_conn *conn = NULL, *c;
2861         unsigned int num = 0, min = ~0;
2862
2863         /* We don't have to lock device here. Connections are always
2864          * added and removed with TX task disabled. */
2865
2866         rcu_read_lock();
2867
2868         list_for_each_entry_rcu(c, &h->list, list) {
2869                 if (c->type != type || skb_queue_empty(&c->data_q))
2870                         continue;
2871
2872                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2873                         continue;
2874
2875                 num++;
2876
2877                 if (c->sent < min) {
2878                         min  = c->sent;
2879                         conn = c;
2880                 }
2881
2882                 if (hci_conn_num(hdev, type) == num)
2883                         break;
2884         }
2885
2886         rcu_read_unlock();
2887
2888         if (conn) {
2889                 int cnt, q;
2890
2891                 switch (conn->type) {
2892                 case ACL_LINK:
2893                         cnt = hdev->acl_cnt;
2894                         break;
2895                 case SCO_LINK:
2896                 case ESCO_LINK:
2897                         cnt = hdev->sco_cnt;
2898                         break;
2899                 case LE_LINK:
2900                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2901                         break;
2902                 default:
2903                         cnt = 0;
2904                         BT_ERR("Unknown link type");
2905                 }
2906
2907                 q = cnt / num;
2908                 *quote = q ? q : 1;
2909         } else
2910                 *quote = 0;
2911
2912         BT_DBG("conn %p quote %d", conn, *quote);
2913         return conn;
2914 }
2915
2916 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2917 {
2918         struct hci_conn_hash *h = &hdev->conn_hash;
2919         struct hci_conn *c;
2920
2921         BT_ERR("%s link tx timeout", hdev->name);
2922
2923         rcu_read_lock();
2924
2925         /* Kill stalled connections */
2926         list_for_each_entry_rcu(c, &h->list, list) {
2927                 if (c->type == type && c->sent) {
2928                         BT_ERR("%s killing stalled connection %pMR",
2929                                hdev->name, &c->dst);
2930                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2931                 }
2932         }
2933
2934         rcu_read_unlock();
2935 }
2936
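/* Channel level variant of the scheduler above: only channels whose
 * head-of-queue skb carries the highest priority currently seen are
 * considered, and among those the one on the connection with the
 * fewest outstanding packets wins.
 */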
2937 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2938                                       int *quote)
2939 {
2940         struct hci_conn_hash *h = &hdev->conn_hash;
2941         struct hci_chan *chan = NULL;
2942         unsigned int num = 0, min = ~0, cur_prio = 0;
2943         struct hci_conn *conn;
2944         int cnt, q, conn_num = 0;
2945
2946         BT_DBG("%s", hdev->name);
2947
2948         rcu_read_lock();
2949
2950         list_for_each_entry_rcu(conn, &h->list, list) {
2951                 struct hci_chan *tmp;
2952
2953                 if (conn->type != type)
2954                         continue;
2955
2956                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2957                         continue;
2958
2959                 conn_num++;
2960
2961                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2962                         struct sk_buff *skb;
2963
2964                         if (skb_queue_empty(&tmp->data_q))
2965                                 continue;
2966
2967                         skb = skb_peek(&tmp->data_q);
2968                         if (skb->priority < cur_prio)
2969                                 continue;
2970
2971                         if (skb->priority > cur_prio) {
2972                                 num = 0;
2973                                 min = ~0;
2974                                 cur_prio = skb->priority;
2975                         }
2976
2977                         num++;
2978
2979                         if (conn->sent < min) {
2980                                 min  = conn->sent;
2981                                 chan = tmp;
2982                         }
2983                 }
2984
2985                 if (hci_conn_num(hdev, type) == conn_num)
2986                         break;
2987         }
2988
2989         rcu_read_unlock();
2990
2991         if (!chan)
2992                 return NULL;
2993
2994         switch (chan->conn->type) {
2995         case ACL_LINK:
2996                 cnt = hdev->acl_cnt;
2997                 break;
2998         case AMP_LINK:
2999                 cnt = hdev->block_cnt;
3000                 break;
3001         case SCO_LINK:
3002         case ESCO_LINK:
3003                 cnt = hdev->sco_cnt;
3004                 break;
3005         case LE_LINK:
3006                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3007                 break;
3008         default:
3009                 cnt = 0;
3010                 BT_ERR("Unknown link type");
3011         }
3012
3013         q = cnt / num;
3014         *quote = q ? q : 1;
3015         BT_DBG("chan %p quote %d", chan, *quote);
3016         return chan;
3017 }
3018
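/* After a TX round, promote the head skb of every channel that did
 * not get to send anything (chan->sent == 0 but data still queued)
 * so that low priority traffic cannot be starved forever.
 */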
3019 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3020 {
3021         struct hci_conn_hash *h = &hdev->conn_hash;
3022         struct hci_conn *conn;
3023         int num = 0;
3024
3025         BT_DBG("%s", hdev->name);
3026
3027         rcu_read_lock();
3028
3029         list_for_each_entry_rcu(conn, &h->list, list) {
3030                 struct hci_chan *chan;
3031
3032                 if (conn->type != type)
3033                         continue;
3034
3035                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3036                         continue;
3037
3038                 num++;
3039
3040                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3041                         struct sk_buff *skb;
3042
3043                         if (chan->sent) {
3044                                 chan->sent = 0;
3045                                 continue;
3046                         }
3047
3048                         if (skb_queue_empty(&chan->data_q))
3049                                 continue;
3050
3051                         skb = skb_peek(&chan->data_q);
3052                         if (skb->priority >= HCI_PRIO_MAX - 1)
3053                                 continue;
3054
3055                         skb->priority = HCI_PRIO_MAX - 1;
3056
3057                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3058                                skb->priority);
3059                 }
3060
3061                 if (hci_conn_num(hdev, type) == num)
3062                         break;
3063         }
3064
3065         rcu_read_unlock();
3066
3067 }
3068
3069 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3070 {
3071         /* Calculate count of blocks used by this packet */
3072         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3073 }
3074
3075 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3076 {
3077         if (!test_bit(HCI_RAW, &hdev->flags)) {
3078                 /* ACL tx timeout must be longer than maximum
3079                  * link supervision timeout (40.9 seconds) */
3080                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3081                                        HCI_ACL_TX_TIMEOUT))
3082                         hci_link_tx_to(hdev, ACL_LINK);
3083         }
3084 }
3085
3086 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3087 {
3088         unsigned int cnt = hdev->acl_cnt;
3089         struct hci_chan *chan;
3090         struct sk_buff *skb;
3091         int quote;
3092
3093         __check_timeout(hdev, cnt);
3094
3095         while (hdev->acl_cnt &&
3096                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3097                 u32 priority = (skb_peek(&chan->data_q))->priority;
3098                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3099                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3100                                skb->len, skb->priority);
3101
3102                         /* Stop if priority has changed */
3103                         if (skb->priority < priority)
3104                                 break;
3105
3106                         skb = skb_dequeue(&chan->data_q);
3107
3108                         hci_conn_enter_active_mode(chan->conn,
3109                                                    bt_cb(skb)->force_active);
3110
3111                         hci_send_frame(skb);
3112                         hdev->acl_last_tx = jiffies;
3113
3114                         hdev->acl_cnt--;
3115                         chan->sent++;
3116                         chan->conn->sent++;
3117                 }
3118         }
3119
3120         if (cnt != hdev->acl_cnt)
3121                 hci_prio_recalculate(hdev, ACL_LINK);
3122 }
3123
3124 static void hci_sched_acl_blk(struct hci_dev *hdev)
3125 {
3126         unsigned int cnt = hdev->block_cnt;
3127         struct hci_chan *chan;
3128         struct sk_buff *skb;
3129         int quote;
3130         u8 type;
3131
3132         __check_timeout(hdev, cnt);
3133
3134         BT_DBG("%s", hdev->name);
3135
3136         if (hdev->dev_type == HCI_AMP)
3137                 type = AMP_LINK;
3138         else
3139                 type = ACL_LINK;
3140
3141         while (hdev->block_cnt > 0 &&
3142                (chan = hci_chan_sent(hdev, type, &quote))) {
3143                 u32 priority = (skb_peek(&chan->data_q))->priority;
3144                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3145                         int blocks;
3146
3147                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3148                                skb->len, skb->priority);
3149
3150                         /* Stop if priority has changed */
3151                         if (skb->priority < priority)
3152                                 break;
3153
3154                         skb = skb_dequeue(&chan->data_q);
3155
3156                         blocks = __get_blocks(hdev, skb);
3157                         if (blocks > hdev->block_cnt)
3158                                 return;
3159
3160                         hci_conn_enter_active_mode(chan->conn,
3161                                                    bt_cb(skb)->force_active);
3162
3163                         hci_send_frame(skb);
3164                         hdev->acl_last_tx = jiffies;
3165
3166                         hdev->block_cnt -= blocks;
3167                         quote -= blocks;
3168
3169                         chan->sent += blocks;
3170                         chan->conn->sent += blocks;
3171                 }
3172         }
3173
3174         if (cnt != hdev->block_cnt)
3175                 hci_prio_recalculate(hdev, type);
3176 }
3177
3178 static void hci_sched_acl(struct hci_dev *hdev)
3179 {
3180         BT_DBG("%s", hdev->name);
3181
3182         /* No ACL link over BR/EDR controller */
3183         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3184                 return;
3185
3186         /* No AMP link over AMP controller */
3187         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3188                 return;
3189
3190         switch (hdev->flow_ctl_mode) {
3191         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3192                 hci_sched_acl_pkt(hdev);
3193                 break;
3194
3195         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3196                 hci_sched_acl_blk(hdev);
3197                 break;
3198         }
3199 }
3200
3201 /* Schedule SCO */
3202 static void hci_sched_sco(struct hci_dev *hdev)
3203 {
3204         struct hci_conn *conn;
3205         struct sk_buff *skb;
3206         int quote;
3207
3208         BT_DBG("%s", hdev->name);
3209
3210         if (!hci_conn_num(hdev, SCO_LINK))
3211                 return;
3212
3213         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3214                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3215                         BT_DBG("skb %p len %d", skb, skb->len);
3216                         hci_send_frame(skb);
3217
3218                         conn->sent++;
3219                         if (conn->sent == ~0)
3220                                 conn->sent = 0;
3221                 }
3222         }
3223 }
3224
3225 static void hci_sched_esco(struct hci_dev *hdev)
3226 {
3227         struct hci_conn *conn;
3228         struct sk_buff *skb;
3229         int quote;
3230
3231         BT_DBG("%s", hdev->name);
3232
3233         if (!hci_conn_num(hdev, ESCO_LINK))
3234                 return;
3235
3236         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3237                                                      &quote))) {
3238                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3239                         BT_DBG("skb %p len %d", skb, skb->len);
3240                         hci_send_frame(skb);
3241
3242                         conn->sent++;
3243                         if (conn->sent == ~0)
3244                                 conn->sent = 0;
3245                 }
3246         }
3247 }
3248
3249 static void hci_sched_le(struct hci_dev *hdev)
3250 {
3251         struct hci_chan *chan;
3252         struct sk_buff *skb;
3253         int quote, cnt, tmp;
3254
3255         BT_DBG("%s", hdev->name);
3256
3257         if (!hci_conn_num(hdev, LE_LINK))
3258                 return;
3259
3260         if (!test_bit(HCI_RAW, &hdev->flags)) {
3261                 /* LE tx timeout must be longer than maximum
3262                  * link supervision timeout (40.9 seconds) */
3263                 if (!hdev->le_cnt && hdev->le_pkts &&
3264                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3265                         hci_link_tx_to(hdev, LE_LINK);
3266         }
3267
3268         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3269         tmp = cnt;
3270         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3271                 u32 priority = (skb_peek(&chan->data_q))->priority;
3272                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3273                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3274                                skb->len, skb->priority);
3275
3276                         /* Stop if priority has changed */
3277                         if (skb->priority < priority)
3278                                 break;
3279
3280                         skb = skb_dequeue(&chan->data_q);
3281
3282                         hci_send_frame(skb);
3283                         hdev->le_last_tx = jiffies;
3284
3285                         cnt--;
3286                         chan->sent++;
3287                         chan->conn->sent++;
3288                 }
3289         }
3290
3291         if (hdev->le_pkts)
3292                 hdev->le_cnt = cnt;
3293         else
3294                 hdev->acl_cnt = cnt;
3295
3296         if (cnt != tmp)
3297                 hci_prio_recalculate(hdev, LE_LINK);
3298 }
3299
3300 static void hci_tx_work(struct work_struct *work)
3301 {
3302         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3303         struct sk_buff *skb;
3304
3305         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3306                hdev->sco_cnt, hdev->le_cnt);
3307
3308         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3309                 /* Schedule queues and send stuff to HCI driver */
3310                 hci_sched_acl(hdev);
3311                 hci_sched_sco(hdev);
3312                 hci_sched_esco(hdev);
3313                 hci_sched_le(hdev);
3314         }
3315
3316         /* Send next queued raw (unknown type) packet */
3317         while ((skb = skb_dequeue(&hdev->raw_q)))
3318                 hci_send_frame(skb);
3319 }
3320
3321 /* ----- HCI RX task (incoming data processing) ----- */
3322
3323 /* ACL data packet */
3324 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3325 {
3326         struct hci_acl_hdr *hdr = (void *) skb->data;
3327         struct hci_conn *conn;
3328         __u16 handle, flags;
3329
3330         skb_pull(skb, HCI_ACL_HDR_SIZE);
3331
3332         handle = __le16_to_cpu(hdr->handle);
3333         flags  = hci_flags(handle);
3334         handle = hci_handle(handle);
3335
3336         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3337                handle, flags);
3338
3339         hdev->stat.acl_rx++;
3340
3341         hci_dev_lock(hdev);
3342         conn = hci_conn_hash_lookup_handle(hdev, handle);
3343         hci_dev_unlock(hdev);
3344
3345         if (conn) {
3346                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3347
3348                 /* Send to upper protocol */
3349                 l2cap_recv_acldata(conn, skb, flags);
3350                 return;
3351         } else {
3352                 BT_ERR("%s ACL packet for unknown connection handle %d",
3353                        hdev->name, handle);
3354         }
3355
3356         kfree_skb(skb);
3357 }
3358
3359 /* SCO data packet */
3360 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3361 {
3362         struct hci_sco_hdr *hdr = (void *) skb->data;
3363         struct hci_conn *conn;
3364         __u16 handle;
3365
3366         skb_pull(skb, HCI_SCO_HDR_SIZE);
3367
3368         handle = __le16_to_cpu(hdr->handle);
3369
3370         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3371
3372         hdev->stat.sco_rx++;
3373
3374         hci_dev_lock(hdev);
3375         conn = hci_conn_hash_lookup_handle(hdev, handle);
3376         hci_dev_unlock(hdev);
3377
3378         if (conn) {
3379                 /* Send to upper protocol */
3380                 sco_recv_scodata(conn, skb);
3381                 return;
3382         } else {
3383                 BT_ERR("%s SCO packet for unknown connection handle %d",
3384                        hdev->name, handle);
3385         }
3386
3387         kfree_skb(skb);
3388 }
3389
3390 static bool hci_req_is_complete(struct hci_dev *hdev)
3391 {
3392         struct sk_buff *skb;
3393
3394         skb = skb_peek(&hdev->cmd_q);
3395         if (!skb)
3396                 return true;
3397
3398         return bt_cb(skb)->req.start;
3399 }
3400
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

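/* Called when a command completes. Decides whether the event finishes
 * the current request and, if so, runs the request's complete
 * callback exactly once and discards any still-queued commands that
 * belong to the same request.
 */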
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

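/* RX work handler: drain the receive queue. Each frame is mirrored to
 * the monitor socket (and, in promiscuous mode, to HCI sockets), then
 * dispatched by packet type. All frames are dropped in raw or user
 * channel mode, and data packets are dropped while HCI_INIT is set.
 */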
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags) ||
                    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

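/* Command work handler: send the next queued command if the
 * controller has command credits (cmd_cnt). A clone of the command is
 * kept in sent_cmd for request tracking, and the command timer is
 * re-armed unless a reset is in progress. If cloning fails, the
 * command is re-queued and the work rescheduled.
 */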
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

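/* Translate a BDADDR_LE_* address type to the core's internal
 * ADDR_LE_DEV_* representation; any unknown value falls back to the
 * random address type.
 */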
u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fall back to LE Random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}