Bluetooth: Add MGMT interface for setting IRK
profile/mobile/platform/kernel/linux-3.10-sc7730.git: net/bluetooth/hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43
44 #ifdef CONFIG_SLEEP_MONITOR
45 #include <linux/power/sleep_monitor.h>
46 #endif
47
48 static void hci_rx_work(struct work_struct *work);
49 static void hci_cmd_work(struct work_struct *work);
50 static void hci_tx_work(struct work_struct *work);
51
52 /* HCI device list */
53 LIST_HEAD(hci_dev_list);
54 DEFINE_RWLOCK(hci_dev_list_lock);
55
56 /* HCI callback list */
57 LIST_HEAD(hci_cb_list);
58 DEFINE_RWLOCK(hci_cb_list_lock);
59
60 /* HCI ID Numbering */
61 static DEFINE_IDA(hci_index_ida);
62
63
64 /* ---- HCI notifications ---- */
65
66 #ifdef CONFIG_TIZEN_WIP
67 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
68
69 int hci_register_notifier(struct notifier_block *nb)
70 {
71         return atomic_notifier_chain_register(&hci_notifier, nb);
72 }
73
74 int hci_unregister_notifier(struct notifier_block *nb)
75 {
76         return atomic_notifier_chain_unregister(&hci_notifier, nb);
77 }
78 #endif
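
/* A minimal sketch of how a Tizen-side consumer could hook this notifier
 * chain (the callback and notifier_block names here are hypothetical,
 * not part of this file):
 *
 *	static int bt_dev_event(struct notifier_block *nb,
 *				unsigned long event, void *data)
 *	{
 *		struct hci_dev *hdev = data;
 *
 *		if (event == HCI_DEV_REG)
 *			pr_info("hci: registered %s\n", hdev->name);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block bt_dev_nb = {
 *		.notifier_call = bt_dev_event,
 *	};
 *
 *	hci_register_notifier(&bt_dev_nb);
 */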
79
80 /* ----- HCI requests ----- */
81
82 #define HCI_REQ_DONE      0
83 #define HCI_REQ_PEND      1
84 #define HCI_REQ_CANCELED  2
85
86 #define hci_req_lock(d)         mutex_lock(&d->req_lock)
87 #define hci_req_unlock(d)       mutex_unlock(&d->req_lock)
88
89 /* ---- HCI notifications ---- */
90
91 static void hci_notify(struct hci_dev *hdev, int event)
92 {
93         hci_sock_dev_event(hdev, event);
94 #ifdef CONFIG_TIZEN_WIP
95         if (event == HCI_DEV_REG || event == HCI_DEV_UNREG ||
96             event == HCI_DEV_WRITE)
97                 atomic_notifier_call_chain(&hci_notifier, event, hdev);
98 #endif
99 }
100
101 /* ---- HCI debugfs entries ---- */
102
103 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
104                              size_t count, loff_t *ppos)
105 {
106         struct hci_dev *hdev = file->private_data;
107         char buf[3];
108
109         buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
110         buf[1] = '\n';
111         buf[2] = '\0';
112         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
113 }
114
115 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
116                               size_t count, loff_t *ppos)
117 {
118         struct hci_dev *hdev = file->private_data;
119         struct sk_buff *skb;
120         char buf[32];
121         size_t buf_size = min(count, (sizeof(buf)-1));
122         bool enable;
123         int err;
124
125         if (!test_bit(HCI_UP, &hdev->flags))
126                 return -ENETDOWN;
127
128         if (copy_from_user(buf, user_buf, buf_size))
129                 return -EFAULT;
130
131         buf[buf_size] = '\0';
132         if (strtobool(buf, &enable))
133                 return -EINVAL;
134
135         if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
136                 return -EALREADY;
137
138         hci_req_lock(hdev);
139         if (enable)
140                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
141                                      HCI_CMD_TIMEOUT);
142         else
143                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
144                                      HCI_CMD_TIMEOUT);
145         hci_req_unlock(hdev);
146
147         if (IS_ERR(skb))
148                 return PTR_ERR(skb);
149
150         err = -bt_to_errno(skb->data[0]);
151         kfree_skb(skb);
152
153         if (err < 0)
154                 return err;
155
156         change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
157
158         return count;
159 }
160
161 static const struct file_operations dut_mode_fops = {
162         .open           = simple_open,
163         .read           = dut_mode_read,
164         .write          = dut_mode_write,
165         .llseek         = default_llseek,
166 };
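
/* With debugfs mounted at the usual location, this file appears as
 * /sys/kernel/debug/bluetooth/hciX/dut_mode (path assumes the default
 * mount point). A write of "Y" sends HCI_OP_ENABLE_DUT_MODE and a write
 * of "N" issues HCI_OP_RESET to leave Device Under Test mode, e.g.:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */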
167
168 /* ---- HCI requests ---- */
169
170 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
171 {
172         BT_DBG("%s result 0x%2.2x", hdev->name, result);
173
174         if (hdev->req_status == HCI_REQ_PEND) {
175                 hdev->req_result = result;
176                 hdev->req_status = HCI_REQ_DONE;
177                 wake_up_interruptible(&hdev->req_wait_q);
178         }
179 }
180
181 static void hci_req_cancel(struct hci_dev *hdev, int err)
182 {
183         BT_DBG("%s err 0x%2.2x", hdev->name, err);
184
185         if (hdev->req_status == HCI_REQ_PEND) {
186                 hdev->req_result = err;
187                 hdev->req_status = HCI_REQ_CANCELED;
188                 wake_up_interruptible(&hdev->req_wait_q);
189         }
190 }
191
192 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
193                                             u8 event)
194 {
195         struct hci_ev_cmd_complete *ev;
196         struct hci_event_hdr *hdr;
197         struct sk_buff *skb;
198
199         hci_dev_lock(hdev);
200
201         skb = hdev->recv_evt;
202         hdev->recv_evt = NULL;
203
204         hci_dev_unlock(hdev);
205
206         if (!skb)
207                 return ERR_PTR(-ENODATA);
208
209         if (skb->len < sizeof(*hdr)) {
210                 BT_ERR("Too short HCI event");
211                 goto failed;
212         }
213
214         hdr = (void *) skb->data;
215         skb_pull(skb, HCI_EVENT_HDR_SIZE);
216
217         if (event) {
218                 if (hdr->evt != event)
219                         goto failed;
220                 return skb;
221         }
222
223         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
224                 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
225                 goto failed;
226         }
227
228         if (skb->len < sizeof(*ev)) {
229                 BT_ERR("Too short cmd_complete event");
230                 goto failed;
231         }
232
233         ev = (void *) skb->data;
234         skb_pull(skb, sizeof(*ev));
235
236         if (opcode == __le16_to_cpu(ev->opcode))
237                 return skb;
238
239         BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
240                __le16_to_cpu(ev->opcode));
241
242 failed:
243         kfree_skb(skb);
244         return ERR_PTR(-ENODATA);
245 }
246
247 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
248                                   const void *param, u8 event, u32 timeout)
249 {
250         DECLARE_WAITQUEUE(wait, current);
251         struct hci_request req;
252         int err = 0;
253
254         BT_DBG("%s", hdev->name);
255
256         hci_req_init(&req, hdev);
257
258         hci_req_add_ev(&req, opcode, plen, param, event);
259
260         hdev->req_status = HCI_REQ_PEND;
261
262         add_wait_queue(&hdev->req_wait_q, &wait);
263         set_current_state(TASK_INTERRUPTIBLE);
264
265         err = hci_req_run(&req, hci_req_sync_complete);
266         if (err < 0) {
267                 remove_wait_queue(&hdev->req_wait_q, &wait);
268                 set_current_state(TASK_RUNNING);
269                 return ERR_PTR(err);
270         }
271
272         schedule_timeout(timeout);
273
274         remove_wait_queue(&hdev->req_wait_q, &wait);
275
276         if (signal_pending(current))
277                 return ERR_PTR(-EINTR);
278
279         switch (hdev->req_status) {
280         case HCI_REQ_DONE:
281                 err = -bt_to_errno(hdev->req_result);
282                 break;
283
284         case HCI_REQ_CANCELED:
285                 err = -hdev->req_result;
286                 break;
287
288         default:
289                 err = -ETIMEDOUT;
290                 break;
291         }
292
293         hdev->req_status = hdev->req_result = 0;
294
295         BT_DBG("%s end: err %d", hdev->name, err);
296
297         if (err < 0)
298                 return ERR_PTR(err);
299
300         return hci_get_cmd_complete(hdev, opcode, event);
301 }
302 EXPORT_SYMBOL(__hci_cmd_sync_ev);
303
304 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
305                                const void *param, u32 timeout)
306 {
307         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
308 }
309 EXPORT_SYMBOL(__hci_cmd_sync);
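
/* Typical use of the synchronous command helpers, mirroring
 * dut_mode_write() above (a sketch only; "status" is a local the caller
 * would declare):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	status = skb->data[0];
 *	kfree_skb(skb);
 *
 * The returned skb holds the Command Complete parameters; callers must
 * check IS_ERR() and free the skb themselves.
 */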
310
311 /* Execute request and wait for completion. */
312 static int __hci_req_sync(struct hci_dev *hdev,
313                           void (*func)(struct hci_request *req,
314                                       unsigned long opt),
315                           unsigned long opt, __u32 timeout)
316 {
317         struct hci_request req;
318         DECLARE_WAITQUEUE(wait, current);
319         int err = 0;
320
321         BT_DBG("%s start", hdev->name);
322
323         hci_req_init(&req, hdev);
324
325         hdev->req_status = HCI_REQ_PEND;
326
327         func(&req, opt);
328
329         add_wait_queue(&hdev->req_wait_q, &wait);
330         set_current_state(TASK_INTERRUPTIBLE);
331
332         err = hci_req_run(&req, hci_req_sync_complete);
333         if (err < 0) {
334                 hdev->req_status = 0;
335
336                 remove_wait_queue(&hdev->req_wait_q, &wait);
337                 set_current_state(TASK_RUNNING);
338
339                 /* ENODATA means the HCI request command queue is empty.
340                  * This can happen when a request with conditionals doesn't
341                  * trigger any commands to be sent. This is normal behavior
342                  * and should not trigger an error return.
343                  */
344                 if (err == -ENODATA)
345                         return 0;
346
347                 return err;
348         }
349
350         schedule_timeout(timeout);
351
352         remove_wait_queue(&hdev->req_wait_q, &wait);
353
354         if (signal_pending(current))
355                 return -EINTR;
356
357         switch (hdev->req_status) {
358         case HCI_REQ_DONE:
359                 err = -bt_to_errno(hdev->req_result);
360                 break;
361
362         case HCI_REQ_CANCELED:
363                 err = -hdev->req_result;
364                 break;
365
366         default:
367                 err = -ETIMEDOUT;
368                 break;
369         }
370
371         hdev->req_status = hdev->req_result = 0;
372
373         BT_DBG("%s end: err %d", hdev->name, err);
374
375         return err;
376 }
377
378 static int hci_req_sync(struct hci_dev *hdev,
379                         void (*req)(struct hci_request *req,
380                                     unsigned long opt),
381                         unsigned long opt, __u32 timeout)
382 {
383         int ret;
384
385         if (!test_bit(HCI_UP, &hdev->flags))
386                 return -ENETDOWN;
387
388         /* Serialize all requests */
389         hci_req_lock(hdev);
390         ret = __hci_req_sync(hdev, req, opt, timeout);
391         hci_req_unlock(hdev);
392
393         return ret;
394 }
395
396 static void hci_reset_req(struct hci_request *req, unsigned long opt)
397 {
398         BT_DBG("%s %ld", req->hdev->name, opt);
399
400         /* Reset device */
401         set_bit(HCI_RESET, &req->hdev->flags);
402         hci_req_add(req, HCI_OP_RESET, 0, NULL);
403 }
404
405 static void bredr_init(struct hci_request *req)
406 {
407         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
408
409         /* Read Local Supported Features */
410         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
411
412         /* Read Local Version */
413         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
414
415         /* Read BD Address */
416         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
417 }
418
419 static void amp_init(struct hci_request *req)
420 {
421         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
422
423         /* Read Local Version */
424         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
425
426         /* Read Local Supported Commands */
427         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
428
429         /* Read Local Supported Features */
430         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
431
432         /* Read Local AMP Info */
433         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
434
435         /* Read Data Blk size */
436         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
437
438         /* Read Flow Control Mode */
439         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
440
441         /* Read Location Data */
442         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
443 }
444
445 static void hci_init1_req(struct hci_request *req, unsigned long opt)
446 {
447         struct hci_dev *hdev = req->hdev;
448
449         BT_DBG("%s %ld", hdev->name, opt);
450
451         /* Reset */
452         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
453                 hci_reset_req(req, 0);
454
455         switch (hdev->dev_type) {
456         case HCI_BREDR:
457                 bredr_init(req);
458                 break;
459
460         case HCI_AMP:
461                 amp_init(req);
462                 break;
463
464         default:
465                 BT_ERR("Unknown device type %d", hdev->dev_type);
466                 break;
467         }
468 }
469
470 static void bredr_setup(struct hci_request *req)
471 {
472         __le16 param;
473         __u8 flt_type;
474
475         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
476         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
477
478         /* Read Class of Device */
479         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
480
481         /* Read Local Name */
482         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
483
484         /* Read Voice Setting */
485         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
486
487         /* Read Number of Supported IAC */
488         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
489
490         /* Read Current IAC LAP */
491         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
492
493         /* Clear Event Filters */
494         flt_type = HCI_FLT_CLEAR_ALL;
495         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
496
497         /* Connection accept timeout ~20 secs */
498         param = cpu_to_le16(0x7d00);
499         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
500 }
501
502 static void le_setup(struct hci_request *req)
503 {
504         struct hci_dev *hdev = req->hdev;
505
506         /* Read LE Buffer Size */
507         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
508
509         /* Read LE Local Supported Features */
510         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
511
512         /* Read LE Supported States */
513         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
514
515         /* Read LE White List Size */
516         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
517
518         /* Clear LE White List */
519         hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
520
521         /* LE-only controllers have LE implicitly enabled */
522         if (!lmp_bredr_capable(hdev))
523                 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
524 }
525
526 static void hci_setup_event_mask(struct hci_request *req)
527 {
528         struct hci_dev *hdev = req->hdev;
529
530         /* The second byte is 0xff instead of 0x9f (two reserved bits
531          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
532          * command otherwise.
533          */
534         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
535
536         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
537          * any event mask for pre-1.2 devices.
538          */
539         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
540                 return;
541
542         if (lmp_bredr_capable(hdev)) {
543                 events[4] |= 0x01; /* Flow Specification Complete */
544                 events[4] |= 0x02; /* Inquiry Result with RSSI */
545                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
546                 events[5] |= 0x08; /* Synchronous Connection Complete */
547                 events[5] |= 0x10; /* Synchronous Connection Changed */
548         } else {
549                 /* Use a different default for LE-only devices */
550                 memset(events, 0, sizeof(events));
551                 events[0] |= 0x10; /* Disconnection Complete */
552                 events[1] |= 0x08; /* Read Remote Version Information Complete */
553                 events[1] |= 0x20; /* Command Complete */
554                 events[1] |= 0x40; /* Command Status */
555                 events[1] |= 0x80; /* Hardware Error */
556                 events[2] |= 0x04; /* Number of Completed Packets */
557                 events[3] |= 0x02; /* Data Buffer Overflow */
558
559                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
560                         events[0] |= 0x80; /* Encryption Change */
561                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
562                 }
563         }
564
565         if (lmp_inq_rssi_capable(hdev))
566                 events[4] |= 0x02; /* Inquiry Result with RSSI */
567
568         if (lmp_sniffsubr_capable(hdev))
569                 events[5] |= 0x20; /* Sniff Subrating */
570
571         if (lmp_pause_enc_capable(hdev))
572                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
573
574         if (lmp_ext_inq_capable(hdev))
575                 events[5] |= 0x40; /* Extended Inquiry Result */
576
577         if (lmp_no_flush_capable(hdev))
578                 events[7] |= 0x01; /* Enhanced Flush Complete */
579
580         if (lmp_lsto_capable(hdev))
581                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
582
583         if (lmp_ssp_capable(hdev)) {
584                 events[6] |= 0x01;      /* IO Capability Request */
585                 events[6] |= 0x02;      /* IO Capability Response */
586                 events[6] |= 0x04;      /* User Confirmation Request */
587                 events[6] |= 0x08;      /* User Passkey Request */
588                 events[6] |= 0x10;      /* Remote OOB Data Request */
589                 events[6] |= 0x20;      /* Simple Pairing Complete */
590                 events[7] |= 0x04;      /* User Passkey Notification */
591                 events[7] |= 0x08;      /* Keypress Notification */
592                 events[7] |= 0x10;      /* Remote Host Supported
593                                          * Features Notification
594                                          */
595         }
596
597         if (lmp_le_capable(hdev))
598                 events[7] |= 0x20;      /* LE Meta-Event */
599
600         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
601 }
602
603 static void hci_init2_req(struct hci_request *req, unsigned long opt)
604 {
605         struct hci_dev *hdev = req->hdev;
606
607         if (lmp_bredr_capable(hdev))
608                 bredr_setup(req);
609         else
610                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
611
612         if (lmp_le_capable(hdev))
613                 le_setup(req);
614
615         /* All Bluetooth 1.2 and later controllers should support the
616          * HCI command for reading the local supported commands.
617          *
618          * Unfortunately some controllers indicate Bluetooth 1.2 support,
619          * but do not have support for this command. If that is the case,
620          * the driver can quirk the behavior and skip reading the local
621          * supported commands.
622          */
623         if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
624             !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
625                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
626
627         if (lmp_ssp_capable(hdev)) {
628                 /* When SSP is available, the host features page
629                  * should be available as well. However, some
630                  * controllers list the max_page as 0 as long as SSP
631                  * has not been enabled. To achieve proper debugging
632                  * output, force max_page to a minimum of 1.
633                  */
634                 hdev->max_page = 0x01;
635
636                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
637                         u8 mode = 0x01;
638
639                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
640                                     sizeof(mode), &mode);
641                 } else {
642                         struct hci_cp_write_eir cp;
643
644                         memset(hdev->eir, 0, sizeof(hdev->eir));
645                         memset(&cp, 0, sizeof(cp));
646
647                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
648                 }
649         }
650
651         if (lmp_inq_rssi_capable(hdev) ||
652             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
653                 u8 mode;
654
655                 /* If Extended Inquiry Result events are supported, then
656                  * they are clearly preferred over Inquiry Result with RSSI
657                  * events.
658                  */
659                 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
660
661                 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
662         }
663
664         if (lmp_inq_tx_pwr_capable(hdev))
665                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
666
667         if (lmp_ext_feat_capable(hdev)) {
668                 struct hci_cp_read_local_ext_features cp;
669
670                 cp.page = 0x01;
671                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
672                             sizeof(cp), &cp);
673         }
674
675         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
676                 u8 enable = 1;
677                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
678                             &enable);
679         }
680 }
681
682 static void hci_setup_link_policy(struct hci_request *req)
683 {
684         struct hci_dev *hdev = req->hdev;
685         struct hci_cp_write_def_link_policy cp;
686         u16 link_policy = 0;
687
688         if (lmp_rswitch_capable(hdev))
689                 link_policy |= HCI_LP_RSWITCH;
690         if (lmp_hold_capable(hdev))
691                 link_policy |= HCI_LP_HOLD;
692         if (lmp_sniff_capable(hdev))
693                 link_policy |= HCI_LP_SNIFF;
694         if (lmp_park_capable(hdev))
695                 link_policy |= HCI_LP_PARK;
696
697         cp.policy = cpu_to_le16(link_policy);
698         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
699 }
700
701 static void hci_set_le_support(struct hci_request *req)
702 {
703         struct hci_dev *hdev = req->hdev;
704         struct hci_cp_write_le_host_supported cp;
705
706         /* LE-only devices do not support explicit enablement */
707         if (!lmp_bredr_capable(hdev))
708                 return;
709
710         memset(&cp, 0, sizeof(cp));
711
712         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
713                 cp.le = 0x01;
714                 cp.simul = 0x00;
715         }
716
717         if (cp.le != lmp_host_le_capable(hdev))
718                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
719                             &cp);
720 }
721
722 static void hci_set_event_mask_page_2(struct hci_request *req)
723 {
724         struct hci_dev *hdev = req->hdev;
725         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
726
727         /* If Connectionless Slave Broadcast master role is supported,
728          * enable all necessary events for it.
729          */
730         if (lmp_csb_master_capable(hdev)) {
731                 events[1] |= 0x40;      /* Triggered Clock Capture */
732                 events[1] |= 0x80;      /* Synchronization Train Complete */
733                 events[2] |= 0x10;      /* Slave Page Response Timeout */
734                 events[2] |= 0x20;      /* CSB Channel Map Change */
735         }
736
737         /* If Connectionless Slave Broadcast slave role is supported,
738          * enable all necessary events for it.
739          */
740         if (lmp_csb_slave_capable(hdev)) {
741                 events[2] |= 0x01;      /* Synchronization Train Received */
742                 events[2] |= 0x02;      /* CSB Receive */
743                 events[2] |= 0x04;      /* CSB Timeout */
744                 events[2] |= 0x08;      /* Truncated Page Complete */
745         }
746
747         /* Enable Authenticated Payload Timeout Expired event if supported */
748         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
749                 events[2] |= 0x80;
750
751         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
752 }
753
754 static void hci_init3_req(struct hci_request *req, unsigned long opt)
755 {
756         struct hci_dev *hdev = req->hdev;
757         u8 p;
758
759         hci_setup_event_mask(req);
760
761         if (hdev->commands[6] & 0x20) {
762                 struct hci_cp_read_stored_link_key cp;
763
764                 bacpy(&cp.bdaddr, BDADDR_ANY);
765                 cp.read_all = 0x01;
766                 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
767         }
768
769         if (hdev->commands[5] & 0x10)
770                 hci_setup_link_policy(req);
771
772         if (hdev->commands[8] & 0x01)
773                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
774
775         /* Some older Broadcom based Bluetooth 1.2 controllers do not
776          * support the Read Page Scan Type command. Check support for
777          * this command in the bit mask of supported commands.
778          */
779         if (hdev->commands[13] & 0x01)
780                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
781
782         if (lmp_le_capable(hdev)) {
783                 u8 events[8];
784
785                 memset(events, 0, sizeof(events));
786                 events[0] = 0x0f;
787
788                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
789                         events[0] |= 0x10;      /* LE Long Term Key Request */
790
791                 /* If controller supports the Connection Parameters Request
792                  * Link Layer Procedure, enable the corresponding event.
793                  */
794                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
795                         events[0] |= 0x20;      /* LE Remote Connection
796                                                  * Parameter Request
797                                                  */
798
799                 /* If the controller supports the Data Length Extension
800                  * feature, enable the corresponding event.
801                  */
802                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
803                         events[0] |= 0x40;      /* LE Data Length Change */
804
805                 /* If the controller supports Extended Scanner Filter
806          * Policies, enable the corresponding event.
807                  */
808                 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
809                         events[1] |= 0x04;      /* LE Direct Advertising
810                                                  * Report
811                                                  */
812
813                 /* If the controller supports the LE Read Local P-256
814                  * Public Key command, enable the corresponding event.
815                  */
816                 if (hdev->commands[34] & 0x02)
817                         events[0] |= 0x80;      /* LE Read Local P-256
818                                                  * Public Key Complete
819                                                  */
820
821                 /* If the controller supports the LE Generate DHKey
822                  * command, enable the corresponding event.
823                  */
824                 if (hdev->commands[34] & 0x04)
825                         events[1] |= 0x01;      /* LE Generate DHKey Complete */
826
827                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
828                             events);
829
830                 if (hdev->commands[25] & 0x40) {
831                         /* Read LE Advertising Channel TX Power */
832                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
833                 }
834
835                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
836                         /* Read LE Maximum Data Length */
837                         hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
838
839                         /* Read LE Suggested Default Data Length */
840                         hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
841                 }
842
843                 hci_set_le_support(req);
844         }
845
846         /* Read features beyond page 1 if available */
847         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
848                 struct hci_cp_read_local_ext_features cp;
849
850                 cp.page = p;
851                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
852                             sizeof(cp), &cp);
853         }
854 }
855
856 static void hci_init4_req(struct hci_request *req, unsigned long opt)
857 {
858         struct hci_dev *hdev = req->hdev;
859
860         /* Some Broadcom based Bluetooth controllers do not support the
861          * Delete Stored Link Key command. They are clearly indicating its
862          * absence in the bit mask of supported commands.
863          *
864          * Check the supported commands, and send it only if the command is
865          * marked as supported. If not supported, assume that the controller
866          * does not have actual support for stored link keys which makes this
867          * command redundant anyway.
868          *
869          * Some controllers indicate that they support handling deleting
870          * stored link keys, but they don't. The quirk lets a driver
871          * just disable this command.
872          */
873         if (hdev->commands[6] & 0x80 &&
874             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
875                 struct hci_cp_delete_stored_link_key cp;
876
877                 bacpy(&cp.bdaddr, BDADDR_ANY);
878                 cp.delete_all = 0x01;
879                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
880                             sizeof(cp), &cp);
881         }
882
883         /* Set event mask page 2 if the HCI command for it is supported */
884         if (hdev->commands[22] & 0x04)
885                 hci_set_event_mask_page_2(req);
886
887         /* Read local codec list if the HCI command is supported */
888         if (hdev->commands[29] & 0x20)
889                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
890
891         /* Get MWS transport configuration if the HCI command is supported */
892         if (hdev->commands[30] & 0x08)
893                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
894
895         /* Check for Synchronization Train support */
896         if (lmp_sync_train_capable(hdev))
897                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
898
899 /* The Secure Connections enable sequence is compiled in only when CONFIG_TIZEN_WIP is set */
900 #ifdef CONFIG_TIZEN_WIP
901         /* Enable Secure Connections if supported and configured */
902         if ((lmp_sc_capable(hdev) ||
903              test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags)) &&
904             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
905                 u8 support = 0x01;
906
907                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
908                             sizeof(support), &support);
909         }
910 #endif
911 }
912
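/* __hci_init() below runs the staged bring-up defined above: init1
 * resets the controller and reads basic information, init2 performs
 * BR/EDR and LE capability setup, init3 programs the event masks and
 * LE parameters, and init4 issues the optional commands gated on the
 * supported-commands bitmask.
 */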
913 static int __hci_init(struct hci_dev *hdev)
914 {
915         int err;
916
917         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
918         if (err < 0)
919                 return err;
920
921         /* The Device Under Test (DUT) mode is special and available for
922          * all controller types. So just create it early on.
923          */
924         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
925                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
926                                     &dut_mode_fops);
927         }
928
929         /* The HCI_BREDR type covers single-mode LE, single-mode BR/EDR
930          * and dual-mode BR/EDR/LE controllers. AMP controllers only need the
931          * first stage init.
932          */
933         if (hdev->dev_type != HCI_BREDR)
934                 return 0;
935
936         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
937         if (err < 0)
938                 return err;
939
940         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
941         if (err < 0)
942                 return err;
943
944         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
945         if (err < 0)
946                 return err;
947
948         /* This function is only called when the controller is actually in
949          * configured state. When the controller is marked as unconfigured,
950          * this initialization procedure is not run.
951          *
952          * It means that it is possible that a controller runs through its
953          * setup phase and then discovers missing settings. If that is the
954          * case, then this function will not be called. It then will only
955          * be called during the config phase.
956          *
957          * So only when in setup phase or config phase, create the debugfs
958          * entries and register the SMP channels.
959          */
960         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
961             !test_bit(HCI_CONFIG, &hdev->dev_flags))
962                 return 0;
963
964         hci_debugfs_create_common(hdev);
965
966         if (lmp_bredr_capable(hdev))
967                 hci_debugfs_create_bredr(hdev);
968
969         if (lmp_le_capable(hdev))
970                 hci_debugfs_create_le(hdev);
971
972         return 0;
973 }
974
975 static void hci_init0_req(struct hci_request *req, unsigned long opt)
976 {
977         struct hci_dev *hdev = req->hdev;
978
979         BT_DBG("%s %ld", hdev->name, opt);
980
981         /* Reset */
982         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
983                 hci_reset_req(req, 0);
984
985         /* Read Local Version */
986         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
987
988         /* Read BD Address */
989         if (hdev->set_bdaddr)
990                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
991 }
992
993 static int __hci_unconf_init(struct hci_dev *hdev)
994 {
995         int err;
996
997         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
998                 return 0;
999
1000         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1001         if (err < 0)
1002                 return err;
1003
1004         return 0;
1005 }
1006
1007 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1008 {
1009         __u8 scan = opt;
1010
1011         BT_DBG("%s %x", req->hdev->name, scan);
1012
1013         /* Inquiry and Page scans */
1014         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1015 }
1016
1017 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1018 {
1019         __u8 auth = opt;
1020
1021         BT_DBG("%s %x", req->hdev->name, auth);
1022
1023         /* Authentication */
1024         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1025 }
1026
1027 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1028 {
1029         __u8 encrypt = opt;
1030
1031         BT_DBG("%s %x", req->hdev->name, encrypt);
1032
1033         /* Encryption */
1034         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1035 }
1036
1037 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1038 {
1039         __le16 policy = cpu_to_le16(opt);
1040
1041         BT_DBG("%s %x", req->hdev->name, policy);
1042
1043         /* Default link policy */
1044         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1045 }
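
/* These one-command builders are driven through hci_req_sync() by the
 * HCISETSCAN, HCISETAUTH, HCISETENCRYPT and HCISETLINKPOL ioctl
 * handlers, which fall outside this excerpt, roughly as:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
 *			   HCI_INIT_TIMEOUT);
 *
 * where dr is the struct hci_dev_req copied in from userspace.
 */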
1046
1047 /* Get HCI device by index.
1048  * Device is held on return. */
1049 struct hci_dev *hci_dev_get(int index)
1050 {
1051         struct hci_dev *hdev = NULL, *d;
1052
1053         BT_DBG("%d", index);
1054
1055         if (index < 0)
1056                 return NULL;
1057
1058         read_lock(&hci_dev_list_lock);
1059         list_for_each_entry(d, &hci_dev_list, list) {
1060                 if (d->id == index) {
1061                         hdev = hci_dev_hold(d);
1062                         break;
1063                 }
1064         }
1065         read_unlock(&hci_dev_list_lock);
1066         return hdev;
1067 }
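
/* Callers own a reference on the returned hdev and must drop it with
 * hci_dev_put() when done, as hci_inquiry() below does:
 *
 *	hdev = hci_dev_get(ir.dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */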
1068
1069 /* ---- Inquiry support ---- */
1070
1071 bool hci_discovery_active(struct hci_dev *hdev)
1072 {
1073         struct discovery_state *discov = &hdev->discovery;
1074
1075         switch (discov->state) {
1076         case DISCOVERY_FINDING:
1077         case DISCOVERY_RESOLVING:
1078                 return true;
1079
1080         default:
1081                 return false;
1082         }
1083 }
1084
1085 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1086 {
1087         int old_state = hdev->discovery.state;
1088
1089         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1090
1091         if (old_state == state)
1092                 return;
1093
1094         hdev->discovery.state = state;
1095
1096         switch (state) {
1097         case DISCOVERY_STOPPED:
1098 #ifndef CONFIG_TIZEN_WIP
1099                 hci_update_background_scan(hdev);
1100 #endif
1101
1102                 if (old_state != DISCOVERY_STARTING)
1103                         mgmt_discovering(hdev, 0);
1104                 break;
1105         case DISCOVERY_STARTING:
1106                 break;
1107         case DISCOVERY_FINDING:
1108                 mgmt_discovering(hdev, 1);
1109                 break;
1110         case DISCOVERY_RESOLVING:
1111                 break;
1112         case DISCOVERY_STOPPING:
1113                 break;
1114         }
1115 }
1116 #ifdef CONFIG_TIZEN_WIP
1117 /* BEGIN TIZEN_Bluetooth :: Separate LE discovery */
1118 bool hci_le_discovery_active(struct hci_dev *hdev)
1119 {
1120         struct discovery_state *discov = &hdev->le_discovery;
1121
1122         switch (discov->state) {
1123         case DISCOVERY_FINDING:
1124         case DISCOVERY_RESOLVING:
1125                 return true;
1126
1127         default:
1128                 return false;
1129         }
1130 }
1131
1132 void hci_le_discovery_set_state(struct hci_dev *hdev, int state)
1133 {
1134         BT_DBG("%s state %u -> %u", hdev->name, hdev->le_discovery.state, state);
1135
1136         if (hdev->le_discovery.state == state)
1137                 return;
1138
1139         switch (state) {
1140         case DISCOVERY_STOPPED:
1141                 hci_update_background_scan(hdev);
1142
1143                 if (hdev->le_discovery.state != DISCOVERY_STARTING)
1144                         mgmt_le_discovering(hdev, 0);
1145                 break;
1146         case DISCOVERY_STARTING:
1147                 break;
1148         case DISCOVERY_FINDING:
1149                 mgmt_le_discovering(hdev, 1);
1150                 break;
1151         case DISCOVERY_RESOLVING:
1152                 break;
1153         case DISCOVERY_STOPPING:
1154                 break;
1155         }
1156
1157         hdev->le_discovery.state = state;
1158 }
1159 /* END TIZEN_Bluetooth */
1160 #endif
1161
1162 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1163 {
1164         struct discovery_state *cache = &hdev->discovery;
1165         struct inquiry_entry *p, *n;
1166
1167         list_for_each_entry_safe(p, n, &cache->all, all) {
1168                 list_del(&p->all);
1169                 kfree(p);
1170         }
1171
1172         INIT_LIST_HEAD(&cache->unknown);
1173         INIT_LIST_HEAD(&cache->resolve);
1174 }
1175
1176 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1177                                                bdaddr_t *bdaddr)
1178 {
1179         struct discovery_state *cache = &hdev->discovery;
1180         struct inquiry_entry *e;
1181
1182         BT_DBG("cache %p, %pMR", cache, bdaddr);
1183
1184         list_for_each_entry(e, &cache->all, all) {
1185                 if (!bacmp(&e->data.bdaddr, bdaddr))
1186                         return e;
1187         }
1188
1189         return NULL;
1190 }
1191
1192 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1193                                                        bdaddr_t *bdaddr)
1194 {
1195         struct discovery_state *cache = &hdev->discovery;
1196         struct inquiry_entry *e;
1197
1198         BT_DBG("cache %p, %pMR", cache, bdaddr);
1199
1200         list_for_each_entry(e, &cache->unknown, list) {
1201                 if (!bacmp(&e->data.bdaddr, bdaddr))
1202                         return e;
1203         }
1204
1205         return NULL;
1206 }
1207
1208 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1209                                                        bdaddr_t *bdaddr,
1210                                                        int state)
1211 {
1212         struct discovery_state *cache = &hdev->discovery;
1213         struct inquiry_entry *e;
1214
1215         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1216
1217         list_for_each_entry(e, &cache->resolve, list) {
1218                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1219                         return e;
1220                 if (!bacmp(&e->data.bdaddr, bdaddr))
1221                         return e;
1222         }
1223
1224         return NULL;
1225 }
1226
1227 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1228                                       struct inquiry_entry *ie)
1229 {
1230         struct discovery_state *cache = &hdev->discovery;
1231         struct list_head *pos = &cache->resolve;
1232         struct inquiry_entry *p;
1233
1234         list_del(&ie->list);
1235
1236         list_for_each_entry(p, &cache->resolve, list) {
1237                 if (p->name_state != NAME_PENDING &&
1238                     abs(p->data.rssi) >= abs(ie->data.rssi))
1239                         break;
1240                 pos = &p->list;
1241         }
1242
1243         list_add(&ie->list, pos);
1244 }
1245
1246 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1247                              bool name_known)
1248 {
1249         struct discovery_state *cache = &hdev->discovery;
1250         struct inquiry_entry *ie;
1251         u32 flags = 0;
1252
1253         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1254
1255         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1256
1257         if (!data->ssp_mode)
1258                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1259
1260         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1261         if (ie) {
1262                 if (!ie->data.ssp_mode)
1263                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1264
1265                 if (ie->name_state == NAME_NEEDED &&
1266                     data->rssi != ie->data.rssi) {
1267                         ie->data.rssi = data->rssi;
1268                         hci_inquiry_cache_update_resolve(hdev, ie);
1269                 }
1270
1271                 goto update;
1272         }
1273
1274         /* Entry not in the cache. Add new one. */
1275         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1276         if (!ie) {
1277                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1278                 goto done;
1279         }
1280
1281         list_add(&ie->all, &cache->all);
1282
1283         if (name_known) {
1284                 ie->name_state = NAME_KNOWN;
1285         } else {
1286                 ie->name_state = NAME_NOT_KNOWN;
1287                 list_add(&ie->list, &cache->unknown);
1288         }
1289
1290 update:
1291         if (name_known && ie->name_state != NAME_KNOWN &&
1292             ie->name_state != NAME_PENDING) {
1293                 ie->name_state = NAME_KNOWN;
1294                 list_del(&ie->list);
1295         }
1296
1297         memcpy(&ie->data, data, sizeof(*data));
1298         ie->timestamp = jiffies;
1299         cache->timestamp = jiffies;
1300
1301         if (ie->name_state == NAME_NOT_KNOWN)
1302                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1303
1304 done:
1305         return flags;
1306 }
1307
1308 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1309 {
1310         struct discovery_state *cache = &hdev->discovery;
1311         struct inquiry_info *info = (struct inquiry_info *) buf;
1312         struct inquiry_entry *e;
1313         int copied = 0;
1314
1315         list_for_each_entry(e, &cache->all, all) {
1316                 struct inquiry_data *data = &e->data;
1317
1318                 if (copied >= num)
1319                         break;
1320
1321                 bacpy(&info->bdaddr, &data->bdaddr);
1322                 info->pscan_rep_mode    = data->pscan_rep_mode;
1323                 info->pscan_period_mode = data->pscan_period_mode;
1324                 info->pscan_mode        = data->pscan_mode;
1325                 memcpy(info->dev_class, data->dev_class, 3);
1326                 info->clock_offset      = data->clock_offset;
1327
1328                 info++;
1329                 copied++;
1330         }
1331
1332         BT_DBG("cache %p, copied %d", cache, copied);
1333         return copied;
1334 }
1335
1336 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1337 {
1338         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1339         struct hci_dev *hdev = req->hdev;
1340         struct hci_cp_inquiry cp;
1341
1342         BT_DBG("%s", hdev->name);
1343
1344         if (test_bit(HCI_INQUIRY, &hdev->flags))
1345                 return;
1346
1347         /* Start Inquiry */
1348         memcpy(&cp.lap, &ir->lap, 3);
1349         cp.length  = ir->length;
1350         cp.num_rsp = ir->num_rsp;
1351         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1352 }
1353
1354 #ifdef CONFIG_TIZEN_WIP
1355 static int wait_inquiry(void *word)
1356 {
1357         schedule();
1358         return signal_pending(current);
1359 }
1360 #endif
1361
1362 int hci_inquiry(void __user *arg)
1363 {
1364         __u8 __user *ptr = arg;
1365         struct hci_inquiry_req ir;
1366         struct hci_dev *hdev;
1367         int err = 0, do_inquiry = 0, max_rsp;
1368         long timeo;
1369         __u8 *buf;
1370
1371         if (copy_from_user(&ir, ptr, sizeof(ir)))
1372                 return -EFAULT;
1373
1374         hdev = hci_dev_get(ir.dev_id);
1375         if (!hdev)
1376                 return -ENODEV;
1377
1378         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1379                 err = -EBUSY;
1380                 goto done;
1381         }
1382
1383         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1384                 err = -EOPNOTSUPP;
1385                 goto done;
1386         }
1387
1388         if (hdev->dev_type != HCI_BREDR) {
1389                 err = -EOPNOTSUPP;
1390                 goto done;
1391         }
1392
1393         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1394                 err = -EOPNOTSUPP;
1395                 goto done;
1396         }
1397
1398         hci_dev_lock(hdev);
1399         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1400             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1401                 hci_inquiry_cache_flush(hdev);
1402                 do_inquiry = 1;
1403         }
1404         hci_dev_unlock(hdev);
1405
1406         timeo = ir.length * msecs_to_jiffies(2000);
1407
1408         if (do_inquiry) {
1409                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1410                                    timeo);
1411                 if (err < 0)
1412                         goto done;
1413
1414                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1415                  * cleared). If it is interrupted by a signal, return -EINTR.
1416                  */
1417 #ifdef CONFIG_TIZEN_WIP
1418                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1419                         TASK_INTERRUPTIBLE))
1420 #else
1421                 /* The signature of wait_on_bit() changed in later kernels,
1422                  * so if this kernel is migrated, the code below should be enabled instead.
1423                  */
1424                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1425                                 TASK_INTERRUPTIBLE))
1426 #endif
1427                         return -EINTR;
1428         }
1429
1430         /* For an unlimited number of responses, use a buffer with
1431          * 255 entries.
1432          */
1433         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1434
1435         /* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
1436          * and then copy it to user space.
1437          */
1438         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1439         if (!buf) {
1440                 err = -ENOMEM;
1441                 goto done;
1442         }
1443
1444         hci_dev_lock(hdev);
1445         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1446         hci_dev_unlock(hdev);
1447
1448         BT_DBG("num_rsp %d", ir.num_rsp);
1449
1450         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1451                 ptr += sizeof(ir);
1452                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1453                                  ir.num_rsp))
1454                         err = -EFAULT;
1455         } else
1456                 err = -EFAULT;
1457
1458         kfree(buf);
1459
1460 done:
1461         hci_dev_put(hdev);
1462         return err;
1463 }
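
/* Userspace reaches the handler above through the HCIINQUIRY ioctl on a
 * raw HCI socket. A minimal sketch (error handling omitted; the LAP
 * bytes encode the General Inquiry Access Code 0x9e8b33):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(dd, HCIINQUIRY, &buf);
 */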
1464
1465 static int hci_dev_do_open(struct hci_dev *hdev)
1466 {
1467         int ret = 0;
1468
1469         BT_DBG("%s %p", hdev->name, hdev);
1470
1471         hci_req_lock(hdev);
1472
1473         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1474                 ret = -ENODEV;
1475                 goto done;
1476         }
1477
1478         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1479             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1480                 /* Check for rfkill but allow the HCI setup stage to
1481                  * proceed (which in itself doesn't cause any RF activity).
1482                  */
1483                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1484                         ret = -ERFKILL;
1485                         goto done;
1486                 }
1487
1488                 /* Check for valid public address or a configured static
1489                  * random address, but let the HCI setup proceed to
1490                  * be able to determine if there is a public address
1491                  * or not.
1492                  *
1493                  * In case of user channel usage, it is not important
1494                  * if a public address or static random address is
1495                  * available.
1496                  *
1497                  * This check is only valid for BR/EDR controllers
1498                  * since AMP controllers do not have an address.
1499                  */
1500                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1501                     hdev->dev_type == HCI_BREDR &&
1502                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1503                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1504                         ret = -EADDRNOTAVAIL;
1505                         goto done;
1506                 }
1507         }
1508
1509         if (test_bit(HCI_UP, &hdev->flags)) {
1510                 ret = -EALREADY;
1511                 goto done;
1512         }
1513
1514         if (hdev->open(hdev)) {
1515                 ret = -EIO;
1516                 goto done;
1517         }
1518
1519         atomic_set(&hdev->cmd_cnt, 1);
1520         set_bit(HCI_INIT, &hdev->flags);
1521
1522         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1523                 if (hdev->setup)
1524                         ret = hdev->setup(hdev);
1525
1526                 /* The transport driver can set these quirks before
1527                  * creating the HCI device or in its setup callback.
1528                  *
1529                  * In case any of them is set, the controller has to
1530                  * start up as unconfigured.
1531                  */
1532                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1533                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1534                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
1535
1536                 /* For an unconfigured controller it is required to
1537                  * read at least the version information provided by
1538                  * the Read Local Version Information command.
1539                  *
1540                  * If the set_bdaddr driver callback is provided, then
1541                  * also the original Bluetooth public device address
1542                  * will be read using the Read BD Address command.
1543                  */
1544                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
1545                         ret = __hci_unconf_init(hdev);
1546         }
1547
1548         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1549                 /* If public address change is configured, ensure that
1550                  * the address gets programmed. If the driver does not
1551                  * support changing the public address, fail the power
1552                  * on procedure.
1553                  */
1554                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1555                     hdev->set_bdaddr)
1556                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1557                 else
1558                         ret = -EADDRNOTAVAIL;
1559         }
1560
1561         if (!ret) {
1562                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1563                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1564                         ret = __hci_init(hdev);
1565         }
1566
1567         clear_bit(HCI_INIT, &hdev->flags);
1568
1569         if (!ret) {
1570                 hci_dev_hold(hdev);
1571                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1572                 set_bit(HCI_UP, &hdev->flags);
1573                 hci_notify(hdev, HCI_DEV_UP);
1574                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1575                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
1576                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1577                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1578                     hdev->dev_type == HCI_BREDR) {
1579                         hci_dev_lock(hdev);
1580                         mgmt_powered(hdev, 1);
1581                         hci_dev_unlock(hdev);
1582                 }
1583         } else {
1584                 /* Init failed, cleanup */
1585                 flush_work(&hdev->tx_work);
1586                 flush_work(&hdev->cmd_work);
1587                 flush_work(&hdev->rx_work);
1588
1589                 skb_queue_purge(&hdev->cmd_q);
1590                 skb_queue_purge(&hdev->rx_q);
1591
1592                 if (hdev->flush)
1593                         hdev->flush(hdev);
1594
1595                 if (hdev->sent_cmd) {
1596                         kfree_skb(hdev->sent_cmd);
1597                         hdev->sent_cmd = NULL;
1598                 }
1599
1600                 hdev->close(hdev);
1601                 hdev->flags &= BIT(HCI_RAW);
1602         }
1603
1604 done:
1605         hci_req_unlock(hdev);
1606         return ret;
1607 }
1608
1609 /* ---- HCI ioctl helpers ---- */
1610
1611 int hci_dev_open(__u16 dev)
1612 {
1613         struct hci_dev *hdev;
1614         int err;
1615
1616         hdev = hci_dev_get(dev);
1617         if (!hdev)
1618                 return -ENODEV;
1619
1620         /* Devices that are marked as unconfigured can only be powered
1621          * up as user channel. Trying to bring them up as normal devices
1622          * will result in a failure. Only user channel operation is
1623          * possible.
1624          *
1625          * When this function is called for a user channel, the flag
1626          * HCI_USER_CHANNEL will be set first before attempting to
1627          * open the device.
1628          */
1629         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1630             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1631                 err = -EOPNOTSUPP;
1632                 goto done;
1633         }
1634
1635         /* We need to ensure that no other power on/off work is pending
1636          * before proceeding to call hci_dev_do_open. This is
1637          * particularly important if the setup procedure has not yet
1638          * completed.
1639          */
1640         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1641                 cancel_delayed_work(&hdev->power_off);
1642
1643         /* After this call it is guaranteed that the setup procedure
1644          * has finished. This means that error conditions like RFKILL
1645          * or no valid public or static random address apply.
1646          */
1647         flush_workqueue(hdev->req_workqueue);
1648
1649         /* For controllers not using the management interface and that
1650          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1651          * so that pairing works for them. Once the management interface
1652          * is in use this bit will be cleared again and userspace has
1653          * to explicitly enable it.
1654          */
1655         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1656             !test_bit(HCI_MGMT, &hdev->dev_flags))
1657                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1658
1659         err = hci_dev_do_open(hdev);
1660
1661 done:
1662         hci_dev_put(hdev);
1663         return err;
1664 }
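
/* Illustrative sketch (not part of this file): hci_dev_open() is what the
 * legacy HCIDEVUP ioctl ends up calling. A minimal userspace caller, built
 * against the BlueZ headers, might look roughly like this; the device index
 * (0 for hci0) and the helper name are assumptions for the example.
 */
#if 0
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int bring_up_hci0(void)
{
	int ctl, err = 0;

	/* A raw HCI control socket is used for the legacy ioctls */
	ctl = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
	if (ctl < 0)
		return -errno;

	/* HCIDEVUP takes the device index; this reaches hci_dev_open(0) */
	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
		err = -errno;

	close(ctl);
	return err;
}
#endif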
1665
1666 /* This function requires the caller holds hdev->lock */
1667 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1668 {
1669         struct hci_conn_params *p;
1670
1671         list_for_each_entry(p, &hdev->le_conn_params, list) {
1672                 if (p->conn) {
1673                         hci_conn_drop(p->conn);
1674                         hci_conn_put(p->conn);
1675                         p->conn = NULL;
1676                 }
1677                 list_del_init(&p->action);
1678         }
1679
1680         BT_DBG("All LE pending actions cleared");
1681 }
1682
1683 static int hci_dev_do_close(struct hci_dev *hdev)
1684 {
1685         BT_DBG("%s %p", hdev->name, hdev);
1686
1687         cancel_delayed_work(&hdev->power_off);
1688
1689         hci_req_cancel(hdev, ENODEV);
1690         hci_req_lock(hdev);
1691
1692         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1693                 cancel_delayed_work_sync(&hdev->cmd_timer);
1694                 hci_req_unlock(hdev);
1695                 return 0;
1696         }
1697
1698         /* Flush RX and TX works */
1699         flush_work(&hdev->tx_work);
1700         flush_work(&hdev->rx_work);
1701
1702         if (hdev->discov_timeout > 0) {
1703                 cancel_delayed_work(&hdev->discov_off);
1704                 hdev->discov_timeout = 0;
1705                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1706                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1707         }
1708
1709         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1710                 cancel_delayed_work(&hdev->service_cache);
1711
1712         cancel_delayed_work_sync(&hdev->le_scan_disable);
1713
1714         if (test_bit(HCI_MGMT, &hdev->dev_flags))
1715                 cancel_delayed_work_sync(&hdev->rpa_expired);
1716
1717         /* Avoid potential lockdep warnings from the *_flush() calls by
1718          * ensuring the workqueue is empty up front.
1719          */
1720         drain_workqueue(hdev->workqueue);
1721
1722         hci_dev_lock(hdev);
1723
1724         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1725                 if (hdev->dev_type == HCI_BREDR)
1726                         mgmt_powered(hdev, 0);
1727         }
1728
1729         hci_inquiry_cache_flush(hdev);
1730         hci_pend_le_actions_clear(hdev);
1731         hci_conn_hash_flush(hdev);
1732         hci_dev_unlock(hdev);
1733
1734         hci_notify(hdev, HCI_DEV_DOWN);
1735
1736         if (hdev->flush)
1737                 hdev->flush(hdev);
1738
1739         /* Reset device */
1740         skb_queue_purge(&hdev->cmd_q);
1741         atomic_set(&hdev->cmd_cnt, 1);
1742         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1743             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1744             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1745                 set_bit(HCI_INIT, &hdev->flags);
1746                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1747                 clear_bit(HCI_INIT, &hdev->flags);
1748         }
1749
1750         /* Flush cmd work */
1751         flush_work(&hdev->cmd_work);
1752
1753         /* Drop queues */
1754         skb_queue_purge(&hdev->rx_q);
1755         skb_queue_purge(&hdev->cmd_q);
1756         skb_queue_purge(&hdev->raw_q);
1757
1758         /* Drop last sent command */
1759         if (hdev->sent_cmd) {
1760                 cancel_delayed_work_sync(&hdev->cmd_timer);
1761                 kfree_skb(hdev->sent_cmd);
1762                 hdev->sent_cmd = NULL;
1763         }
1764
1765         kfree_skb(hdev->recv_evt);
1766         hdev->recv_evt = NULL;
1767
1768         /* After this point our queues are empty
1769          * and no tasks are scheduled. */
1770         hdev->close(hdev);
1771
1772         /* Clear flags */
1773         hdev->flags &= BIT(HCI_RAW);
1774         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1775
1776         /* Controller radio is available but is currently powered down */
1777         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1778
1779         memset(hdev->eir, 0, sizeof(hdev->eir));
1780         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1781         bacpy(&hdev->random_addr, BDADDR_ANY);
1782
1783         hci_req_unlock(hdev);
1784
1785         hci_dev_put(hdev);
1786         return 0;
1787 }
1788
1789 int hci_dev_close(__u16 dev)
1790 {
1791         struct hci_dev *hdev;
1792         int err;
1793
1794         hdev = hci_dev_get(dev);
1795         if (!hdev)
1796                 return -ENODEV;
1797
1798         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1799                 err = -EBUSY;
1800                 goto done;
1801         }
1802
1803         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1804                 cancel_delayed_work(&hdev->power_off);
1805
1806         err = hci_dev_do_close(hdev);
1807
1808 done:
1809         hci_dev_put(hdev);
1810         return err;
1811 }
1812
1813 int hci_dev_reset(__u16 dev)
1814 {
1815         struct hci_dev *hdev;
1816         int ret = 0;
1817
1818         hdev = hci_dev_get(dev);
1819         if (!hdev)
1820                 return -ENODEV;
1821
1822         hci_req_lock(hdev);
1823
1824         if (!test_bit(HCI_UP, &hdev->flags)) {
1825                 ret = -ENETDOWN;
1826                 goto done;
1827         }
1828
1829         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1830                 ret = -EBUSY;
1831                 goto done;
1832         }
1833
1834         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1835                 ret = -EOPNOTSUPP;
1836                 goto done;
1837         }
1838
1839         /* Drop queues */
1840         skb_queue_purge(&hdev->rx_q);
1841         skb_queue_purge(&hdev->cmd_q);
1842
1843         /* Avoid potential lockdep warnings from the *_flush() calls by
1844          * ensuring the workqueue is empty up front.
1845          */
1846         drain_workqueue(hdev->workqueue);
1847
1848         hci_dev_lock(hdev);
1849         hci_inquiry_cache_flush(hdev);
1850         hci_conn_hash_flush(hdev);
1851         hci_dev_unlock(hdev);
1852
1853         if (hdev->flush)
1854                 hdev->flush(hdev);
1855
1856         atomic_set(&hdev->cmd_cnt, 1);
1857         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1858
1859         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1860
1861 done:
1862         hci_req_unlock(hdev);
1863         hci_dev_put(hdev);
1864         return ret;
1865 }
1866
1867 int hci_dev_reset_stat(__u16 dev)
1868 {
1869         struct hci_dev *hdev;
1870         int ret = 0;
1871
1872         hdev = hci_dev_get(dev);
1873         if (!hdev)
1874                 return -ENODEV;
1875
1876         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1877                 ret = -EBUSY;
1878                 goto done;
1879         }
1880
1881         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1882                 ret = -EOPNOTSUPP;
1883                 goto done;
1884         }
1885
1886         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1887
1888 done:
1889         hci_dev_put(hdev);
1890         return ret;
1891 }
1892
1893 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1894 {
1895         bool conn_changed, discov_changed;
1896
1897         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1898
1899         if ((scan & SCAN_PAGE))
1900                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1901                                                  &hdev->dev_flags);
1902         else
1903                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1904                                                   &hdev->dev_flags);
1905
1906         if ((scan & SCAN_INQUIRY)) {
1907                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1908                                                    &hdev->dev_flags);
1909         } else {
1910                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1911                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1912                                                     &hdev->dev_flags);
1913         }
1914
1915         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1916                 return;
1917
1918         if (conn_changed || discov_changed) {
1919                 /* In case this was disabled through mgmt */
1920                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1921
1922                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1923                         mgmt_update_adv_data(hdev);
1924
1925                 mgmt_new_settings(hdev);
1926         }
1927 }
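
/* Illustrative only: how the scan parameter maps onto dev_flags. SCAN_PAGE
 * and SCAN_INQUIRY are the standard HCI Write Scan Enable bits from hci.h.
 */
#if 0
hci_update_scan_state(hdev, SCAN_PAGE);                /* connectable only */
hci_update_scan_state(hdev, SCAN_PAGE | SCAN_INQUIRY); /* + discoverable   */
hci_update_scan_state(hdev, SCAN_DISABLED);            /* clears both      */
#endif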
1928
1929 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1930 {
1931         struct hci_dev *hdev;
1932         struct hci_dev_req dr;
1933         int err = 0;
1934
1935         if (copy_from_user(&dr, arg, sizeof(dr)))
1936                 return -EFAULT;
1937
1938         hdev = hci_dev_get(dr.dev_id);
1939         if (!hdev)
1940                 return -ENODEV;
1941
1942         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1943                 err = -EBUSY;
1944                 goto done;
1945         }
1946
1947         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1948                 err = -EOPNOTSUPP;
1949                 goto done;
1950         }
1951
1952         if (hdev->dev_type != HCI_BREDR) {
1953                 err = -EOPNOTSUPP;
1954                 goto done;
1955         }
1956
1957         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1958                 err = -EOPNOTSUPP;
1959                 goto done;
1960         }
1961
1962         switch (cmd) {
1963         case HCISETAUTH:
1964                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1965                                    HCI_INIT_TIMEOUT);
1966                 break;
1967
1968         case HCISETENCRYPT:
1969                 if (!lmp_encrypt_capable(hdev)) {
1970                         err = -EOPNOTSUPP;
1971                         break;
1972                 }
1973
1974                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1975                         /* Auth must be enabled first */
1976                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1977                                            HCI_INIT_TIMEOUT);
1978                         if (err)
1979                                 break;
1980                 }
1981
1982                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1983                                    HCI_INIT_TIMEOUT);
1984                 break;
1985
1986         case HCISETSCAN:
1987                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1988                                    HCI_INIT_TIMEOUT);
1989
1990                 /* Ensure that the connectable and discoverable states
1991                  * get correctly modified as this was a non-mgmt change.
1992                  */
1993                 if (!err)
1994                         hci_update_scan_state(hdev, dr.dev_opt);
1995                 break;
1996
1997         case HCISETLINKPOL:
1998                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1999                                    HCI_INIT_TIMEOUT);
2000                 break;
2001
2002         case HCISETLINKMODE:
2003                 hdev->link_mode = ((__u16) dr.dev_opt) &
2004                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2005                 break;
2006
2007         case HCISETPTYPE:
2008                 hdev->pkt_type = (__u16) dr.dev_opt;
2009                 break;
2010
2011         case HCISETACLMTU:
2012                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2013                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2014                 break;
2015
2016         case HCISETSCOMTU:
2017                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2018                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2019                 break;
2020
2021         default:
2022                 err = -EINVAL;
2023                 break;
2024         }
2025
2026 done:
2027         hci_dev_put(hdev);
2028         return err;
2029 }
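
/* Illustrative userspace sketch for the ioctls above, assuming 'ctl' is an
 * open AF_BLUETOOTH/BTPROTO_HCI control socket (see the earlier sketch).
 * The HCISETACLMTU packing mirrors the two 16-bit loads in hci_dev_cmd():
 * on a little-endian host the low half of dev_opt is the packet count and
 * the high half the MTU. The MTU/packet values here are hypothetical.
 */
#if 0
struct hci_dev_req dr = { .dev_id = 0 };

dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;
ioctl(ctl, HCISETSCAN, (unsigned long) &dr);

dr.dev_opt = (1021 << 16) | 8;          /* ACL MTU 1021, 8 packets */
ioctl(ctl, HCISETACLMTU, (unsigned long) &dr);
#endif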
2030
2031 int hci_get_dev_list(void __user *arg)
2032 {
2033         struct hci_dev *hdev;
2034         struct hci_dev_list_req *dl;
2035         struct hci_dev_req *dr;
2036         int n = 0, size, err;
2037         __u16 dev_num;
2038
2039         if (get_user(dev_num, (__u16 __user *) arg))
2040                 return -EFAULT;
2041
2042         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2043                 return -EINVAL;
2044
2045         size = sizeof(*dl) + dev_num * sizeof(*dr);
2046
2047         dl = kzalloc(size, GFP_KERNEL);
2048         if (!dl)
2049                 return -ENOMEM;
2050
2051         dr = dl->dev_req;
2052
2053         read_lock(&hci_dev_list_lock);
2054         list_for_each_entry(hdev, &hci_dev_list, list) {
2055                 unsigned long flags = hdev->flags;
2056
2057                 /* When the auto-off is configured it means the transport
2058                  * is running, but in that case still indicate that the
2059                  * device is actually down.
2060                  */
2061                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2062                         flags &= ~BIT(HCI_UP);
2063
2064                 (dr + n)->dev_id  = hdev->id;
2065                 (dr + n)->dev_opt = flags;
2066
2067                 if (++n >= dev_num)
2068                         break;
2069         }
2070         read_unlock(&hci_dev_list_lock);
2071
2072         dl->dev_num = n;
2073         size = sizeof(*dl) + n * sizeof(*dr);
2074
2075         err = copy_to_user(arg, dl, size);
2076         kfree(dl);
2077
2078         return err ? -EFAULT : 0;
2079 }
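
/* Illustrative userspace counterpart of HCIGETDEVLIST (a sketch in the style
 * of tools like hciconfig; HCI_MAX_DEV and the structs come from the BlueZ
 * headers, and 'ctl' is an open HCI control socket as before).
 */
#if 0
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
int i;

dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
dl->dev_num = HCI_MAX_DEV;
dr = dl->dev_req;

if (ioctl(ctl, HCIGETDEVLIST, (void *) dl) == 0)
	for (i = 0; i < dl->dev_num; i++)
		printf("hci%d flags 0x%x\n", dr[i].dev_id, dr[i].dev_opt);

free(dl);
#endif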
2080
2081 int hci_get_dev_info(void __user *arg)
2082 {
2083         struct hci_dev *hdev;
2084         struct hci_dev_info di;
2085         unsigned long flags;
2086         int err = 0;
2087
2088         if (copy_from_user(&di, arg, sizeof(di)))
2089                 return -EFAULT;
2090
2091         hdev = hci_dev_get(di.dev_id);
2092         if (!hdev)
2093                 return -ENODEV;
2094
2095         /* When the auto-off is configured it means the transport
2096          * is running, but in that case still indicate that the
2097          * device is actually down.
2098          */
2099         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2100                 flags = hdev->flags & ~BIT(HCI_UP);
2101         else
2102                 flags = hdev->flags;
2103
2104         strcpy(di.name, hdev->name);
2105         di.bdaddr   = hdev->bdaddr;
2106         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2107         di.flags    = flags;
2108         di.pkt_type = hdev->pkt_type;
2109         if (lmp_bredr_capable(hdev)) {
2110                 di.acl_mtu  = hdev->acl_mtu;
2111                 di.acl_pkts = hdev->acl_pkts;
2112                 di.sco_mtu  = hdev->sco_mtu;
2113                 di.sco_pkts = hdev->sco_pkts;
2114         } else {
2115                 di.acl_mtu  = hdev->le_mtu;
2116                 di.acl_pkts = hdev->le_pkts;
2117                 di.sco_mtu  = 0;
2118                 di.sco_pkts = 0;
2119         }
2120         di.link_policy = hdev->link_policy;
2121         di.link_mode   = hdev->link_mode;
2122
2123         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2124         memcpy(&di.features, &hdev->features, sizeof(di.features));
2125
2126         if (copy_to_user(arg, &di, sizeof(di)))
2127                 err = -EFAULT;
2128
2129         hci_dev_put(hdev);
2130
2131         return err;
2132 }
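
/* Illustrative sketch of the matching HCIGETDEVINFO call from userspace;
 * ba2str() is the BlueZ helper for printing a bdaddr_t, and 'ctl' is again
 * an open HCI control socket.
 */
#if 0
struct hci_dev_info di = { .dev_id = 0 };
char addr[18];

if (ioctl(ctl, HCIGETDEVINFO, (void *) &di) == 0) {
	ba2str(&di.bdaddr, addr);
	printf("%s %s acl_mtu %d\n", di.name, addr, di.acl_mtu);
}
#endif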
2133
2134 /* ---- Interface to HCI drivers ---- */
2135
2136 static int hci_rfkill_set_block(void *data, bool blocked)
2137 {
2138         struct hci_dev *hdev = data;
2139
2140         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2141
2142         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2143                 return -EBUSY;
2144
2145         if (blocked) {
2146                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2147                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2148                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2149                         hci_dev_do_close(hdev);
2150         } else {
2151                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2152         }
2153
2154         return 0;
2155 }
2156
2157 static const struct rfkill_ops hci_rfkill_ops = {
2158         .set_block = hci_rfkill_set_block,
2159 };
2160
2161 static void hci_power_on(struct work_struct *work)
2162 {
2163         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2164         int err;
2165
2166         BT_DBG("%s", hdev->name);
2167
2168         err = hci_dev_do_open(hdev);
2169         if (err < 0) {
2170                 hci_dev_lock(hdev);
2171                 mgmt_set_powered_failed(hdev, err);
2172                 hci_dev_unlock(hdev);
2173                 return;
2174         }
2175
2176         /* During the HCI setup phase, a few error conditions are
2177          * ignored and they need to be checked now. If they are still
2178          * valid, it is important to turn the device back off.
2179          */
2180         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2181             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2182             (hdev->dev_type == HCI_BREDR &&
2183              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2184              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2185                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2186                 hci_dev_do_close(hdev);
2187         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2188                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2189                                    HCI_AUTO_OFF_TIMEOUT);
2190         }
2191
2192         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2193                 /* For unconfigured devices, set the HCI_RAW flag
2194                  * so that userspace can easily identify them.
2195                  */
2196                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2197                         set_bit(HCI_RAW, &hdev->flags);
2198
2199                 /* For fully configured devices, this will send
2200                  * the Index Added event. For unconfigured devices,
2201                  * it will send the Unconfigured Index Added event.
2202                  *
2203                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2204                  * and no event will be sent.
2205                  */
2206                 mgmt_index_added(hdev);
2207         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2208                 /* When the controller is now configured, then it
2209                  * is important to clear the HCI_RAW flag.
2210                  */
2211                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2212                         clear_bit(HCI_RAW, &hdev->flags);
2213
2214                 /* Powering on the controller with HCI_CONFIG set only
2215                  * happens with the transition from unconfigured to
2216                  * configured. This will send the Index Added event.
2217                  */
2218                 mgmt_index_added(hdev);
2219         }
2220 }
2221
2222 static void hci_power_off(struct work_struct *work)
2223 {
2224         struct hci_dev *hdev = container_of(work, struct hci_dev,
2225                                             power_off.work);
2226
2227         BT_DBG("%s", hdev->name);
2228
2229         hci_dev_do_close(hdev);
2230
2231         smp_unregister(hdev);
2232 }
2233
2234 static void hci_discov_off(struct work_struct *work)
2235 {
2236         struct hci_dev *hdev;
2237
2238         hdev = container_of(work, struct hci_dev, discov_off.work);
2239
2240         BT_DBG("%s", hdev->name);
2241
2242         mgmt_discoverable_timeout(hdev);
2243 }
2244
2245 void hci_uuids_clear(struct hci_dev *hdev)
2246 {
2247         struct bt_uuid *uuid, *tmp;
2248
2249         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2250                 list_del(&uuid->list);
2251                 kfree(uuid);
2252         }
2253 }
2254
2255 void hci_link_keys_clear(struct hci_dev *hdev)
2256 {
2257         struct link_key *key;
2258
2259         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2260                 list_del_rcu(&key->list);
2261                 kfree_rcu(key, rcu);
2262         }
2263 }
2264
2265 void hci_smp_ltks_clear(struct hci_dev *hdev)
2266 {
2267         struct smp_ltk *k;
2268
2269         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2270                 list_del_rcu(&k->list);
2271                 kfree_rcu(k, rcu);
2272         }
2273 }
2274
2275 void hci_smp_irks_clear(struct hci_dev *hdev)
2276 {
2277         struct smp_irk *k;
2278
2279         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2280                 list_del_rcu(&k->list);
2281                 kfree_rcu(k, rcu);
2282         }
2283 }
2284
2285 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2286 {
2287         struct link_key *k;
2288
2289         rcu_read_lock();
2290         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2291                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2292                         rcu_read_unlock();
2293                         return k;
2294                 }
2295         }
2296         rcu_read_unlock();
2297
2298         return NULL;
2299 }
2300
2301 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2302                                u8 key_type, u8 old_key_type)
2303 {
2304         /* Legacy key */
2305         if (key_type < 0x03)
2306                 return true;
2307
2308         /* Debug keys are insecure so don't store them persistently */
2309         if (key_type == HCI_LK_DEBUG_COMBINATION)
2310                 return false;
2311
2312         /* Changed combination key and there's no previous one */
2313         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2314                 return false;
2315
2316         /* Security mode 3 case */
2317         if (!conn)
2318                 return true;
2319
2320         /* BR/EDR key derived using SC from an LE link */
2321         if (conn->type == LE_LINK)
2322                 return true;
2323
2324         /* Neither local nor remote side had no-bonding as a requirement */
2325         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2326                 return true;
2327
2328         /* Local side had dedicated bonding as requirement */
2329         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2330                 return true;
2331
2332         /* Remote side had dedicated bonding as requirement */
2333         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2334                 return true;
2335
2336 #ifdef CONFIG_TIZEN_WIP
2337         /* auth_type 0x01 means MITM was used, so store the authenticated key */
2338         if (key_type == HCI_LK_AUTH_COMBINATION_P192)
2339                 return true;
2340 #endif
2341         /* If none of the above criteria match, then don't store the key
2342          * persistently */
2343         return false;
2344 }
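
/* Illustrative expectations for hci_persistent_key() with a NULL conn (the
 * security mode 3 path); the HCI_LK_* constants come from hci.h.
 */
#if 0
hci_persistent_key(hdev, NULL, HCI_LK_COMBINATION, 0xff);       /* true: legacy */
hci_persistent_key(hdev, NULL, HCI_LK_DEBUG_COMBINATION, 0xff); /* false: debug */
hci_persistent_key(hdev, NULL, HCI_LK_AUTH_COMBINATION_P192, 0xff); /* true     */
#endif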
2345
2346 static u8 ltk_role(u8 type)
2347 {
2348         if (type == SMP_LTK)
2349                 return HCI_ROLE_MASTER;
2350
2351         return HCI_ROLE_SLAVE;
2352 }
2353
2354 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2355                              u8 addr_type, u8 role)
2356 {
2357         struct smp_ltk *k;
2358
2359         rcu_read_lock();
2360         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2361                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2362                         continue;
2363
2364                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2365                         rcu_read_unlock();
2366                         return k;
2367                 }
2368         }
2369         rcu_read_unlock();
2370
2371         return NULL;
2372 }
2373
2374 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2375 {
2376         struct smp_irk *irk;
2377
2378         rcu_read_lock();
2379         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2380                 if (!bacmp(&irk->rpa, rpa)) {
2381                         rcu_read_unlock();
2382                         return irk;
2383                 }
2384         }
2385
2386         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2387                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2388                         bacpy(&irk->rpa, rpa);
2389                         rcu_read_unlock();
2390                         return irk;
2391                 }
2392         }
2393         rcu_read_unlock();
2394
2395         return NULL;
2396 }
2397
2398 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2399                                      u8 addr_type)
2400 {
2401         struct smp_irk *irk;
2402
2403         /* Identity Address must be public or static random */
2404         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2405                 return NULL;
2406
2407         rcu_read_lock();
2408         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2409                 if (addr_type == irk->addr_type &&
2410                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2411                         rcu_read_unlock();
2412                         return irk;
2413                 }
2414         }
2415         rcu_read_unlock();
2416
2417         return NULL;
2418 }
2419
2420 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2421                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2422                                   u8 pin_len, bool *persistent)
2423 {
2424         struct link_key *key, *old_key;
2425         u8 old_key_type;
2426
2427         old_key = hci_find_link_key(hdev, bdaddr);
2428         if (old_key) {
2429                 old_key_type = old_key->type;
2430                 key = old_key;
2431         } else {
2432                 old_key_type = conn ? conn->key_type : 0xff;
2433                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2434                 if (!key)
2435                         return NULL;
2436                 list_add_rcu(&key->list, &hdev->link_keys);
2437         }
2438
2439         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2440
2441         /* Some buggy controller combinations generate a changed
2442          * combination key for legacy pairing even when there's no
2443          * previous key */
2444         if (type == HCI_LK_CHANGED_COMBINATION &&
2445             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2446                 type = HCI_LK_COMBINATION;
2447                 if (conn)
2448                         conn->key_type = type;
2449         }
2450
2451         bacpy(&key->bdaddr, bdaddr);
2452         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2453         key->pin_len = pin_len;
2454
2455         if (type == HCI_LK_CHANGED_COMBINATION)
2456                 key->type = old_key_type;
2457         else
2458                 key->type = type;
2459
2460         if (persistent)
2461                 *persistent = hci_persistent_key(hdev, conn, type,
2462                                                  old_key_type);
2463
2464         return key;
2465 }
2466
2467 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2468                             u8 addr_type, u8 type, u8 authenticated,
2469                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2470 {
2471         struct smp_ltk *key, *old_key;
2472         u8 role = ltk_role(type);
2473
2474         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2475         if (old_key)
2476                 key = old_key;
2477         else {
2478                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2479                 if (!key)
2480                         return NULL;
2481                 list_add_rcu(&key->list, &hdev->long_term_keys);
2482         }
2483
2484         bacpy(&key->bdaddr, bdaddr);
2485         key->bdaddr_type = addr_type;
2486         memcpy(key->val, tk, sizeof(key->val));
2487         key->authenticated = authenticated;
2488         key->ediv = ediv;
2489         key->rand = rand;
2490         key->enc_size = enc_size;
2491         key->type = type;
2492
2493         return key;
2494 }
2495
2496 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2497                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2498 {
2499         struct smp_irk *irk;
2500
2501         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2502         if (!irk) {
2503                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2504                 if (!irk)
2505                         return NULL;
2506
2507                 bacpy(&irk->bdaddr, bdaddr);
2508                 irk->addr_type = addr_type;
2509
2510                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2511         }
2512
2513         memcpy(irk->val, val, 16);
2514         bacpy(&irk->rpa, rpa);
2515
2516         return irk;
2517 }
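
/* Illustrative sketch of how an MGMT Set IRK handler could store a key via
 * hci_add_irk(); bdaddr and irk_val are hypothetical inputs here, and
 * BDADDR_ANY is passed while no resolvable private address is known yet.
 */
#if 0
struct smp_irk *irk;

hci_dev_lock(hdev);
irk = hci_add_irk(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC, irk_val, BDADDR_ANY);
hci_dev_unlock(hdev);
if (!irk)
	err = -ENOMEM;
#endif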
2518
2519 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2520 {
2521         struct link_key *key;
2522
2523         key = hci_find_link_key(hdev, bdaddr);
2524         if (!key)
2525                 return -ENOENT;
2526
2527         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2528
2529         list_del_rcu(&key->list);
2530         kfree_rcu(key, rcu);
2531
2532         return 0;
2533 }
2534
2535 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2536 {
2537         struct smp_ltk *k;
2538         int removed = 0;
2539
2540         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2541                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2542                         continue;
2543
2544                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2545
2546                 list_del_rcu(&k->list);
2547                 kfree_rcu(k, rcu);
2548                 removed++;
2549         }
2550
2551         return removed ? 0 : -ENOENT;
2552 }
2553
2554 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2555 {
2556         struct smp_irk *k;
2557
2558         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2559                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2560                         continue;
2561
2562                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2563
2564                 list_del_rcu(&k->list);
2565                 kfree_rcu(k, rcu);
2566         }
2567 }
2568
2569 #ifdef CONFIG_TIZEN_WIP
2570 int hci_set_rpa_res_support(struct hci_dev *hdev, bdaddr_t *bdaddr,
2571                             u8 addr_type, u8 enabled)
2572 {
2573         struct smp_irk *irk;
2574
2575         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2576         if (!irk)
2577                 return -ENOENT;
2578
2579         irk->rpa_res_support = enabled;
2580
2581         return 0;
2582 }
2583
2584 /* Handle the H/W TX timeout error event */
2585 static void hci_tx_timeout_error_evt(struct hci_dev *hdev)
2586 {
2587         BT_ERR("%s H/W TX Timeout error", hdev->name);
2588
2589         mgmt_tx_timeout_error(hdev);
2590 }
2591 #endif
2592
2593 /* HCI command timer function */
2594 static void hci_cmd_timeout(struct work_struct *work)
2595 {
2596         struct hci_dev *hdev = container_of(work, struct hci_dev,
2597                                             cmd_timer.work);
2598
2599         if (hdev->sent_cmd) {
2600                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2601                 u16 opcode = __le16_to_cpu(sent->opcode);
2602
2603                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2604         } else {
2605                 BT_ERR("%s command tx timeout", hdev->name);
2606         }
2607
2608 #ifdef CONFIG_TIZEN_WIP
2609         hci_tx_timeout_error_evt(hdev);
2610 #endif
2611         atomic_set(&hdev->cmd_cnt, 1);
2612         queue_work(hdev->workqueue, &hdev->cmd_work);
2613 }
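
/* For context (a sketch, not a definitive quote): the timer fired above is
 * armed whenever a command is handed to the driver, roughly the way
 * hci_cmd_work() does it:
 */
#if 0
schedule_delayed_work(&hdev->cmd_timer, HCI_CMD_TIMEOUT);
#endif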
2614
2615 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2616                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2617 {
2618         struct oob_data *data;
2619
2620         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2621                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2622                         continue;
2623                 if (data->bdaddr_type != bdaddr_type)
2624                         continue;
2625                 return data;
2626         }
2627
2628         return NULL;
2629 }
2630
2631 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2632                                u8 bdaddr_type)
2633 {
2634         struct oob_data *data;
2635
2636         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2637         if (!data)
2638                 return -ENOENT;
2639
2640         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2641
2642         list_del(&data->list);
2643         kfree(data);
2644
2645         return 0;
2646 }
2647
2648 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2649 {
2650         struct oob_data *data, *n;
2651
2652         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2653                 list_del(&data->list);
2654                 kfree(data);
2655         }
2656 }
2657
2658 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2659                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2660                             u8 *hash256, u8 *rand256)
2661 {
2662         struct oob_data *data;
2663
2664         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2665         if (!data) {
2666                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2667                 if (!data)
2668                         return -ENOMEM;
2669
2670                 bacpy(&data->bdaddr, bdaddr);
2671                 data->bdaddr_type = bdaddr_type;
2672                 list_add(&data->list, &hdev->remote_oob_data);
2673         }
2674
2675         if (hash192 && rand192) {
2676                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2677                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2678         } else {
2679                 memset(data->hash192, 0, sizeof(data->hash192));
2680                 memset(data->rand192, 0, sizeof(data->rand192));
2681         }
2682
2683         if (hash256 && rand256) {
2684                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2685                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2686         } else {
2687                 memset(data->hash256, 0, sizeof(data->hash256));
2688                 memset(data->rand256, 0, sizeof(data->rand256));
2689         }
2690
2691         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2692
2693         return 0;
2694 }
2695
2696 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2697                                          bdaddr_t *bdaddr, u8 type)
2698 {
2699         struct bdaddr_list *b;
2700
2701         list_for_each_entry(b, bdaddr_list, list) {
2702                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2703                         return b;
2704         }
2705
2706         return NULL;
2707 }
2708
2709 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2710 {
2711         struct list_head *p, *n;
2712
2713         list_for_each_safe(p, n, bdaddr_list) {
2714                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2715
2716                 list_del(p);
2717                 kfree(b);
2718         }
2719 }
2720
2721 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2722 {
2723         struct bdaddr_list *entry;
2724
2725         if (!bacmp(bdaddr, BDADDR_ANY))
2726                 return -EBADF;
2727
2728         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2729                 return -EEXIST;
2730
2731         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2732         if (!entry)
2733                 return -ENOMEM;
2734
2735         bacpy(&entry->bdaddr, bdaddr);
2736         entry->bdaddr_type = type;
2737
2738         list_add(&entry->list, list);
2739
2740         return 0;
2741 }
2742
2743 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2744 {
2745         struct bdaddr_list *entry;
2746
2747         if (!bacmp(bdaddr, BDADDR_ANY)) {
2748                 hci_bdaddr_list_clear(list);
2749                 return 0;
2750         }
2751
2752         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2753         if (!entry)
2754                 return -ENOENT;
2755
2756         list_del(&entry->list);
2757         kfree(entry);
2758
2759         return 0;
2760 }
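
/* Illustrative usage of the bdaddr list helpers on hdev->whitelist; the
 * address value is hypothetical and callers hold the appropriate locks.
 */
#if 0
err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
if (err == -EEXIST)
	; /* already present */

err = hci_bdaddr_list_del(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
#endif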
2761
2762 /* This function requires the caller holds hdev->lock */
2763 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2764                                                bdaddr_t *addr, u8 addr_type)
2765 {
2766         struct hci_conn_params *params;
2767
2768         /* The conn params list only contains identity addresses */
2769         if (!hci_is_identity_address(addr, addr_type))
2770                 return NULL;
2771
2772         list_for_each_entry(params, &hdev->le_conn_params, list) {
2773                 if (bacmp(&params->addr, addr) == 0 &&
2774                     params->addr_type == addr_type) {
2775                         return params;
2776                 }
2777         }
2778
2779         return NULL;
2780 }
2781
2782 /* This function requires the caller holds hdev->lock */
2783 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2784                                                   bdaddr_t *addr, u8 addr_type)
2785 {
2786         struct hci_conn_params *param;
2787
2788         /* The list only contains identity addresses */
2789         if (!hci_is_identity_address(addr, addr_type))
2790                 return NULL;
2791
2792         list_for_each_entry(param, list, action) {
2793                 if (bacmp(&param->addr, addr) == 0 &&
2794                     param->addr_type == addr_type)
2795                         return param;
2796         }
2797
2798         return NULL;
2799 }
2800
2801 /* This function requires the caller holds hdev->lock */
2802 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2803                                             bdaddr_t *addr, u8 addr_type)
2804 {
2805         struct hci_conn_params *params;
2806
2807         if (!hci_is_identity_address(addr, addr_type))
2808                 return NULL;
2809
2810         params = hci_conn_params_lookup(hdev, addr, addr_type);
2811         if (params)
2812                 return params;
2813
2814         params = kzalloc(sizeof(*params), GFP_KERNEL);
2815         if (!params) {
2816                 BT_ERR("Out of memory");
2817                 return NULL;
2818         }
2819
2820         bacpy(&params->addr, addr);
2821         params->addr_type = addr_type;
2822
2823         list_add(&params->list, &hdev->le_conn_params);
2824         INIT_LIST_HEAD(&params->action);
2825
2826         params->conn_min_interval = hdev->le_conn_min_interval;
2827         params->conn_max_interval = hdev->le_conn_max_interval;
2828         params->conn_latency = hdev->le_conn_latency;
2829         params->supervision_timeout = hdev->le_supv_timeout;
2830         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2831
2832         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2833
2834         return params;
2835 }
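
/* Illustrative sketch: tightening the connection interval for one peer after
 * adding its parameter entry (values hypothetical; caller holds hdev->lock).
 */
#if 0
struct hci_conn_params *params;

params = hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC);
if (params) {
	params->conn_min_interval = 0x0010;     /* 20 ms in 1.25 ms units */
	params->conn_max_interval = 0x0020;     /* 40 ms */
}
#endif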
2836
2837 static void hci_conn_params_free(struct hci_conn_params *params)
2838 {
2839         if (params->conn) {
2840                 hci_conn_drop(params->conn);
2841                 hci_conn_put(params->conn);
2842         }
2843
2844         list_del(&params->action);
2845         list_del(&params->list);
2846         kfree(params);
2847 }
2848
2849 /* This function requires the caller holds hdev->lock */
2850 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2851 {
2852         struct hci_conn_params *params;
2853
2854         params = hci_conn_params_lookup(hdev, addr, addr_type);
2855         if (!params)
2856                 return;
2857
2858         hci_conn_params_free(params);
2859
2860         hci_update_background_scan(hdev);
2861
2862         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2863 }
2864
2865 /* This function requires the caller holds hdev->lock */
2866 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2867 {
2868         struct hci_conn_params *params, *tmp;
2869
2870         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2871                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2872                         continue;
2873                 list_del(&params->list);
2874                 kfree(params);
2875         }
2876
2877         BT_DBG("All disabled LE connection parameters were removed");
2878 }
2879
2880 /* This function requires the caller holds hdev->lock */
2881 void hci_conn_params_clear_all(struct hci_dev *hdev)
2882 {
2883         struct hci_conn_params *params, *tmp;
2884
2885         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2886                 hci_conn_params_free(params);
2887
2888         hci_update_background_scan(hdev);
2889
2890         BT_DBG("All LE connection parameters were removed");
2891 }
2892
2893 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2894 {
2895         if (status) {
2896                 BT_ERR("Failed to start inquiry: status %d", status);
2897
2898                 hci_dev_lock(hdev);
2899                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2900                 hci_dev_unlock(hdev);
2901                 return;
2902         }
2903 }
2904
2905 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2906                                           u16 opcode)
2907 {
2908         /* General inquiry access code (GIAC) */
2909         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2910         struct hci_request req;
2911         struct hci_cp_inquiry cp;
2912         int err;
2913
2914         if (status) {
2915                 BT_ERR("Failed to disable LE scanning: status %d", status);
2916                 return;
2917         }
2918
2919         switch (hdev->discovery.type) {
2920         case DISCOV_TYPE_LE:
2921                 hci_dev_lock(hdev);
2922 #ifdef CONFIG_TIZEN_WIP
2923                 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
2924 #else
2925                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2926 #endif
2927                 hci_dev_unlock(hdev);
2928                 break;
2929
2930         case DISCOV_TYPE_INTERLEAVED:
2931                 hci_req_init(&req, hdev);
2932
2933                 memset(&cp, 0, sizeof(cp));
2934                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2935                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2936                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2937
2938                 hci_dev_lock(hdev);
2939
2940                 hci_inquiry_cache_flush(hdev);
2941
2942                 err = hci_req_run(&req, inquiry_complete);
2943                 if (err) {
2944                         BT_ERR("Inquiry request failed: err %d", err);
2945 #ifdef CONFIG_TIZEN_WIP
2946                         hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
2947 #else
2948                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2949 #endif
2950                 }
2951
2952                 hci_dev_unlock(hdev);
2953                 break;
2954         }
2955 }
2956
2957 static void le_scan_disable_work(struct work_struct *work)
2958 {
2959         struct hci_dev *hdev = container_of(work, struct hci_dev,
2960                                             le_scan_disable.work);
2961         struct hci_request req;
2962         int err;
2963
2964         BT_DBG("%s", hdev->name);
2965
2966         hci_req_init(&req, hdev);
2967
2968         hci_req_add_le_scan_disable(&req);
2969
2970         err = hci_req_run(&req, le_scan_disable_work_complete);
2971         if (err)
2972                 BT_ERR("Disable LE scanning request failed: err %d", err);
2973 }
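
/* For context (sketch): the delayed work above is typically armed by the
 * discovery code, along the lines of:
 */
#if 0
queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
		   msecs_to_jiffies(DISCOV_LE_TIMEOUT));
#endif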
2974
2975 /* Copy the Identity Address of the controller.
2976  *
2977  * If the controller has a public BD_ADDR, then by default use that one.
2978  * If this is an LE-only controller without a public address, default to
2979  * the static random address.
2980  *
2981  * For debugging purposes it is possible to force controllers with a
2982  * public address to use the static random address instead.
2983  *
2984  * In case BR/EDR has been disabled on a dual-mode controller and
2985  * userspace has configured a static address, then that address
2986  * becomes the identity address instead of the public BR/EDR address.
2987  */
2988 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2989                                u8 *bdaddr_type)
2990 {
2991         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2992             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2993             (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2994              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2995                 bacpy(bdaddr, &hdev->static_addr);
2996                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2997         } else {
2998                 bacpy(bdaddr, &hdev->bdaddr);
2999                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3000         }
3001 }
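
/* Illustrative use: callers pass storage for both outputs and can then
 * branch on the returned address type.
 */
#if 0
bdaddr_t id_addr;
u8 id_addr_type;

hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
if (id_addr_type == ADDR_LE_DEV_RANDOM)
	BT_DBG("using static random identity address");
#endif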
3002
3003 /* Alloc HCI device */
3004 struct hci_dev *hci_alloc_dev(void)
3005 {
3006         struct hci_dev *hdev;
3007
3008         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3009         if (!hdev)
3010                 return NULL;
3011
3012         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3013         hdev->esco_type = (ESCO_HV1);
3014         hdev->link_mode = (HCI_LM_ACCEPT);
3015         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3016         hdev->io_capability = 0x03;     /* No Input No Output */
3017         hdev->manufacturer = 0xffff;    /* Default to internal use */
3018         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3019         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3020
3021         hdev->sniff_max_interval = 800;
3022         hdev->sniff_min_interval = 80;
3023
3024         hdev->le_adv_channel_map = 0x07;
3025         hdev->le_adv_min_interval = 0x0800;
3026         hdev->le_adv_max_interval = 0x0800;
3027
3028 #ifdef CONFIG_TIZEN_WIP
3029         hdev->sniff_max_interval = 800;
3030         hdev->sniff_min_interval = 400;
3031
3032         /* Automatically enter sniff mode on idle connections */
3033         hdev->idle_timeout = TIZEN_SNIFF_TIMEOUT * 1000; /* 2 seconds */
3034
3035         hdev->adv_filter_policy = 0x00;
3036         hdev->adv_type = 0x00;
3037 #endif
3038         hdev->le_scan_type = LE_SCAN_PASSIVE;
3039         hdev->le_scan_interval = 0x0060;
3040         hdev->le_scan_window = 0x0030;
3041         hdev->le_conn_min_interval = 0x0028;
3042         hdev->le_conn_max_interval = 0x0038;
3043         hdev->le_conn_latency = 0x0000;
3044 #ifdef CONFIG_TIZEN_WIP
3045         hdev->le_supv_timeout = 0x0258;         /* 6000 msec */
3046 #else
3047         hdev->le_supv_timeout = 0x002a;         /* 420 msec */
3048 #endif
3049         hdev->le_def_tx_len = 0x001b;
3050         hdev->le_def_tx_time = 0x0148;
3051         hdev->le_max_tx_len = 0x001b;
3052         hdev->le_max_tx_time = 0x0148;
3053         hdev->le_max_rx_len = 0x001b;
3054         hdev->le_max_rx_time = 0x0148;
3055
3056         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3057         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3058         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3059         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3060
3061         mutex_init(&hdev->lock);
3062         mutex_init(&hdev->req_lock);
3063
3064         INIT_LIST_HEAD(&hdev->mgmt_pending);
3065         INIT_LIST_HEAD(&hdev->blacklist);
3066         INIT_LIST_HEAD(&hdev->whitelist);
3067         INIT_LIST_HEAD(&hdev->uuids);
3068         INIT_LIST_HEAD(&hdev->link_keys);
3069         INIT_LIST_HEAD(&hdev->long_term_keys);
3070         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3071         INIT_LIST_HEAD(&hdev->remote_oob_data);
3072         INIT_LIST_HEAD(&hdev->le_white_list);
3073         INIT_LIST_HEAD(&hdev->le_conn_params);
3074         INIT_LIST_HEAD(&hdev->pend_le_conns);
3075         INIT_LIST_HEAD(&hdev->pend_le_reports);
3076         INIT_LIST_HEAD(&hdev->conn_hash.list);
3077
3078         INIT_WORK(&hdev->rx_work, hci_rx_work);
3079         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3080         INIT_WORK(&hdev->tx_work, hci_tx_work);
3081         INIT_WORK(&hdev->power_on, hci_power_on);
3082
3083         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3084         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3085         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3086
3087         skb_queue_head_init(&hdev->rx_q);
3088         skb_queue_head_init(&hdev->cmd_q);
3089         skb_queue_head_init(&hdev->raw_q);
3090
3091         init_waitqueue_head(&hdev->req_wait_q);
3092
3093         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3094
3095         hci_init_sysfs(hdev);
3096         discovery_init(hdev);
3097
3098         return hdev;
3099 }
3100 EXPORT_SYMBOL(hci_alloc_dev);
3101
3102 /* Free HCI device */
3103 void hci_free_dev(struct hci_dev *hdev)
3104 {
3105         /* will free via device release */
3106         put_device(&hdev->dev);
3107 }
3108 EXPORT_SYMBOL(hci_free_dev);
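
/* Illustrative transport-driver skeleton (a sketch only; my_open/my_close/
 * my_send and the probe context are hypothetical names). Note that
 * hci_register_dev() below rejects a device lacking any of these three
 * callbacks.
 */
#if 0
static int my_probe(struct my_transport *t)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_UART;
	hdev->open = my_open;
	hdev->close = my_close;
	hdev->send = my_send;
	hci_set_drvdata(hdev, t);

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
#endif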
3109
3110 #ifdef CONFIG_SLEEP_MONITOR
3111 struct _sleep_pkt {
3112         unsigned char common;
3113         unsigned char device[3];
3114 };
3115
3116 int bt_get_sleep_monitor_cb(void *priv, unsigned int *raw_val,
3117                 int check_level, int caller_type)
3118 {
3119         int state = 0;
3120         struct hci_dev *hdev = (struct hci_dev *)priv;
3121
3122         struct _sleep_pkt sleep_pkt = {0, };
3123         struct hci_conn *conn = NULL;
3124         int idx = 0;
3125
3126         if (test_bit(HCI_SC_ONLY, &hdev->dev_flags))
3127                 sleep_pkt.common |= 0x01;
3128
3129         if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
3130                 sleep_pkt.common |= 0x02;
3131
3132         if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3133                 sleep_pkt.common |= 0x04;
3134
3135         if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3136                 sleep_pkt.common |= 0x08;
3137
3138         if (test_bit(HCI_ISCAN, &hdev->flags))
3139                 sleep_pkt.common |= 0x10;
3140
3141         if (test_bit(HCI_PSCAN, &hdev->flags))
3142                 sleep_pkt.common |= 0x20;
3143
3144         hci_dev_lock(hdev);
3145         list_for_each_entry(conn, &hdev->conn_hash.list, list) {
3146                 if (idx > 2) {
3147                         hci_dev_unlock(hdev);
3148                         goto max_device;
3149                 }
3150
3151                 sleep_pkt.device[idx] |= (0x7f & conn->handle);
3152                 if (conn->mode != HCI_CM_SNIFF)
3153                         sleep_pkt.device[idx] |= 0x80;
3154
3155                 idx++;
3156         }
3157         hci_dev_unlock(hdev);
3158
3159 max_device:
3160         memcpy(raw_val, &sleep_pkt, sizeof(unsigned int));
3161
3162         if (test_bit(HCI_INQUIRY, &hdev->flags))
3163                 state |= 0x01;
3164
3165         if (test_bit(HCI_ISCAN, &hdev->flags) ||
3166                         test_bit(HCI_PSCAN, &hdev->flags))
3167                 state |= 0x02;
3168
3169         switch (idx) {
3170         case 1:
3171                 state |= 0x04;
3172                 break;
3173         case 2:
3174                 state |= 0x08;
3175                 break;
3176         case 3:
3177                 state |= 0x0C;
3178                 break;
3179         }
3180
3181         BT_DBG("%s: check_level[%d], state[%d], raw[%X]", __func__,
3182                         check_level, state, *raw_val);
3183
3184         return state;
3185 }
3186
3187 static struct sleep_monitor_ops bt_sleep_monitor_ops = {
3188         .read_cb_func = bt_get_sleep_monitor_cb,
3189 };
3190 #endif
3191
3192 /* Register HCI device */
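/* Called by transport drivers (USB, UART, SDIO, ...) once the hci_dev
 * allocated with hci_alloc_dev() has its open(), close() and send()
 * callbacks filled in.  On success the controller is assigned an hciX
 * index, gains sysfs/debugfs entries and an rfkill switch, and a
 * power-on request is queued; the index is returned so the driver can
 * pass the device to hci_unregister_dev() later.
 *
 * A minimal driver sketch (my_open/my_close/my_send are hypothetical):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */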
3193 int hci_register_dev(struct hci_dev *hdev)
3194 {
3195         int id, error;
3196
3197         if (!hdev->open || !hdev->close || !hdev->send)
3198                 return -EINVAL;
3199
3200         /* Do not allow HCI_AMP devices to register at index 0,
3201          * so the index can be used as the AMP controller ID.
3202          */
3203         switch (hdev->dev_type) {
3204         case HCI_BREDR:
3205                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3206                 break;
3207         case HCI_AMP:
3208                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3209                 break;
3210         default:
3211                 return -EINVAL;
3212         }
3213
3214         if (id < 0)
3215                 return id;
3216
3217         sprintf(hdev->name, "hci%d", id);
3218         hdev->id = id;
3219
3220         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3221
3222         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3223                                           WQ_MEM_RECLAIM, 1, hdev->name);
3224         if (!hdev->workqueue) {
3225                 error = -ENOMEM;
3226                 goto err;
3227         }
3228
3229         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3230                                               WQ_MEM_RECLAIM, 1, hdev->name);
3231         if (!hdev->req_workqueue) {
3232                 destroy_workqueue(hdev->workqueue);
3233                 error = -ENOMEM;
3234                 goto err;
3235         }
3236
3237         if (!IS_ERR_OR_NULL(bt_debugfs))
3238                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3239
3240         dev_set_name(&hdev->dev, "%s", hdev->name);
3241
3242         error = device_add(&hdev->dev);
3243         if (error < 0)
3244                 goto err_wqueue;
3245
3246         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3247                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3248                                     hdev);
3249         if (hdev->rfkill) {
3250                 if (rfkill_register(hdev->rfkill) < 0) {
3251                         rfkill_destroy(hdev->rfkill);
3252                         hdev->rfkill = NULL;
3253                 }
3254         }
3255
3256         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3257                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3258
3259         set_bit(HCI_SETUP, &hdev->dev_flags);
3260         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3261
3262         if (hdev->dev_type == HCI_BREDR) {
3263                 /* Assume BR/EDR support until proven otherwise (such as
3264                  * through reading supported features during init).
3265                  */
3266                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3267         }
3268
3269         write_lock(&hci_dev_list_lock);
3270         list_add(&hdev->list, &hci_dev_list);
3271         write_unlock(&hci_dev_list_lock);
3272
3273         /* Devices that are marked for raw-only usage are unconfigured
3274          * and should not be included in normal operation.
3275          */
3276         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3277                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3278
3279         hci_notify(hdev, HCI_DEV_REG);
3280         hci_dev_hold(hdev);
3281
3282         queue_work(hdev->req_workqueue, &hdev->power_on);
3283
3284 #ifdef CONFIG_SLEEP_MONITOR
3285         sleep_monitor_register_ops(hdev, &bt_sleep_monitor_ops,
3286                         SLEEP_MONITOR_BT);
3287 #endif
3288         return id;
3289
3290 err_wqueue:
3291         destroy_workqueue(hdev->workqueue);
3292         destroy_workqueue(hdev->req_workqueue);
3293 err:
3294         ida_simple_remove(&hci_index_ida, hdev->id);
3295
3296         return error;
3297 }
3298 EXPORT_SYMBOL(hci_register_dev);
3299
3300 /* Unregister HCI device */
3301 void hci_unregister_dev(struct hci_dev *hdev)
3302 {
3303         int i, id;
3304
3305         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3306
3307         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3308
3309         id = hdev->id;
3310
3311         write_lock(&hci_dev_list_lock);
3312         list_del(&hdev->list);
3313         write_unlock(&hci_dev_list_lock);
3314
3315         hci_dev_do_close(hdev);
3316
3317         for (i = 0; i < NUM_REASSEMBLY; i++)
3318                 kfree_skb(hdev->reassembly[i]);
3319
3320         cancel_work_sync(&hdev->power_on);
3321
3322         if (!test_bit(HCI_INIT, &hdev->flags) &&
3323             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3324             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3325                 hci_dev_lock(hdev);
3326                 mgmt_index_removed(hdev);
3327                 hci_dev_unlock(hdev);
3328         }
3329
3330         /* mgmt_index_removed should take care of emptying the
3331          * pending list */
3332         BUG_ON(!list_empty(&hdev->mgmt_pending));
3333
3334         hci_notify(hdev, HCI_DEV_UNREG);
3335
3336         if (hdev->rfkill) {
3337                 rfkill_unregister(hdev->rfkill);
3338                 rfkill_destroy(hdev->rfkill);
3339         }
3340
3341         smp_unregister(hdev);
3342
3343         device_del(&hdev->dev);
3344
3345         debugfs_remove_recursive(hdev->debugfs);
3346
3347         destroy_workqueue(hdev->workqueue);
3348         destroy_workqueue(hdev->req_workqueue);
3349
3350         hci_dev_lock(hdev);
3351         hci_bdaddr_list_clear(&hdev->blacklist);
3352         hci_bdaddr_list_clear(&hdev->whitelist);
3353         hci_uuids_clear(hdev);
3354         hci_link_keys_clear(hdev);
3355         hci_smp_ltks_clear(hdev);
3356         hci_smp_irks_clear(hdev);
3357         hci_remote_oob_data_clear(hdev);
3358         hci_bdaddr_list_clear(&hdev->le_white_list);
3359         hci_conn_params_clear_all(hdev);
3360         hci_discovery_filter_clear(hdev);
3361         hci_dev_unlock(hdev);
3362
3363         hci_dev_put(hdev);
3364
3365         ida_simple_remove(&hci_index_ida, id);
3366
3367 #ifdef CONFIG_SLEEP_MONITOR
3368         sleep_monitor_unregister_ops(SLEEP_MONITOR_BT);
3369 #endif
3370 }
3371 EXPORT_SYMBOL(hci_unregister_dev);
3372
3373 /* Suspend HCI device */
3374 int hci_suspend_dev(struct hci_dev *hdev)
3375 {
3376         hci_notify(hdev, HCI_DEV_SUSPEND);
3377         return 0;
3378 }
3379 EXPORT_SYMBOL(hci_suspend_dev);
3380
3381 /* Resume HCI device */
3382 int hci_resume_dev(struct hci_dev *hdev)
3383 {
3384         hci_notify(hdev, HCI_DEV_RESUME);
3385         return 0;
3386 }
3387 EXPORT_SYMBOL(hci_resume_dev);
3388
3389 /* Reset HCI device */
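/* Rather than touching the hardware directly, synthesize an HCI
 * Hardware Error event (event code, parameter length 1, error code
 * 0x00) and feed it into the RX path; the event handler then drives
 * the actual recovery.  Intended for drivers whose controller has
 * stopped responding.
 */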
3390 int hci_reset_dev(struct hci_dev *hdev)
3391 {
3392         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3393         struct sk_buff *skb;
3394
3395         skb = bt_skb_alloc(3, GFP_ATOMIC);
3396         if (!skb)
3397                 return -ENOMEM;
3398
3399         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3400         memcpy(skb_put(skb, 3), hw_err, 3);
3401
3402         /* Send Hardware Error to upper stack */
3403         return hci_recv_frame(hdev, skb);
3404 }
3405 EXPORT_SYMBOL(hci_reset_dev);
3406
3407 /* Receive frame from HCI drivers */
3408 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3409 {
3410         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3411                       !test_bit(HCI_INIT, &hdev->flags))) {
3412                 kfree_skb(skb);
3413                 return -ENXIO;
3414         }
3415
3416         /* Incoming skb */
3417         bt_cb(skb)->incoming = 1;
3418
3419         /* Time stamp */
3420         __net_timestamp(skb);
3421
3422         skb_queue_tail(&hdev->rx_q, skb);
3423         queue_work(hdev->workqueue, &hdev->rx_work);
3424
3425         return 0;
3426 }
3427 EXPORT_SYMBOL(hci_recv_frame);
3428
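/* Incrementally rebuild one HCI packet from a driver-supplied byte
 * stream.  A partially assembled skb is parked in
 * hdev->reassembly[index] and scb->expect counts the bytes still
 * missing: first the fixed-size header, then the payload length
 * announced by the header's plen/dlen field.  Returns how many input
 * bytes are left over once a frame completes, or a negative error.
 */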
3429 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3430                           int count, __u8 index)
3431 {
3432         int len = 0;
3433         int hlen = 0;
3434         int remain = count;
3435         struct sk_buff *skb;
3436         struct bt_skb_cb *scb;
3437
3438         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3439             index >= NUM_REASSEMBLY)
3440                 return -EILSEQ;
3441
3442         skb = hdev->reassembly[index];
3443
3444         if (!skb) {
3445                 switch (type) {
3446                 case HCI_ACLDATA_PKT:
3447                         len = HCI_MAX_FRAME_SIZE;
3448                         hlen = HCI_ACL_HDR_SIZE;
3449                         break;
3450                 case HCI_EVENT_PKT:
3451                         len = HCI_MAX_EVENT_SIZE;
3452                         hlen = HCI_EVENT_HDR_SIZE;
3453                         break;
3454                 case HCI_SCODATA_PKT:
3455                         len = HCI_MAX_SCO_SIZE;
3456                         hlen = HCI_SCO_HDR_SIZE;
3457                         break;
3458                 }
3459
3460                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3461                 if (!skb)
3462                         return -ENOMEM;
3463
3464                 scb = (void *) skb->cb;
3465                 scb->expect = hlen;
3466                 scb->pkt_type = type;
3467
3468                 hdev->reassembly[index] = skb;
3469         }
3470
3471         while (count) {
3472                 scb = (void *) skb->cb;
3473                 len = min_t(uint, scb->expect, count);
3474
3475                 memcpy(skb_put(skb, len), data, len);
3476
3477                 count -= len;
3478                 data += len;
3479                 scb->expect -= len;
3480                 remain = count;
3481
3482                 switch (type) {
3483                 case HCI_EVENT_PKT:
3484                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3485                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3486                                 scb->expect = h->plen;
3487
3488                                 if (skb_tailroom(skb) < scb->expect) {
3489                                         kfree_skb(skb);
3490                                         hdev->reassembly[index] = NULL;
3491                                         return -ENOMEM;
3492                                 }
3493                         }
3494                         break;
3495
3496                 case HCI_ACLDATA_PKT:
3497                         if (skb->len == HCI_ACL_HDR_SIZE) {
3498                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3499                                 scb->expect = __le16_to_cpu(h->dlen);
3500
3501                                 if (skb_tailroom(skb) < scb->expect) {
3502                                         kfree_skb(skb);
3503                                         hdev->reassembly[index] = NULL;
3504                                         return -ENOMEM;
3505                                 }
3506                         }
3507                         break;
3508
3509                 case HCI_SCODATA_PKT:
3510                         if (skb->len == HCI_SCO_HDR_SIZE) {
3511                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3512                                 scb->expect = h->dlen;
3513
3514                                 if (skb_tailroom(skb) < scb->expect) {
3515                                         kfree_skb(skb);
3516                                         hdev->reassembly[index] = NULL;
3517                                         return -ENOMEM;
3518                                 }
3519                         }
3520                         break;
3521                 }
3522
3523                 if (scb->expect == 0) {
3524                         /* Complete frame */
3525
3526                         bt_cb(skb)->pkt_type = type;
3527                         hci_recv_frame(hdev, skb);
3528
3529                         hdev->reassembly[index] = NULL;
3530                         return remain;
3531                 }
3532         }
3533
3534         return remain;
3535 }
3536
3537 #define STREAM_REASSEMBLY 0
3538
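/* Entry point for drivers that deliver HCI traffic as a raw byte
 * stream (UART-style transports): the first byte of every frame is
 * the packet-type indicator, which selects the header size and
 * maximum length for hci_reassembly().  All stream input shares
 * reassembly slot 0.
 */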
3539 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3540 {
3541         int type;
3542         int rem = 0;
3543
3544         while (count) {
3545                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3546
3547                 if (!skb) {
3548                         struct { char type; } *pkt;
3549
3550                         /* Start of the frame */
3551                         pkt = data;
3552                         type = pkt->type;
3553
3554                         data++;
3555                         count--;
3556                 } else
3557                         type = bt_cb(skb)->pkt_type;
3558
3559                 rem = hci_reassembly(hdev, type, data, count,
3560                                      STREAM_REASSEMBLY);
3561                 if (rem < 0)
3562                         return rem;
3563
3564                 data += (count - rem);
3565                 count = rem;
3566         }
3567
3568         return rem;
3569 }
3570 EXPORT_SYMBOL(hci_recv_stream_fragment);
3571
3572 /* ---- Interface to upper protocols ---- */
3573
3574 int hci_register_cb(struct hci_cb *cb)
3575 {
3576         BT_DBG("%p name %s", cb, cb->name);
3577
3578         write_lock(&hci_cb_list_lock);
3579         list_add(&cb->list, &hci_cb_list);
3580         write_unlock(&hci_cb_list_lock);
3581
3582         return 0;
3583 }
3584 EXPORT_SYMBOL(hci_register_cb);
3585
3586 int hci_unregister_cb(struct hci_cb *cb)
3587 {
3588         BT_DBG("%p name %s", cb, cb->name);
3589
3590         write_lock(&hci_cb_list_lock);
3591         list_del(&cb->list);
3592         write_unlock(&hci_cb_list_lock);
3593
3594         return 0;
3595 }
3596 EXPORT_SYMBOL(hci_unregister_cb);
3597
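/* Common TX path: timestamp the frame, mirror it to the monitor
 * socket (and to raw sockets when in promiscuous mode), detach it
 * from its owning socket and hand it to the driver's send() callback.
 * The skb is freed here if the driver rejects it.
 */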
3598 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3599 {
3600         int err;
3601
3602         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3603
3604         /* Time stamp */
3605         __net_timestamp(skb);
3606
3607         /* Send copy to monitor */
3608         hci_send_to_monitor(hdev, skb);
3609
3610         if (atomic_read(&hdev->promisc)) {
3611                 /* Send copy to the sockets */
3612                 hci_send_to_sock(hdev, skb);
3613         }
3614
3615         /* Get rid of skb owner, prior to sending to the driver. */
3616         skb_orphan(skb);
3617
3618 #ifdef CONFIG_TIZEN_WIP
3619         hci_notify(hdev, HCI_DEV_WRITE);
3620 #endif
3621
3622         err = hdev->send(hdev, skb);
3623         if (err < 0) {
3624                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3625                 kfree_skb(skb);
3626         }
3627 }
3628
3629 bool hci_req_pending(struct hci_dev *hdev)
3630 {
3631         return (hdev->req_status == HCI_REQ_PEND);
3632 }
3633
3634 /* Send HCI command */
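/* Build a command packet and queue it on cmd_q; hci_cmd_work() does
 * the actual transmit, throttled by the controller's command credits.
 * Usage sketch, cancelling an ongoing inquiry:
 *
 *	hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
 */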
3635 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3636                  const void *param)
3637 {
3638         struct sk_buff *skb;
3639
3640         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3641
3642         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3643         if (!skb) {
3644                 BT_ERR("%s no memory for command", hdev->name);
3645                 return -ENOMEM;
3646         }
3647
3648         /* Stand-alone HCI commands must be flagged as
3649          * single-command requests.
3650          */
3651         bt_cb(skb)->req.start = true;
3652
3653         skb_queue_tail(&hdev->cmd_q, skb);
3654         queue_work(hdev->workqueue, &hdev->cmd_work);
3655
3656         return 0;
3657 }
3658
3659 /* Get data from the previously sent command */
3660 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3661 {
3662         struct hci_command_hdr *hdr;
3663
3664         if (!hdev->sent_cmd)
3665                 return NULL;
3666
3667         hdr = (void *) hdev->sent_cmd->data;
3668
3669         if (hdr->opcode != cpu_to_le16(opcode))
3670                 return NULL;
3671
3672         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3673
3674         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3675 }
3676
3677 /* Send ACL data */
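/* The 16-bit ACL handle field on the wire packs the 12-bit connection
 * handle into bits 0-11 and the packet boundary/broadcast flags into
 * bits 12-15; hci_handle_pack(handle, flags) is essentially
 * (handle & 0x0fff) | (flags << 12).
 */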
3678 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3679 {
3680         struct hci_acl_hdr *hdr;
3681         int len = skb->len;
3682
3683         skb_push(skb, HCI_ACL_HDR_SIZE);
3684         skb_reset_transport_header(skb);
3685         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3686         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3687         hdr->dlen   = cpu_to_le16(len);
3688 }
3689
3690 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3691                           struct sk_buff *skb, __u16 flags)
3692 {
3693         struct hci_conn *conn = chan->conn;
3694         struct hci_dev *hdev = conn->hdev;
3695         struct sk_buff *list;
3696
3697         skb->len = skb_headlen(skb);
3698         skb->data_len = 0;
3699
3700         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3701
3702         switch (hdev->dev_type) {
3703         case HCI_BREDR:
3704                 hci_add_acl_hdr(skb, conn->handle, flags);
3705                 break;
3706         case HCI_AMP:
3707                 hci_add_acl_hdr(skb, chan->handle, flags);
3708                 break;
3709         default:
3710                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3711                 return;
3712         }
3713
3714         list = skb_shinfo(skb)->frag_list;
3715         if (!list) {
3716                 /* Non-fragmented */
3717                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3718
3719                 skb_queue_tail(queue, skb);
3720         } else {
3721                 /* Fragmented */
3722                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3723
3724                 skb_shinfo(skb)->frag_list = NULL;
3725
3726                 /* Queue all fragments atomically. We need to use spin_lock_bh
3727                  * here because of 6LoWPAN links, as there this function is
3728                  * called from softirq and using normal spin lock could cause
3729                  * deadlocks.
3730                  */
3731                 spin_lock_bh(&queue->lock);
3732
3733                 __skb_queue_tail(queue, skb);
3734
3735                 flags &= ~ACL_START;
3736                 flags |= ACL_CONT;
3737                 do {
3738                         skb = list; list = list->next;
3739
3740                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3741                         hci_add_acl_hdr(skb, conn->handle, flags);
3742
3743                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3744
3745                         __skb_queue_tail(queue, skb);
3746                 } while (list);
3747
3748                 spin_unlock_bh(&queue->lock);
3749         }
3750 }
3751
3752 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3753 {
3754         struct hci_dev *hdev = chan->conn->hdev;
3755
3756         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3757
3758         hci_queue_acl(chan, &chan->data_q, skb, flags);
3759
3760         queue_work(hdev->workqueue, &hdev->tx_work);
3761 }
3762
3763 /* Send SCO data */
3764 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3765 {
3766         struct hci_dev *hdev = conn->hdev;
3767         struct hci_sco_hdr hdr;
3768
3769         BT_DBG("%s len %d", hdev->name, skb->len);
3770
3771         hdr.handle = cpu_to_le16(conn->handle);
3772         hdr.dlen   = skb->len;
3773
3774         skb_push(skb, HCI_SCO_HDR_SIZE);
3775         skb_reset_transport_header(skb);
3776         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3777
3778         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3779
3780         skb_queue_tail(&conn->data_q, skb);
3781         queue_work(hdev->workqueue, &hdev->tx_work);
3782 }
3783
3784 /* ---- HCI TX task (outgoing data) ---- */
3785
3786 /* HCI Connection scheduler */
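/* Pick the connection of the given link type that has the fewest
 * frames in flight (c->sent) and still has queued data, then grant it
 * a fair share of the available controller buffers:
 * quote = credits / number of ready connections, minimum 1.
 */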
3787 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3788                                      int *quote)
3789 {
3790         struct hci_conn_hash *h = &hdev->conn_hash;
3791         struct hci_conn *conn = NULL, *c;
3792         unsigned int num = 0, min = ~0;
3793
3794         /* We don't have to lock device here. Connections are always
3795          * added and removed with TX task disabled. */
3796
3797         rcu_read_lock();
3798
3799         list_for_each_entry_rcu(c, &h->list, list) {
3800                 if (c->type != type || skb_queue_empty(&c->data_q))
3801                         continue;
3802
3803                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3804                         continue;
3805
3806                 num++;
3807
3808                 if (c->sent < min) {
3809                         min  = c->sent;
3810                         conn = c;
3811                 }
3812
3813                 if (hci_conn_num(hdev, type) == num)
3814                         break;
3815         }
3816
3817         rcu_read_unlock();
3818
3819         if (conn) {
3820                 int cnt, q;
3821
3822                 switch (conn->type) {
3823                 case ACL_LINK:
3824                         cnt = hdev->acl_cnt;
3825                         break;
3826                 case SCO_LINK:
3827                 case ESCO_LINK:
3828                         cnt = hdev->sco_cnt;
3829                         break;
3830                 case LE_LINK:
3831                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3832                         break;
3833                 default:
3834                         cnt = 0;
3835                         BT_ERR("Unknown link type");
3836                 }
3837
3838                 q = cnt / num;
3839                 *quote = q ? q : 1;
3840         } else
3841                 *quote = 0;
3842
3843         BT_DBG("conn %p quote %d", conn, *quote);
3844         return conn;
3845 }
3846
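/* TX watchdog: if the controller has returned no buffers for too
 * long, forcibly disconnect every connection of this type that still
 * has unacknowledged frames, so the stalled buffers are reclaimed.
 */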
3847 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3848 {
3849         struct hci_conn_hash *h = &hdev->conn_hash;
3850         struct hci_conn *c;
3851
3852         BT_ERR("%s link tx timeout", hdev->name);
3853
3854         rcu_read_lock();
3855
3856         /* Kill stalled connections */
3857         list_for_each_entry_rcu(c, &h->list, list) {
3858                 if (c->type == type && c->sent) {
3859                         BT_ERR("%s killing stalled connection %pMR",
3860                                hdev->name, &c->dst);
3861                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3862                 }
3863         }
3864
3865         rcu_read_unlock();
3866 }
3867
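/* Channel-level variant of hci_low_sent(): scan every channel of
 * every connection of the given type and select, among the channels
 * whose head skb carries the highest priority seen so far, the one on
 * the connection with the fewest frames in flight.  The quota is
 * again credits / ready connections, minimum 1.
 */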
3868 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3869                                       int *quote)
3870 {
3871         struct hci_conn_hash *h = &hdev->conn_hash;
3872         struct hci_chan *chan = NULL;
3873         unsigned int num = 0, min = ~0, cur_prio = 0;
3874         struct hci_conn *conn;
3875         int cnt, q, conn_num = 0;
3876
3877         BT_DBG("%s", hdev->name);
3878
3879         rcu_read_lock();
3880
3881         list_for_each_entry_rcu(conn, &h->list, list) {
3882                 struct hci_chan *tmp;
3883
3884                 if (conn->type != type)
3885                         continue;
3886
3887                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3888                         continue;
3889
3890                 conn_num++;
3891
3892                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3893                         struct sk_buff *skb;
3894
3895                         if (skb_queue_empty(&tmp->data_q))
3896                                 continue;
3897
3898                         skb = skb_peek(&tmp->data_q);
3899                         if (skb->priority < cur_prio)
3900                                 continue;
3901
3902                         if (skb->priority > cur_prio) {
3903                                 num = 0;
3904                                 min = ~0;
3905                                 cur_prio = skb->priority;
3906                         }
3907
3908                         num++;
3909
3910                         if (conn->sent < min) {
3911                                 min  = conn->sent;
3912                                 chan = tmp;
3913                         }
3914                 }
3915
3916                 if (hci_conn_num(hdev, type) == conn_num)
3917                         break;
3918         }
3919
3920         rcu_read_unlock();
3921
3922         if (!chan)
3923                 return NULL;
3924
3925         switch (chan->conn->type) {
3926         case ACL_LINK:
3927                 cnt = hdev->acl_cnt;
3928                 break;
3929         case AMP_LINK:
3930                 cnt = hdev->block_cnt;
3931                 break;
3932         case SCO_LINK:
3933         case ESCO_LINK:
3934                 cnt = hdev->sco_cnt;
3935                 break;
3936         case LE_LINK:
3937                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3938                 break;
3939         default:
3940                 cnt = 0;
3941                 BT_ERR("Unknown link type");
3942         }
3943
3944         q = cnt / num;
3945         *quote = q ? q : 1;
3946         BT_DBG("chan %p quote %d", chan, *quote);
3947         return chan;
3948 }
3949
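/* Starvation avoidance: after a scheduling round, promote the head
 * skb of every channel that sent nothing to priority HCI_PRIO_MAX - 1,
 * so lower-priority queues eventually win over a continuous stream of
 * high-priority data.
 */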
3950 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3951 {
3952         struct hci_conn_hash *h = &hdev->conn_hash;
3953         struct hci_conn *conn;
3954         int num = 0;
3955
3956         BT_DBG("%s", hdev->name);
3957
3958         rcu_read_lock();
3959
3960         list_for_each_entry_rcu(conn, &h->list, list) {
3961                 struct hci_chan *chan;
3962
3963                 if (conn->type != type)
3964                         continue;
3965
3966                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3967                         continue;
3968
3969                 num++;
3970
3971                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3972                         struct sk_buff *skb;
3973
3974                         if (chan->sent) {
3975                                 chan->sent = 0;
3976                                 continue;
3977                         }
3978
3979                         if (skb_queue_empty(&chan->data_q))
3980                                 continue;
3981
3982                         skb = skb_peek(&chan->data_q);
3983                         if (skb->priority >= HCI_PRIO_MAX - 1)
3984                                 continue;
3985
3986                         skb->priority = HCI_PRIO_MAX - 1;
3987
3988                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3989                                skb->priority);
3990                 }
3991
3992                 if (hci_conn_num(hdev, type) == num)
3993                         break;
3994         }
3995
3996         rcu_read_unlock();
3997
3998 }
3999
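/* Block-based flow control (AMP controllers) accounts for buffer
 * usage in fixed-size blocks rather than whole packets.  For example,
 * assuming a hypothetical block_len of 339 bytes, a 1021-byte ACL
 * frame costs DIV_ROUND_UP(1021 - 4, 339) = 3 blocks.
 */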
4000 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4001 {
4002         /* Calculate count of blocks used by this packet */
4003         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4004 }
4005
4006 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4007 {
4008         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4009                 /* ACL tx timeout must be longer than maximum
4010                  * link supervision timeout (40.9 seconds) */
4011                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4012                                        HCI_ACL_TX_TIMEOUT))
4013                         hci_link_tx_to(hdev, ACL_LINK);
4014         }
4015 }
4016
4017 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4018 {
4019         unsigned int cnt = hdev->acl_cnt;
4020         struct hci_chan *chan;
4021         struct sk_buff *skb;
4022         int quote;
4023
4024         __check_timeout(hdev, cnt);
4025
4026         while (hdev->acl_cnt &&
4027                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4028                 u32 priority = (skb_peek(&chan->data_q))->priority;
4029                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4030                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4031                                skb->len, skb->priority);
4032
4033                         /* Stop if priority has changed */
4034                         if (skb->priority < priority)
4035                                 break;
4036
4037                         skb = skb_dequeue(&chan->data_q);
4038
4039                         hci_conn_enter_active_mode(chan->conn,
4040                                                    bt_cb(skb)->force_active);
4041
4042                         hci_send_frame(hdev, skb);
4043                         hdev->acl_last_tx = jiffies;
4044
4045                         hdev->acl_cnt--;
4046                         chan->sent++;
4047                         chan->conn->sent++;
4048                 }
4049         }
4050
4051         if (cnt != hdev->acl_cnt)
4052                 hci_prio_recalculate(hdev, ACL_LINK);
4053 }
4054
4055 static void hci_sched_acl_blk(struct hci_dev *hdev)
4056 {
4057         unsigned int cnt = hdev->block_cnt;
4058         struct hci_chan *chan;
4059         struct sk_buff *skb;
4060         int quote;
4061         u8 type;
4062
4063         __check_timeout(hdev, cnt);
4064
4065         BT_DBG("%s", hdev->name);
4066
4067         if (hdev->dev_type == HCI_AMP)
4068                 type = AMP_LINK;
4069         else
4070                 type = ACL_LINK;
4071
4072         while (hdev->block_cnt > 0 &&
4073                (chan = hci_chan_sent(hdev, type, &quote))) {
4074                 u32 priority = (skb_peek(&chan->data_q))->priority;
4075                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4076                         int blocks;
4077
4078                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4079                                skb->len, skb->priority);
4080
4081                         /* Stop if priority has changed */
4082                         if (skb->priority < priority)
4083                                 break;
4084
4085                         skb = skb_dequeue(&chan->data_q);
4086
4087                         blocks = __get_blocks(hdev, skb);
4088                         if (blocks > hdev->block_cnt)
4089                                 return;
4090
4091                         hci_conn_enter_active_mode(chan->conn,
4092                                                    bt_cb(skb)->force_active);
4093
4094                         hci_send_frame(hdev, skb);
4095                         hdev->acl_last_tx = jiffies;
4096
4097                         hdev->block_cnt -= blocks;
4098                         quote -= blocks;
4099
4100                         chan->sent += blocks;
4101                         chan->conn->sent += blocks;
4102                 }
4103         }
4104
4105         if (cnt != hdev->block_cnt)
4106                 hci_prio_recalculate(hdev, type);
4107 }
4108
4109 static void hci_sched_acl(struct hci_dev *hdev)
4110 {
4111         BT_DBG("%s", hdev->name);
4112
4113         /* No ACL link over BR/EDR controller */
4114         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4115                 return;
4116
4117         /* No AMP link over AMP controller */
4118         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4119                 return;
4120
4121         switch (hdev->flow_ctl_mode) {
4122         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4123                 hci_sched_acl_pkt(hdev);
4124                 break;
4125
4126         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4127                 hci_sched_acl_blk(hdev);
4128                 break;
4129         }
4130 }
4131
4132 /* Schedule SCO */
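/* SCO and eSCO share one credit pool (sco_cnt) and, unlike ACL, are
 * dequeued directly from the connection queue with no channels or
 * priorities involved.
 */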
4133 static void hci_sched_sco(struct hci_dev *hdev)
4134 {
4135         struct hci_conn *conn;
4136         struct sk_buff *skb;
4137         int quote;
4138
4139         BT_DBG("%s", hdev->name);
4140
4141         if (!hci_conn_num(hdev, SCO_LINK))
4142                 return;
4143
4144         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4145                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4146                         BT_DBG("skb %p len %d", skb, skb->len);
4147                         hci_send_frame(hdev, skb);
4148
4149                         conn->sent++;
4150                         if (conn->sent == ~0)
4151                                 conn->sent = 0;
4152                 }
4153         }
4154 }
4155
4156 static void hci_sched_esco(struct hci_dev *hdev)
4157 {
4158         struct hci_conn *conn;
4159         struct sk_buff *skb;
4160         int quote;
4161
4162         BT_DBG("%s", hdev->name);
4163
4164         if (!hci_conn_num(hdev, ESCO_LINK))
4165                 return;
4166
4167         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4168                                                      &quote))) {
4169                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4170                         BT_DBG("skb %p len %d", skb, skb->len);
4171                         hci_send_frame(hdev, skb);
4172
4173                         conn->sent++;
4174                         if (conn->sent == ~0)
4175                                 conn->sent = 0;
4176                 }
4177         }
4178 }
4179
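/* LE scheduling mirrors the ACL packet case, with one twist:
 * controllers without a separate LE buffer pool (le_pkts == 0) share
 * the BR/EDR ACL credits, so the loop draws from and writes back to
 * acl_cnt instead of le_cnt.
 */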
4180 static void hci_sched_le(struct hci_dev *hdev)
4181 {
4182         struct hci_chan *chan;
4183         struct sk_buff *skb;
4184         int quote, cnt, tmp;
4185
4186         BT_DBG("%s", hdev->name);
4187
4188         if (!hci_conn_num(hdev, LE_LINK))
4189                 return;
4190
4191         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4192                 /* LE tx timeout must be longer than maximum
4193                  * link supervision timeout (40.9 seconds) */
4194                 if (!hdev->le_cnt && hdev->le_pkts &&
4195                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4196                         hci_link_tx_to(hdev, LE_LINK);
4197         }
4198
4199         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4200         tmp = cnt;
4201         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4202                 u32 priority = (skb_peek(&chan->data_q))->priority;
4203                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4204                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4205                                skb->len, skb->priority);
4206
4207                         /* Stop if priority has changed */
4208                         if (skb->priority < priority)
4209                                 break;
4210
4211                         skb = skb_dequeue(&chan->data_q);
4212
4213                         hci_send_frame(hdev, skb);
4214                         hdev->le_last_tx = jiffies;
4215
4216                         cnt--;
4217                         chan->sent++;
4218                         chan->conn->sent++;
4219                 }
4220         }
4221
4222         if (hdev->le_pkts)
4223                 hdev->le_cnt = cnt;
4224         else
4225                 hdev->acl_cnt = cnt;
4226
4227         if (cnt != tmp)
4228                 hci_prio_recalculate(hdev, LE_LINK);
4229 }
4230
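/* TX worker: run the per-link-type schedulers unless the device is in
 * user-channel mode (where a single socket owns the device), then
 * flush any queued raw packets.
 */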
4231 static void hci_tx_work(struct work_struct *work)
4232 {
4233         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4234         struct sk_buff *skb;
4235
4236         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4237                hdev->sco_cnt, hdev->le_cnt);
4238
4239         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4240                 /* Schedule queues and send stuff to HCI driver */
4241                 hci_sched_acl(hdev);
4242                 hci_sched_sco(hdev);
4243                 hci_sched_esco(hdev);
4244                 hci_sched_le(hdev);
4245         }
4246
4247         /* Send next queued raw (unknown type) packet */
4248         while ((skb = skb_dequeue(&hdev->raw_q)))
4249                 hci_send_frame(hdev, skb);
4250 }
4251
4252 /* ----- HCI RX task (incoming data processing) ----- */
4253
4254 /* ACL data packet */
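/* Look up the connection by handle and hand the payload to L2CAP;
 * frames for unknown handles are logged and dropped.
 */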
4255 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4256 {
4257         struct hci_acl_hdr *hdr = (void *) skb->data;
4258         struct hci_conn *conn;
4259         __u16 handle, flags;
4260
4261         skb_pull(skb, HCI_ACL_HDR_SIZE);
4262
4263         handle = __le16_to_cpu(hdr->handle);
4264         flags  = hci_flags(handle);
4265         handle = hci_handle(handle);
4266
4267         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4268                handle, flags);
4269
4270         hdev->stat.acl_rx++;
4271
4272         hci_dev_lock(hdev);
4273         conn = hci_conn_hash_lookup_handle(hdev, handle);
4274         hci_dev_unlock(hdev);
4275
4276         if (conn) {
4277                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4278
4279                 /* Send to upper protocol */
4280                 l2cap_recv_acldata(conn, skb, flags);
4281                 return;
4282         } else {
4283                 BT_ERR("%s ACL packet for unknown connection handle %d",
4284                        hdev->name, handle);
4285         }
4286
4287         kfree_skb(skb);
4288 }
4289
4290 /* SCO data packet */
4291 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4292 {
4293         struct hci_sco_hdr *hdr = (void *) skb->data;
4294         struct hci_conn *conn;
4295         __u16 handle;
4296
4297         skb_pull(skb, HCI_SCO_HDR_SIZE);
4298
4299         handle = __le16_to_cpu(hdr->handle);
4300
4301         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4302
4303         hdev->stat.sco_rx++;
4304
4305         hci_dev_lock(hdev);
4306         conn = hci_conn_hash_lookup_handle(hdev, handle);
4307         hci_dev_unlock(hdev);
4308
4309         if (conn) {
4310                 /* Send to upper protocol */
4311                 sco_recv_scodata(conn, skb);
4312                 return;
4313         } else {
4314                 BT_ERR("%s SCO packet for unknown connection handle %d",
4315                        hdev->name, handle);
4316         }
4317
4318         kfree_skb(skb);
4319 }
4320
4321 static bool hci_req_is_complete(struct hci_dev *hdev)
4322 {
4323         struct sk_buff *skb;
4324
4325         skb = skb_peek(&hdev->cmd_q);
4326         if (!skb)
4327                 return true;
4328
4329         return bt_cb(skb)->req.start;
4330 }
4331
4332 static void hci_resend_last(struct hci_dev *hdev)
4333 {
4334         struct hci_command_hdr *sent;
4335         struct sk_buff *skb;
4336         u16 opcode;
4337
4338         if (!hdev->sent_cmd)
4339                 return;
4340
4341         sent = (void *) hdev->sent_cmd->data;
4342         opcode = __le16_to_cpu(sent->opcode);
4343         if (opcode == HCI_OP_RESET)
4344                 return;
4345
4346         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4347         if (!skb)
4348                 return;
4349
4350         skb_queue_head(&hdev->cmd_q, skb);
4351         queue_work(hdev->workqueue, &hdev->cmd_work);
4352 }
4353
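/* Called from the event handlers on Command Complete/Status.  Decides
 * whether the event finishes a whole request (a marked sequence of
 * commands on cmd_q) and, if so, locates the request's completion
 * callback: either on the command just sent (sent_cmd) or on one of
 * the queued commands that are discarded below.
 */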
4354 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4355 {
4356         hci_req_complete_t req_complete = NULL;
4357         struct sk_buff *skb;
4358         unsigned long flags;
4359
4360         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4361
4362         /* If the completed command doesn't match the last one that was
4363          * sent, we need to do special handling of it.
4364          */
4365         if (!hci_sent_cmd_data(hdev, opcode)) {
4366                 /* Some CSR based controllers generate a spontaneous
4367                  * reset complete event during init and any pending
4368                  * command will never be completed. In such a case we
4369                  * need to resend whatever was the last sent
4370                  * command.
4371                  */
4372                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4373                         hci_resend_last(hdev);
4374
4375                 return;
4376         }
4377
4378         /* If the command succeeded and there are still more commands in
4379          * this request, the request is not yet complete.
4380          */
4381         if (!status && !hci_req_is_complete(hdev))
4382                 return;
4383
4384         /* If this was the last command in a request, the complete
4385          * callback would be found in hdev->sent_cmd instead of the
4386          * command queue (hdev->cmd_q).
4387          */
4388         if (hdev->sent_cmd) {
4389                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4390
4391                 if (req_complete) {
4392                         /* We must set the complete callback to NULL to
4393                          * avoid calling the callback more than once if
4394                          * this function gets called again.
4395                          */
4396                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4397
4398                         goto call_complete;
4399                 }
4400         }
4401
4402         /* Remove all pending commands belonging to this request */
4403         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4404         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4405                 if (bt_cb(skb)->req.start) {
4406                         __skb_queue_head(&hdev->cmd_q, skb);
4407                         break;
4408                 }
4409
4410                 req_complete = bt_cb(skb)->req.complete;
4411                 kfree_skb(skb);
4412         }
4413         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4414
4415 call_complete:
4416         if (req_complete)
4417                 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
4418 }
4419
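/* RX worker: drain rx_q, mirroring every frame to the monitor (and to
 * raw sockets in promiscuous mode) first.  Frames are dropped wholesale
 * in user-channel mode, data packets are dropped while HCI_INIT is
 * set, and everything else is dispatched by packet type.
 */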
4420 static void hci_rx_work(struct work_struct *work)
4421 {
4422         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4423         struct sk_buff *skb;
4424
4425         BT_DBG("%s", hdev->name);
4426
4427         while ((skb = skb_dequeue(&hdev->rx_q))) {
4428                 /* Send copy to monitor */
4429                 hci_send_to_monitor(hdev, skb);
4430
4431                 if (atomic_read(&hdev->promisc)) {
4432                         /* Send copy to the sockets */
4433                         hci_send_to_sock(hdev, skb);
4434                 }
4435
4436                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4437                         kfree_skb(skb);
4438                         continue;
4439                 }
4440
4441                 if (test_bit(HCI_INIT, &hdev->flags)) {
4442                         /* Don't process data packets in this state. */
4443                         switch (bt_cb(skb)->pkt_type) {
4444                         case HCI_ACLDATA_PKT:
4445                         case HCI_SCODATA_PKT:
4446                                 kfree_skb(skb);
4447                                 continue;
4448                         }
4449                 }
4450
4451                 /* Process frame */
4452                 switch (bt_cb(skb)->pkt_type) {
4453                 case HCI_EVENT_PKT:
4454                         BT_DBG("%s Event packet", hdev->name);
4455                         hci_event_packet(hdev, skb);
4456                         break;
4457
4458                 case HCI_ACLDATA_PKT:
4459                         BT_DBG("%s ACL data packet", hdev->name);
4460                         hci_acldata_packet(hdev, skb);
4461                         break;
4462
4463                 case HCI_SCODATA_PKT:
4464                         BT_DBG("%s SCO data packet", hdev->name);
4465                         hci_scodata_packet(hdev, skb);
4466                         break;
4467
4468                 default:
4469                         kfree_skb(skb);
4470                         break;
4471                 }
4472         }
4473 }
4474
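/* Command TX worker: send the next queued command whenever the
 * controller advertises at least one free command credit (cmd_cnt).
 * A clone of the in-flight command is kept in sent_cmd for
 * hci_sent_cmd_data()/hci_resend_last(), and the command timer is
 * armed unless a reset is in progress.
 */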
4475 static void hci_cmd_work(struct work_struct *work)
4476 {
4477         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4478         struct sk_buff *skb;
4479
4480         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4481                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4482
4483         /* Send queued commands */
4484         if (atomic_read(&hdev->cmd_cnt)) {
4485                 skb = skb_dequeue(&hdev->cmd_q);
4486                 if (!skb)
4487                         return;
4488
4489                 kfree_skb(hdev->sent_cmd);
4490
4491                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4492                 if (hdev->sent_cmd) {
4493                         atomic_dec(&hdev->cmd_cnt);
4494                         hci_send_frame(hdev, skb);
4495                         if (test_bit(HCI_RESET, &hdev->flags))
4496                                 cancel_delayed_work(&hdev->cmd_timer);
4497                         else
4498                                 schedule_delayed_work(&hdev->cmd_timer,
4499                                                       HCI_CMD_TIMEOUT);
4500                 } else {
4501                         skb_queue_head(&hdev->cmd_q, skb);
4502                         queue_work(hdev->workqueue, &hdev->cmd_work);
4503                 }
4504         }
4505 }