net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
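
/* Usage sketch (not part of the driver): assuming debugfs is mounted at
 * /sys/kernel/debug and the controller is hci0, the entry registered by
 * hci_debugfs_create_basic() below can be driven from userspace:
 *
 *   # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   N
 *   # echo 1 > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing the value that is already set fails with -EALREADY, and any
 * write while the device is down fails with -ENETDOWN, matching
 * dut_mode_write() above.
 */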

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}
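
/* Note on the bitmask checks used above and throughout this file:
 * hdev->commands[] holds the bitmask returned by the Read Local
 * Supported Commands command, so a test such as
 *
 *   hdev->commands[14] & 0x20
 *
 * reads as "octet 14, bit 5" of that bitmask. Which command a given
 * octet/bit pair denotes is defined by the Supported Commands table in
 * the Bluetooth Core Specification.
 */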

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
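
/* The connection accept timeout above is expressed in baseband slots of
 * 0.625 ms each: 0x7d00 = 32000 slots * 0.625 ms = 20000 ms, hence the
 * "~20 secs" in the comment.
 */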

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
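
/* The event mask built above is a little-endian 64-bit bitfield: bit n
 * of octet i enables the event with code (i * 8 + n + 1). For example,
 * events[0] |= 0x10 sets bit 4 of octet 0 and thus enables the
 * Disconnection Complete event (code 0x05), and events[7] |= 0x20 sets
 * bit 61 and enables the LE Meta event (code 0x3e).
 */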

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * max_page to a minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
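
/* With the standard HCI link policy bit values (HCI_LP_RSWITCH 0x0001,
 * HCI_LP_HOLD 0x0002, HCI_LP_SNIFF 0x0004, HCI_LP_PARK 0x0008), a
 * controller whose LMP features advertise all four modes ends up with a
 * default link policy of 0x000f here.
 */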

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}
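
/* Note (an assumption based on later Core Specification revisions):
 * cp.simul is deliberately left at 0x00 because the "Simultaneous LE
 * and BR/EDR to Same Device Capable (Host)" feature bit was deprecated,
 * so it is never enabled here even when LE is turned on.
 */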

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for the Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If the controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the Channel Selection Algorithm
                 * #2 feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                /* If the controller supports the LE Set Extended Scan
                 * Parameters and LE Set Extended Scan Enable commands,
                 * enable the corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Create Connection
                 * command, enable the corresponding event.
                 */
                if (use_ext_conn(hdev))
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * commands, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* The HCI spec forbids mixing legacy and extended
                         * advertising commands, and READ_ADV_TX_POWER is a
                         * legacy command. So do not call it if extended
                         * advertising is supported, otherwise the controller
                         * will return COMMAND_DISALLOWED for the extended
                         * commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}
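
/* Summary of the staged bring-up driven by __hci_init() above:
 *
 *   init1 - optional HCI_Reset plus the basic identity reads
 *           (features, version, BD address) per device type
 *   init2 - BR/EDR and LE transport setup, local supported commands
 *   init3 - event masks and the bulk of the LE configuration
 *   init4 - stored link keys, secure connections, default data
 *           length and PHY parameters
 *
 * Stages 3 and 4 only run for HCI_PRIMARY controllers.
 */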

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}
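
/* The scan parameter passed above uses the Write Scan Enable values
 * from the HCI specification: 0x00 disables scanning, 0x01 enables
 * inquiry scan only, 0x02 enables page scan only, and 0x03 enables
 * both.
 */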

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
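
/* Sketch of the expected calling pattern (hypothetical caller): the
 * reference taken via hci_dev_hold() above must be released with
 * hci_dev_put() once the device is no longer needed:
 *
 *   struct hci_dev *hdev = hci_dev_get(0);
 *
 *   if (hdev) {
 *           ... use hdev ...
 *           hci_dev_put(hdev);
 *   }
 */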

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

#ifdef TIZEN_BT
bool hci_le_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->le_discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_le_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name,
                        hdev->le_discovery.state, state);

        if (hdev->le_discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (hdev->le_discovery.state != DISCOVERY_STARTING)
                        mgmt_le_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_le_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->le_discovery.state = state;
}
#endif

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
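
/* The walk above keeps the resolve list ordered so that NAME_PENDING
 * entries stay at the front, followed by the remaining entries sorted
 * by ascending |RSSI|. Since RSSI is a negative dBm value, a smaller
 * absolute value means a stronger signal, so name resolution is
 * attempted for the closest devices first.
 */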

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
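
/* Userspace sketch (illustrative only, error handling omitted): this
 * path is reached via the HCIINQUIRY ioctl on an open HCI socket "dd".
 * The buffer layout assumes the general inquiry access code (GIAC,
 * 0x9e8b33, stored little endian) and room for eight responses:
 *
 *   struct {
 *           struct hci_inquiry_req ir;
 *           struct inquiry_info info[8];
 *   } buf = {
 *           .ir = {
 *                   .dev_id  = 0,
 *                   .flags   = IREQ_CACHE_FLUSH,
 *                   .lap     = { 0x33, 0x8b, 0x9e },
 *                   .length  = 8,
 *                   .num_rsp = 8,
 *           },
 *   };
 *
 *   ioctl(dd, HCIINQUIRY, (unsigned long) &buf);
 */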
1402
1403 static int hci_dev_do_open(struct hci_dev *hdev)
1404 {
1405         int ret = 0;
1406
1407         BT_DBG("%s %p", hdev->name, hdev);
1408
1409         hci_req_sync_lock(hdev);
1410
1411         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1412                 ret = -ENODEV;
1413                 goto done;
1414         }
1415
1416         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1417             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1418                 /* Check for rfkill but allow the HCI setup stage to
1419                  * proceed (which in itself doesn't cause any RF activity).
1420                  */
1421                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1422                         ret = -ERFKILL;
1423                         goto done;
1424                 }
1425
1426                 /* Check for valid public address or a configured static
1427                  * random adddress, but let the HCI setup proceed to
1428                  * be able to determine if there is a public address
1429                  * or not.
1430                  *
1431                  * In case of user channel usage, it is not important
1432                  * if a public address or static random address is
1433                  * available.
1434                  *
1435                  * This check is only valid for BR/EDR controllers
1436                  * since AMP controllers do not have an address.
1437                  */
1438                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1439                     hdev->dev_type == HCI_PRIMARY &&
1440                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1441                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1442                         ret = -EADDRNOTAVAIL;
1443                         goto done;
1444                 }
1445         }
1446
1447         if (test_bit(HCI_UP, &hdev->flags)) {
1448                 ret = -EALREADY;
1449                 goto done;
1450         }
1451
1452         if (hdev->open(hdev)) {
1453                 ret = -EIO;
1454                 goto done;
1455         }
1456
1457         set_bit(HCI_RUNNING, &hdev->flags);
1458         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1459
1460         atomic_set(&hdev->cmd_cnt, 1);
1461         set_bit(HCI_INIT, &hdev->flags);
1462
1463         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1464             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1465                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1466
1467                 if (hdev->setup)
1468                         ret = hdev->setup(hdev);
1469
1470                 /* The transport driver can set these quirks before
1471                  * creating the HCI device or in its setup callback.
1472                  *
1473                  * In case any of them is set, the controller has to
1474                  * start up as unconfigured.
1475                  */
1476                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1477                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1478                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1479
1480                 /* For an unconfigured controller it is required to
1481                  * read at least the version information provided by
1482                  * the Read Local Version Information command.
1483                  *
1484                  * If the set_bdaddr driver callback is provided, then
1485                  * also the original Bluetooth public device address
1486                  * will be read using the Read BD Address command.
1487                  */
1488                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1489                         ret = __hci_unconf_init(hdev);
1490         }
1491
1492         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1493                 /* If public address change is configured, ensure that
1494                  * the address gets programmed. If the driver does not
1495                  * support changing the public address, fail the power
1496                  * on procedure.
1497                  */
1498                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1499                     hdev->set_bdaddr)
1500                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1501                 else
1502                         ret = -EADDRNOTAVAIL;
1503         }
1504
1505         if (!ret) {
1506                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1507                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1508                         ret = __hci_init(hdev);
1509                         if (!ret && hdev->post_init)
1510                                 ret = hdev->post_init(hdev);
1511                 }
1512         }
1513
1514         /* If the HCI Reset command is clearing all diagnostic settings,
1515          * then they need to be reprogrammed after the init procedure
1516          * completed.
1517          * has completed.
1518         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1519             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1520             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1521                 ret = hdev->set_diag(hdev, true);
1522
1523         clear_bit(HCI_INIT, &hdev->flags);
1524
1525         if (!ret) {
1526                 hci_dev_hold(hdev);
1527                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1528                 hci_adv_instances_set_rpa_expired(hdev, true);
1529                 set_bit(HCI_UP, &hdev->flags);
1530                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1531                 hci_leds_update_powered(hdev, true);
1532                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1533                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1534                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1535                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1536                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1537                     hdev->dev_type == HCI_PRIMARY) {
1538                         ret = __hci_req_hci_power_on(hdev);
1539                         mgmt_power_on(hdev, ret);
1540                 }
1541         } else {
1542                 /* Init failed, cleanup */
1543                 flush_work(&hdev->tx_work);
1544                 flush_work(&hdev->cmd_work);
1545                 flush_work(&hdev->rx_work);
1546
1547                 skb_queue_purge(&hdev->cmd_q);
1548                 skb_queue_purge(&hdev->rx_q);
1549
1550                 if (hdev->flush)
1551                         hdev->flush(hdev);
1552
1553                 if (hdev->sent_cmd) {
1554                         kfree_skb(hdev->sent_cmd);
1555                         hdev->sent_cmd = NULL;
1556                 }
1557
1558                 clear_bit(HCI_RUNNING, &hdev->flags);
1559                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1560
1561                 hdev->close(hdev);
1562                 hdev->flags &= BIT(HCI_RAW);
1563         }
1564
1565 done:
1566         hci_req_sync_unlock(hdev);
1567         return ret;
1568 }
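
/* Editorial summary (not in the original source): on success this
 * function leaves the device with HCI_UP set and emits HCI_DEV_UP; on
 * any init failure it flushes the rx/tx/cmd works, purges the queues,
 * calls the driver's close() callback and clears every flag except
 * HCI_RAW, which is essentially the same state hci_dev_do_close()
 * below ends in.
 */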
1569
1570 /* ---- HCI ioctl helpers ---- */
1571
1572 int hci_dev_open(__u16 dev)
1573 {
1574         struct hci_dev *hdev;
1575         int err;
1576
1577         hdev = hci_dev_get(dev);
1578         if (!hdev)
1579                 return -ENODEV;
1580
1581         /* Devices that are marked as unconfigured can only be powered
1582          * up as user channel. Trying to bring them up as normal devices
1583          * will result in a failure. Only user channel operation is
1584          * possible.
1585          *
1586          * When this function is called for a user channel, the flag
1587          * HCI_USER_CHANNEL will be set first before attempting to
1588          * open the device.
1589          */
1590         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1591             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1592                 err = -EOPNOTSUPP;
1593                 goto done;
1594         }
1595
1596         /* We need to ensure that no other power on/off work is pending
1597          * before proceeding to call hci_dev_do_open. This is
1598          * particularly important if the setup procedure has not yet
1599          * completed.
1600          */
1601         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1602                 cancel_delayed_work(&hdev->power_off);
1603
1604         /* After this call it is guaranteed that the setup procedure
1605          * has finished. This means that error conditions like RFKILL
1606          * or no valid public or static random address apply.
1607          */
1608         flush_workqueue(hdev->req_workqueue);
1609
1610         /* For controllers not using the management interface and that
1611          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1612          * so that pairing works for them. Once the management interface
1613          * is in use this bit will be cleared again and userspace has
1614          * to explicitly enable it.
1615          */
1616         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1617             !hci_dev_test_flag(hdev, HCI_MGMT))
1618                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1619
1620         err = hci_dev_do_open(hdev);
1621
1622 done:
1623         hci_dev_put(hdev);
1624         return err;
1625 }
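
/* Illustrative sketch (not part of the original source): hci_dev_open()
 * is reached from userspace via the HCIDEVUP ioctl on a raw HCI control
 * socket, with the device index as argument:
 *
 *      #include <sys/ioctl.h>
 *      #include <sys/socket.h>
 *      #include <bluetooth/bluetooth.h>
 *      #include <bluetooth/hci.h>
 *
 *      int ctl = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *      if (ioctl(ctl, HCIDEVUP, 0) < 0)        // 0 selects hci0
 *              perror("HCIDEVUP");             // e.g. ERFKILL, EALREADY
 *
 * HCIDEVDOWN takes the same argument and lands in hci_dev_close() below.
 */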
1626
1627 /* This function requires the caller holds hdev->lock */
1628 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1629 {
1630         struct hci_conn_params *p;
1631
1632         list_for_each_entry(p, &hdev->le_conn_params, list) {
1633                 if (p->conn) {
1634                         hci_conn_drop(p->conn);
1635                         hci_conn_put(p->conn);
1636                         p->conn = NULL;
1637                 }
1638                 list_del_init(&p->action);
1639         }
1640
1641         BT_DBG("All LE pending actions cleared");
1642 }
1643
1644 int hci_dev_do_close(struct hci_dev *hdev)
1645 {
1646         bool auto_off;
1647
1648         BT_DBG("%s %p", hdev->name, hdev);
1649
1650         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1651             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1652             test_bit(HCI_UP, &hdev->flags)) {
1653                 /* Execute vendor specific shutdown routine */
1654                 if (hdev->shutdown)
1655                         hdev->shutdown(hdev);
1656         }
1657
1658         cancel_delayed_work(&hdev->power_off);
1659
1660         hci_request_cancel_all(hdev);
1661         hci_req_sync_lock(hdev);
1662
1663         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1664                 cancel_delayed_work_sync(&hdev->cmd_timer);
1665                 hci_req_sync_unlock(hdev);
1666                 return 0;
1667         }
1668
1669         hci_leds_update_powered(hdev, false);
1670
1671         /* Flush RX and TX works */
1672         flush_work(&hdev->tx_work);
1673         flush_work(&hdev->rx_work);
1674
1675         if (hdev->discov_timeout > 0) {
1676                 hdev->discov_timeout = 0;
1677                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1678                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1679         }
1680
1681         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1682                 cancel_delayed_work(&hdev->service_cache);
1683
1684         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1685                 struct adv_info *adv_instance;
1686
1687                 cancel_delayed_work_sync(&hdev->rpa_expired);
1688
1689                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1690                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1691         }
1692
1693         /* Avoid potential lockdep warnings from the *_flush() calls by
1694          * ensuring the workqueue is empty up front.
1695          */
1696         drain_workqueue(hdev->workqueue);
1697
1698         hci_dev_lock(hdev);
1699
1700         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1701
1702         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1703
1704         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1705             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1706             hci_dev_test_flag(hdev, HCI_MGMT))
1707                 __mgmt_power_off(hdev);
1708
1709         hci_inquiry_cache_flush(hdev);
1710         hci_pend_le_actions_clear(hdev);
1711         hci_conn_hash_flush(hdev);
1712         hci_dev_unlock(hdev);
1713
1714         smp_unregister(hdev);
1715
1716         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1717
1718         if (hdev->flush)
1719                 hdev->flush(hdev);
1720
1721         /* Reset device */
1722         skb_queue_purge(&hdev->cmd_q);
1723         atomic_set(&hdev->cmd_cnt, 1);
1724         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1725             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1726                 set_bit(HCI_INIT, &hdev->flags);
1727                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1728                 clear_bit(HCI_INIT, &hdev->flags);
1729         }
1730
1731         /* Flush cmd work */
1732         flush_work(&hdev->cmd_work);
1733
1734         /* Drop queues */
1735         skb_queue_purge(&hdev->rx_q);
1736         skb_queue_purge(&hdev->cmd_q);
1737         skb_queue_purge(&hdev->raw_q);
1738
1739         /* Drop last sent command */
1740         if (hdev->sent_cmd) {
1741                 cancel_delayed_work_sync(&hdev->cmd_timer);
1742                 kfree_skb(hdev->sent_cmd);
1743                 hdev->sent_cmd = NULL;
1744         }
1745
1746         clear_bit(HCI_RUNNING, &hdev->flags);
1747         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1748
1749         /* After this point our queues are empty
1750          * and no tasks are scheduled. */
1751         hdev->close(hdev);
1752
1753         /* Clear flags */
1754         hdev->flags &= BIT(HCI_RAW);
1755         hci_dev_clear_volatile_flags(hdev);
1756
1757         /* Controller radio is available but is currently powered down */
1758         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1759
1760         memset(hdev->eir, 0, sizeof(hdev->eir));
1761         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1762         bacpy(&hdev->random_addr, BDADDR_ANY);
1763
1764         hci_req_sync_unlock(hdev);
1765
1766         hci_dev_put(hdev);
1767         return 0;
1768 }
1769
1770 int hci_dev_close(__u16 dev)
1771 {
1772         struct hci_dev *hdev;
1773         int err;
1774
1775         hdev = hci_dev_get(dev);
1776         if (!hdev)
1777                 return -ENODEV;
1778
1779         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1780                 err = -EBUSY;
1781                 goto done;
1782         }
1783
1784         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1785                 cancel_delayed_work(&hdev->power_off);
1786
1787         err = hci_dev_do_close(hdev);
1788
1789 done:
1790         hci_dev_put(hdev);
1791         return err;
1792 }
1793
1794 static int hci_dev_do_reset(struct hci_dev *hdev)
1795 {
1796         int ret;
1797
1798         BT_DBG("%s %p", hdev->name, hdev);
1799
1800         hci_req_sync_lock(hdev);
1801
1802         /* Drop queues */
1803         skb_queue_purge(&hdev->rx_q);
1804         skb_queue_purge(&hdev->cmd_q);
1805
1806         /* Avoid potential lockdep warnings from the *_flush() calls by
1807          * ensuring the workqueue is empty up front.
1808          */
1809         drain_workqueue(hdev->workqueue);
1810
1811         hci_dev_lock(hdev);
1812         hci_inquiry_cache_flush(hdev);
1813         hci_conn_hash_flush(hdev);
1814         hci_dev_unlock(hdev);
1815
1816         if (hdev->flush)
1817                 hdev->flush(hdev);
1818
1819         atomic_set(&hdev->cmd_cnt, 1);
1820         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1821
1822         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1823
1824         hci_req_sync_unlock(hdev);
1825         return ret;
1826 }
1827
1828 int hci_dev_reset(__u16 dev)
1829 {
1830         struct hci_dev *hdev;
1831         int err;
1832
1833         hdev = hci_dev_get(dev);
1834         if (!hdev)
1835                 return -ENODEV;
1836
1837         if (!test_bit(HCI_UP, &hdev->flags)) {
1838                 err = -ENETDOWN;
1839                 goto done;
1840         }
1841
1842         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1843                 err = -EBUSY;
1844                 goto done;
1845         }
1846
1847         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1848                 err = -EOPNOTSUPP;
1849                 goto done;
1850         }
1851
1852         err = hci_dev_do_reset(hdev);
1853
1854 done:
1855         hci_dev_put(hdev);
1856         return err;
1857 }
1858
1859 int hci_dev_reset_stat(__u16 dev)
1860 {
1861         struct hci_dev *hdev;
1862         int ret = 0;
1863
1864         hdev = hci_dev_get(dev);
1865         if (!hdev)
1866                 return -ENODEV;
1867
1868         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1869                 ret = -EBUSY;
1870                 goto done;
1871         }
1872
1873         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1874                 ret = -EOPNOTSUPP;
1875                 goto done;
1876         }
1877
1878         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1879
1880 done:
1881         hci_dev_put(hdev);
1882         return ret;
1883 }
1884
1885 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1886 {
1887         bool conn_changed, discov_changed;
1888
1889         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1890
1891         if ((scan & SCAN_PAGE))
1892                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1893                                                           HCI_CONNECTABLE);
1894         else
1895                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1896                                                            HCI_CONNECTABLE);
1897
1898         if ((scan & SCAN_INQUIRY)) {
1899                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1900                                                             HCI_DISCOVERABLE);
1901         } else {
1902                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1903                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1904                                                              HCI_DISCOVERABLE);
1905         }
1906
1907         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1908                 return;
1909
1910         if (conn_changed || discov_changed) {
1911                 /* In case this was disabled through mgmt */
1912                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1913
1914                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1915                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1916
1917                 mgmt_new_settings(hdev);
1918         }
1919 }
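
/* Editorial note (not in the original source): the scan value handled
 * above carries the HCI Write Scan Enable bits from hci.h, i.e.
 * SCAN_DISABLED (0x00), SCAN_INQUIRY (0x01) and SCAN_PAGE (0x02), so:
 *
 *      SCAN_PAGE | SCAN_INQUIRY  ->  connectable and discoverable
 *      SCAN_PAGE                 ->  connectable only
 *      SCAN_DISABLED             ->  neither
 */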
1920
1921 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1922 {
1923         struct hci_dev *hdev;
1924         struct hci_dev_req dr;
1925         int err = 0;
1926
1927         if (copy_from_user(&dr, arg, sizeof(dr)))
1928                 return -EFAULT;
1929
1930         hdev = hci_dev_get(dr.dev_id);
1931         if (!hdev)
1932                 return -ENODEV;
1933
1934         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1935                 err = -EBUSY;
1936                 goto done;
1937         }
1938
1939         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1940                 err = -EOPNOTSUPP;
1941                 goto done;
1942         }
1943
1944         if (hdev->dev_type != HCI_PRIMARY) {
1945                 err = -EOPNOTSUPP;
1946                 goto done;
1947         }
1948
1949         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1950                 err = -EOPNOTSUPP;
1951                 goto done;
1952         }
1953
1954         switch (cmd) {
1955         case HCISETAUTH:
1956                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1957                                    HCI_INIT_TIMEOUT, NULL);
1958                 break;
1959
1960         case HCISETENCRYPT:
1961                 if (!lmp_encrypt_capable(hdev)) {
1962                         err = -EOPNOTSUPP;
1963                         break;
1964                 }
1965
1966                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1967                         /* Auth must be enabled first */
1968                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1969                                            HCI_INIT_TIMEOUT, NULL);
1970                         if (err)
1971                                 break;
1972                 }
1973
1974                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1975                                    HCI_INIT_TIMEOUT, NULL);
1976                 break;
1977
1978         case HCISETSCAN:
1979                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1980                                    HCI_INIT_TIMEOUT, NULL);
1981
1982                 /* Ensure that the connectable and discoverable states
1983                  * get correctly modified as this was a non-mgmt change.
1984                  */
1985                 if (!err)
1986                         hci_update_scan_state(hdev, dr.dev_opt);
1987                 break;
1988
1989         case HCISETLINKPOL:
1990                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1991                                    HCI_INIT_TIMEOUT, NULL);
1992                 break;
1993
1994         case HCISETLINKMODE:
1995                 hdev->link_mode = ((__u16) dr.dev_opt) &
1996                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1997                 break;
1998
1999         case HCISETPTYPE:
2000                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2001                         break;
2002
2003                 hdev->pkt_type = (__u16) dr.dev_opt;
2004                 mgmt_phy_configuration_changed(hdev, NULL);
2005                 break;
2006
2007         case HCISETACLMTU:
2008                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2009                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2010                 break;
2011
2012         case HCISETSCOMTU:
2013                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2014                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2015                 break;
2016
2017         default:
2018                 err = -EINVAL;
2019                 break;
2020         }
2021
2022 done:
2023         hci_dev_put(hdev);
2024         return err;
2025 }
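
/* Illustrative sketch (not part of the original source): for
 * HCISETACLMTU and HCISETSCOMTU, userspace packs the packet count into
 * the low 16 bits of dev_opt and the MTU into the high 16 bits, which
 * is what the two __u16 loads above pick apart (note that this layout
 * assumes a little-endian host):
 *
 *      struct hci_dev_req dr = { .dev_id = 0 };
 *
 *      dr.dev_opt = ((__u32) acl_mtu << 16) | acl_pkts;
 *      ioctl(ctl, HCISETACLMTU, &dr);
 */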
2026
2027 int hci_get_dev_list(void __user *arg)
2028 {
2029         struct hci_dev *hdev;
2030         struct hci_dev_list_req *dl;
2031         struct hci_dev_req *dr;
2032         int n = 0, size, err;
2033         __u16 dev_num;
2034
2035         if (get_user(dev_num, (__u16 __user *) arg))
2036                 return -EFAULT;
2037
2038         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2039                 return -EINVAL;
2040
2041         size = sizeof(*dl) + dev_num * sizeof(*dr);
2042
2043         dl = kzalloc(size, GFP_KERNEL);
2044         if (!dl)
2045                 return -ENOMEM;
2046
2047         dr = dl->dev_req;
2048
2049         read_lock(&hci_dev_list_lock);
2050         list_for_each_entry(hdev, &hci_dev_list, list) {
2051                 unsigned long flags = hdev->flags;
2052
2053                 /* When the auto-off is configured it means the transport
2054                  * is running, but in that case still indicate that the
2055                  * device is actually down.
2056                  */
2057                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2058                         flags &= ~BIT(HCI_UP);
2059
2060                 (dr + n)->dev_id  = hdev->id;
2061                 (dr + n)->dev_opt = flags;
2062
2063                 if (++n >= dev_num)
2064                         break;
2065         }
2066         read_unlock(&hci_dev_list_lock);
2067
2068         dl->dev_num = n;
2069         size = sizeof(*dl) + n * sizeof(*dr);
2070
2071         err = copy_to_user(arg, dl, size);
2072         kfree(dl);
2073
2074         return err ? -EFAULT : 0;
2075 }
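
/* Illustrative sketch (not part of the original source): the matching
 * userspace call sets dev_num to the capacity of the trailing array and
 * reads back how many entries were filled:
 *
 *      struct hci_dev_list_req *dl;
 *
 *      dl = calloc(1, sizeof(*dl) +
 *                     HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *      dl->dev_num = HCI_MAX_DEV;
 *      if (ioctl(ctl, HCIGETDEVLIST, dl) == 0)
 *              for (int i = 0; i < dl->dev_num; i++)
 *                      printf("hci%u\n", dl->dev_req[i].dev_id);
 */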
2076
2077 int hci_get_dev_info(void __user *arg)
2078 {
2079         struct hci_dev *hdev;
2080         struct hci_dev_info di;
2081         unsigned long flags;
2082         int err = 0;
2083
2084         if (copy_from_user(&di, arg, sizeof(di)))
2085                 return -EFAULT;
2086
2087         hdev = hci_dev_get(di.dev_id);
2088         if (!hdev)
2089                 return -ENODEV;
2090
2091         /* When the auto-off is configured it means the transport
2092          * is running, but in that case still indicate that the
2093          * device is actually down.
2094          */
2095         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2096                 flags = hdev->flags & ~BIT(HCI_UP);
2097         else
2098                 flags = hdev->flags;
2099
2100         strcpy(di.name, hdev->name);
2101         di.bdaddr   = hdev->bdaddr;
2102         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2103         di.flags    = flags;
2104         di.pkt_type = hdev->pkt_type;
2105         if (lmp_bredr_capable(hdev)) {
2106                 di.acl_mtu  = hdev->acl_mtu;
2107                 di.acl_pkts = hdev->acl_pkts;
2108                 di.sco_mtu  = hdev->sco_mtu;
2109                 di.sco_pkts = hdev->sco_pkts;
2110         } else {
2111                 di.acl_mtu  = hdev->le_mtu;
2112                 di.acl_pkts = hdev->le_pkts;
2113                 di.sco_mtu  = 0;
2114                 di.sco_pkts = 0;
2115         }
2116         di.link_policy = hdev->link_policy;
2117         di.link_mode   = hdev->link_mode;
2118
2119         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2120         memcpy(&di.features, &hdev->features, sizeof(di.features));
2121
2122         if (copy_to_user(arg, &di, sizeof(di)))
2123                 err = -EFAULT;
2124
2125         hci_dev_put(hdev);
2126
2127         return err;
2128 }
2129
2130 /* ---- Interface to HCI drivers ---- */
2131
2132 static int hci_rfkill_set_block(void *data, bool blocked)
2133 {
2134         struct hci_dev *hdev = data;
2135
2136         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2137
2138         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2139                 return -EBUSY;
2140
2141         if (blocked) {
2142                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2143                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2144                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2145                         hci_dev_do_close(hdev);
2146         } else {
2147                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2148         }
2149
2150         return 0;
2151 }
2152
2153 static const struct rfkill_ops hci_rfkill_ops = {
2154         .set_block = hci_rfkill_set_block,
2155 };
2156
2157 static void hci_power_on(struct work_struct *work)
2158 {
2159         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2160         int err;
2161
2162         BT_DBG("%s", hdev->name);
2163
2164         if (test_bit(HCI_UP, &hdev->flags) &&
2165             hci_dev_test_flag(hdev, HCI_MGMT) &&
2166             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2167                 cancel_delayed_work(&hdev->power_off);
2168                 hci_req_sync_lock(hdev);
2169                 err = __hci_req_hci_power_on(hdev);
2170                 hci_req_sync_unlock(hdev);
2171                 mgmt_power_on(hdev, err);
2172                 return;
2173         }
2174
2175         err = hci_dev_do_open(hdev);
2176         if (err < 0) {
2177                 hci_dev_lock(hdev);
2178                 mgmt_set_powered_failed(hdev, err);
2179                 hci_dev_unlock(hdev);
2180                 return;
2181         }
2182
2183         /* During the HCI setup phase, a few error conditions are
2184          * ignored and they need to be checked now. If they are still
2185          * valid, it is important to turn the device back off.
2186          */
2187         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2188             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2189             (hdev->dev_type == HCI_PRIMARY &&
2190              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2191              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2192                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2193                 hci_dev_do_close(hdev);
2194         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2195                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2196                                    HCI_AUTO_OFF_TIMEOUT);
2197         }
2198
2199         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2200                 /* For unconfigured devices, set the HCI_RAW flag
2201                  * so that userspace can easily identify them.
2202                  */
2203                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2204                         set_bit(HCI_RAW, &hdev->flags);
2205
2206                 /* For fully configured devices, this will send
2207                  * the Index Added event. For unconfigured devices,
2208                  * it will send an Unconfigured Index Added event.
2209                  *
2210                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2211                  * and no event will be sent.
2212                  */
2213                 mgmt_index_added(hdev);
2214         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2215                 /* Once the controller is configured, it is
2216                  * important to clear the HCI_RAW flag.
2217                  */
2218                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2219                         clear_bit(HCI_RAW, &hdev->flags);
2220
2221                 /* Powering on the controller with HCI_CONFIG set only
2222                  * happens with the transition from unconfigured to
2223                  * configured. This will send the Index Added event.
2224                  */
2225                 mgmt_index_added(hdev);
2226         }
2227 }
2228
2229 static void hci_power_off(struct work_struct *work)
2230 {
2231         struct hci_dev *hdev = container_of(work, struct hci_dev,
2232                                             power_off.work);
2233
2234         BT_DBG("%s", hdev->name);
2235
2236         hci_dev_do_close(hdev);
2237 }
2238
2239 static void hci_error_reset(struct work_struct *work)
2240 {
2241         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2242
2243         BT_DBG("%s", hdev->name);
2244
2245         if (hdev->hw_error)
2246                 hdev->hw_error(hdev, hdev->hw_error_code);
2247         else
2248                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2249
2250         if (hci_dev_do_close(hdev))
2251                 return;
2252
2253         hci_dev_do_open(hdev);
2254 }
2255
2256 void hci_uuids_clear(struct hci_dev *hdev)
2257 {
2258         struct bt_uuid *uuid, *tmp;
2259
2260         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2261                 list_del(&uuid->list);
2262                 kfree(uuid);
2263         }
2264 }
2265
2266 void hci_link_keys_clear(struct hci_dev *hdev)
2267 {
2268         struct link_key *key;
2269
2270         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2271                 list_del_rcu(&key->list);
2272                 kfree_rcu(key, rcu);
2273         }
2274 }
2275
2276 void hci_smp_ltks_clear(struct hci_dev *hdev)
2277 {
2278         struct smp_ltk *k;
2279
2280         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2281                 list_del_rcu(&k->list);
2282                 kfree_rcu(k, rcu);
2283         }
2284 }
2285
2286 void hci_smp_irks_clear(struct hci_dev *hdev)
2287 {
2288         struct smp_irk *k;
2289
2290         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2291                 list_del_rcu(&k->list);
2292                 kfree_rcu(k, rcu);
2293         }
2294 }
2295
2296 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2297 {
2298         struct link_key *k;
2299
2300         rcu_read_lock();
2301         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2302                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2303                         rcu_read_unlock();
2304                         return k;
2305                 }
2306         }
2307         rcu_read_unlock();
2308
2309         return NULL;
2310 }
2311
2312 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2313                                u8 key_type, u8 old_key_type)
2314 {
2315         /* Legacy key */
2316         if (key_type < 0x03)
2317                 return true;
2318
2319         /* Debug keys are insecure so don't store them persistently */
2320         if (key_type == HCI_LK_DEBUG_COMBINATION)
2321                 return false;
2322
2323         /* Changed combination key and there's no previous one */
2324         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2325                 return false;
2326
2327         /* Security mode 3 case */
2328         if (!conn)
2329                 return true;
2330
2331         /* BR/EDR key derived using SC from an LE link */
2332         if (conn->type == LE_LINK)
2333                 return true;
2334
2335         /* Neither the local nor the remote side requested no-bonding */
2336         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2337                 return true;
2338
2339         /* Local side had dedicated bonding as requirement */
2340         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2341                 return true;
2342
2343         /* Remote side had dedicated bonding as requirement */
2344         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2345                 return true;
2346
2347         /* If none of the above criteria match, then don't store the key
2348          * persistently */
2349         return false;
2350 }
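
/* Editorial summary (not in the original source) of the checks above:
 * a link key is stored persistently when it is a legacy key, when no
 * connection exists (Security Mode 3), when it was derived from an LE
 * link via Secure Connections, when both sides requested some form of
 * bonding, or when either side requested dedicated bonding; debug keys
 * and spurious changed-combination keys are never stored.
 */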
2351
2352 static u8 ltk_role(u8 type)
2353 {
2354         if (type == SMP_LTK)
2355                 return HCI_ROLE_MASTER;
2356
2357         return HCI_ROLE_SLAVE;
2358 }
2359
2360 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2361                              u8 addr_type, u8 role)
2362 {
2363         struct smp_ltk *k;
2364
2365         rcu_read_lock();
2366         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2367                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2368                         continue;
2369
2370                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2371                         rcu_read_unlock();
2372                         return k;
2373                 }
2374         }
2375         rcu_read_unlock();
2376
2377         return NULL;
2378 }
2379
2380 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2381 {
2382         struct smp_irk *irk;
2383
2384         rcu_read_lock();
2385         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2386                 if (!bacmp(&irk->rpa, rpa)) {
2387                         rcu_read_unlock();
2388                         return irk;
2389                 }
2390         }
2391
2392         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2393                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2394                         bacpy(&irk->rpa, rpa);
2395                         rcu_read_unlock();
2396                         return irk;
2397                 }
2398         }
2399         rcu_read_unlock();
2400
2401         return NULL;
2402 }
2403
2404 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2405                                      u8 addr_type)
2406 {
2407         struct smp_irk *irk;
2408
2409         /* Identity Address must be public or static random */
2410         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2411                 return NULL;
2412
2413         rcu_read_lock();
2414         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2415                 if (addr_type == irk->addr_type &&
2416                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2417                         rcu_read_unlock();
2418                         return irk;
2419                 }
2420         }
2421         rcu_read_unlock();
2422
2423         return NULL;
2424 }
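
/* Editorial note (not in the original source): bdaddr_t is stored
 * little-endian, so b[5] is the most significant address byte. Static
 * random addresses must have their two top bits set (Core Spec Vol 6,
 * Part B, Sec. 1.3), which the (b[5] & 0xc0) != 0xc0 test above
 * rejects; e.g. C0:11:22:33:44:55 qualifies while 40:11:22:33:44:55
 * (a resolvable-private-address prefix) does not.
 */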
2425
2426 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2427                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2428                                   u8 pin_len, bool *persistent)
2429 {
2430         struct link_key *key, *old_key;
2431         u8 old_key_type;
2432
2433         old_key = hci_find_link_key(hdev, bdaddr);
2434         if (old_key) {
2435                 old_key_type = old_key->type;
2436                 key = old_key;
2437         } else {
2438                 old_key_type = conn ? conn->key_type : 0xff;
2439                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2440                 if (!key)
2441                         return NULL;
2442                 list_add_rcu(&key->list, &hdev->link_keys);
2443         }
2444
2445         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2446
2447         /* Some buggy controller combinations generate a changed
2448          * combination key for legacy pairing even when there's no
2449          * previous key */
2450         if (type == HCI_LK_CHANGED_COMBINATION &&
2451             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2452                 type = HCI_LK_COMBINATION;
2453                 if (conn)
2454                         conn->key_type = type;
2455         }
2456
2457         bacpy(&key->bdaddr, bdaddr);
2458         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2459         key->pin_len = pin_len;
2460
2461         if (type == HCI_LK_CHANGED_COMBINATION)
2462                 key->type = old_key_type;
2463         else
2464                 key->type = type;
2465
2466         if (persistent)
2467                 *persistent = hci_persistent_key(hdev, conn, type,
2468                                                  old_key_type);
2469
2470         return key;
2471 }
2472
2473 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2474                             u8 addr_type, u8 type, u8 authenticated,
2475                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2476 {
2477         struct smp_ltk *key, *old_key;
2478         u8 role = ltk_role(type);
2479
2480         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2481         if (old_key)
2482                 key = old_key;
2483         else {
2484                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2485                 if (!key)
2486                         return NULL;
2487                 list_add_rcu(&key->list, &hdev->long_term_keys);
2488         }
2489
2490         bacpy(&key->bdaddr, bdaddr);
2491         key->bdaddr_type = addr_type;
2492         memcpy(key->val, tk, sizeof(key->val));
2493         key->authenticated = authenticated;
2494         key->ediv = ediv;
2495         key->rand = rand;
2496         key->enc_size = enc_size;
2497         key->type = type;
2498
2499         return key;
2500 }
2501
2502 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2503                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2504 {
2505         struct smp_irk *irk;
2506
2507         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2508         if (!irk) {
2509                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2510                 if (!irk)
2511                         return NULL;
2512
2513                 bacpy(&irk->bdaddr, bdaddr);
2514                 irk->addr_type = addr_type;
2515
2516                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2517         }
2518
2519         memcpy(irk->val, val, 16);
2520         bacpy(&irk->rpa, rpa);
2521
2522         return irk;
2523 }
2524
2525 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2526 {
2527         struct link_key *key;
2528
2529         key = hci_find_link_key(hdev, bdaddr);
2530         if (!key)
2531                 return -ENOENT;
2532
2533         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2534
2535         list_del_rcu(&key->list);
2536         kfree_rcu(key, rcu);
2537
2538         return 0;
2539 }
2540
2541 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2542 {
2543         struct smp_ltk *k;
2544         int removed = 0;
2545
2546         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2547                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2548                         continue;
2549
2550                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2551
2552                 list_del_rcu(&k->list);
2553                 kfree_rcu(k, rcu);
2554                 removed++;
2555         }
2556
2557         return removed ? 0 : -ENOENT;
2558 }
2559
2560 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2561 {
2562         struct smp_irk *k;
2563
2564         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2565                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2566                         continue;
2567
2568                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2569
2570                 list_del_rcu(&k->list);
2571                 kfree_rcu(k, rcu);
2572         }
2573 }
2574
2575 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2576 {
2577         struct smp_ltk *k;
2578         struct smp_irk *irk;
2579         u8 addr_type;
2580
2581         if (type == BDADDR_BREDR) {
2582                 if (hci_find_link_key(hdev, bdaddr))
2583                         return true;
2584                 return false;
2585         }
2586
2587         /* Convert to HCI addr type which struct smp_ltk uses */
2588         if (type == BDADDR_LE_PUBLIC)
2589                 addr_type = ADDR_LE_DEV_PUBLIC;
2590         else
2591                 addr_type = ADDR_LE_DEV_RANDOM;
2592
2593         irk = hci_get_irk(hdev, bdaddr, addr_type);
2594         if (irk) {
2595                 bdaddr = &irk->bdaddr;
2596                 addr_type = irk->addr_type;
2597         }
2598
2599         rcu_read_lock();
2600         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2601                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2602                         rcu_read_unlock();
2603                         return true;
2604                 }
2605         }
2606         rcu_read_unlock();
2607
2608         return false;
2609 }
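
/* Editorial note (not in the original source): for LE addresses the
 * lookup above first resolves the address through the IRK list, so a
 * resolvable private address that maps to a stored identity is treated
 * as paired whenever an LTK exists for that identity; the LTK search
 * itself never sees the RPA.
 */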
2610
2611 /* HCI command timer function */
2612 static void hci_cmd_timeout(struct work_struct *work)
2613 {
2614         struct hci_dev *hdev = container_of(work, struct hci_dev,
2615                                             cmd_timer.work);
2616
2617         if (hdev->sent_cmd) {
2618                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2619                 u16 opcode = __le16_to_cpu(sent->opcode);
2620
2621                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2622         } else {
2623                 bt_dev_err(hdev, "command tx timeout");
2624         }
2625
2626         atomic_set(&hdev->cmd_cnt, 1);
2627         queue_work(hdev->workqueue, &hdev->cmd_work);
2628 }
2629
2630 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2631                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2632 {
2633         struct oob_data *data;
2634
2635         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2636                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2637                         continue;
2638                 if (data->bdaddr_type != bdaddr_type)
2639                         continue;
2640                 return data;
2641         }
2642
2643         return NULL;
2644 }
2645
2646 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2647                                u8 bdaddr_type)
2648 {
2649         struct oob_data *data;
2650
2651         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2652         if (!data)
2653                 return -ENOENT;
2654
2655         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2656
2657         list_del(&data->list);
2658         kfree(data);
2659
2660         return 0;
2661 }
2662
2663 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2664 {
2665         struct oob_data *data, *n;
2666
2667         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2668                 list_del(&data->list);
2669                 kfree(data);
2670         }
2671 }
2672
2673 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2674                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2675                             u8 *hash256, u8 *rand256)
2676 {
2677         struct oob_data *data;
2678
2679         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2680         if (!data) {
2681                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2682                 if (!data)
2683                         return -ENOMEM;
2684
2685                 bacpy(&data->bdaddr, bdaddr);
2686                 data->bdaddr_type = bdaddr_type;
2687                 list_add(&data->list, &hdev->remote_oob_data);
2688         }
2689
2690         if (hash192 && rand192) {
2691                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2692                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2693                 if (hash256 && rand256)
2694                         data->present = 0x03;
2695         } else {
2696                 memset(data->hash192, 0, sizeof(data->hash192));
2697                 memset(data->rand192, 0, sizeof(data->rand192));
2698                 if (hash256 && rand256)
2699                         data->present = 0x02;
2700                 else
2701                         data->present = 0x00;
2702         }
2703
2704         if (hash256 && rand256) {
2705                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2706                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2707         } else {
2708                 memset(data->hash256, 0, sizeof(data->hash256));
2709                 memset(data->rand256, 0, sizeof(data->rand256));
2710                 if (hash192 && rand192)
2711                         data->present = 0x01;
2712         }
2713
2714         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2715
2716         return 0;
2717 }
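
/* Editorial note (not in the original source): data->present is a
 * bitmask of which OOB value pairs were supplied, mirroring the
 * branches above:
 *
 *      0x00  neither pair valid
 *      0x01  P-192 hash/rand only
 *      0x02  P-256 hash/rand only
 *      0x03  both P-192 and P-256 pairs present
 */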
2718
2719 /* This function requires the caller holds hdev->lock */
2720 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2721 {
2722         struct adv_info *adv_instance;
2723
2724         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2725                 if (adv_instance->instance == instance)
2726                         return adv_instance;
2727         }
2728
2729         return NULL;
2730 }
2731
2732 /* This function requires the caller holds hdev->lock */
2733 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2734 {
2735         struct adv_info *cur_instance;
2736
2737         cur_instance = hci_find_adv_instance(hdev, instance);
2738         if (!cur_instance)
2739                 return NULL;
2740
2741         if (cur_instance == list_last_entry(&hdev->adv_instances,
2742                                             struct adv_info, list))
2743                 return list_first_entry(&hdev->adv_instances,
2744                                                  struct adv_info, list);
2745         else
2746                 return list_next_entry(cur_instance, list);
2747 }
2748
2749 /* This function requires the caller holds hdev->lock */
2750 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2751 {
2752         struct adv_info *adv_instance;
2753
2754         adv_instance = hci_find_adv_instance(hdev, instance);
2755         if (!adv_instance)
2756                 return -ENOENT;
2757
2758         BT_DBG("%s removing instance %d", hdev->name, instance);
2759
2760         if (hdev->cur_adv_instance == instance) {
2761                 if (hdev->adv_instance_timeout) {
2762                         cancel_delayed_work(&hdev->adv_instance_expire);
2763                         hdev->adv_instance_timeout = 0;
2764                 }
2765                 hdev->cur_adv_instance = 0x00;
2766         }
2767
2768         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2769
2770         list_del(&adv_instance->list);
2771         kfree(adv_instance);
2772
2773         hdev->adv_instance_cnt--;
2774
2775         return 0;
2776 }
2777
2778 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2779 {
2780         struct adv_info *adv_instance, *n;
2781
2782         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2783                 adv_instance->rpa_expired = rpa_expired;
2784 }
2785
2786 /* This function requires the caller holds hdev->lock */
2787 void hci_adv_instances_clear(struct hci_dev *hdev)
2788 {
2789         struct adv_info *adv_instance, *n;
2790
2791         if (hdev->adv_instance_timeout) {
2792                 cancel_delayed_work(&hdev->adv_instance_expire);
2793                 hdev->adv_instance_timeout = 0;
2794         }
2795
2796         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2797                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2798                 list_del(&adv_instance->list);
2799                 kfree(adv_instance);
2800         }
2801
2802         hdev->adv_instance_cnt = 0;
2803         hdev->cur_adv_instance = 0x00;
2804 }
2805
2806 static void adv_instance_rpa_expired(struct work_struct *work)
2807 {
2808         struct adv_info *adv_instance = container_of(work, struct adv_info,
2809                                                      rpa_expired_cb.work);
2810
2811         BT_DBG("");
2812
2813         adv_instance->rpa_expired = true;
2814 }
2815
2816 /* This function requires the caller holds hdev->lock */
2817 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2818                          u16 adv_data_len, u8 *adv_data,
2819                          u16 scan_rsp_len, u8 *scan_rsp_data,
2820                          u16 timeout, u16 duration)
2821 {
2822         struct adv_info *adv_instance;
2823
2824         adv_instance = hci_find_adv_instance(hdev, instance);
2825         if (adv_instance) {
2826                 memset(adv_instance->adv_data, 0,
2827                        sizeof(adv_instance->adv_data));
2828                 memset(adv_instance->scan_rsp_data, 0,
2829                        sizeof(adv_instance->scan_rsp_data));
2830         } else {
2831                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2832                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2833                         return -EOVERFLOW;
2834
2835                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2836                 if (!adv_instance)
2837                         return -ENOMEM;
2838
2839                 adv_instance->pending = true;
2840                 adv_instance->instance = instance;
2841                 list_add(&adv_instance->list, &hdev->adv_instances);
2842                 hdev->adv_instance_cnt++;
2843         }
2844
2845         adv_instance->flags = flags;
2846         adv_instance->adv_data_len = adv_data_len;
2847         adv_instance->scan_rsp_len = scan_rsp_len;
2848
2849         if (adv_data_len)
2850                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2851
2852         if (scan_rsp_len)
2853                 memcpy(adv_instance->scan_rsp_data,
2854                        scan_rsp_data, scan_rsp_len);
2855
2856         adv_instance->timeout = timeout;
2857         adv_instance->remaining_time = timeout;
2858
2859         if (duration == 0)
2860                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2861         else
2862                 adv_instance->duration = duration;
2863
2864         adv_instance->tx_power = HCI_TX_POWER_INVALID;
2865
2866         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2867                           adv_instance_rpa_expired);
2868
2869         BT_DBG("%s for instance %d", hdev->name, instance);
2870
2871         return 0;
2872 }
2873
2874 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2875                                          bdaddr_t *bdaddr, u8 type)
2876 {
2877         struct bdaddr_list *b;
2878
2879         list_for_each_entry(b, bdaddr_list, list) {
2880                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2881                         return b;
2882         }
2883
2884         return NULL;
2885 }
2886
2887 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2888 {
2889         struct bdaddr_list *b, *n;
2890
2891         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2892                 list_del(&b->list);
2893                 kfree(b);
2894         }
2895 }
2896
2897 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2898 {
2899         struct bdaddr_list *entry;
2900
2901         if (!bacmp(bdaddr, BDADDR_ANY))
2902                 return -EBADF;
2903
2904         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2905                 return -EEXIST;
2906
2907         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2908         if (!entry)
2909                 return -ENOMEM;
2910
2911         bacpy(&entry->bdaddr, bdaddr);
2912         entry->bdaddr_type = type;
2913
2914         list_add(&entry->list, list);
2915
2916         return 0;
2917 }
2918
2919 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2920 {
2921         struct bdaddr_list *entry;
2922
2923         if (!bacmp(bdaddr, BDADDR_ANY)) {
2924                 hci_bdaddr_list_clear(list);
2925                 return 0;
2926         }
2927
2928         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2929         if (!entry)
2930                 return -ENOENT;
2931
2932         list_del(&entry->list);
2933         kfree(entry);
2934
2935         return 0;
2936 }
2937
2938 /* This function requires the caller holds hdev->lock */
2939 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2940                                                bdaddr_t *addr, u8 addr_type)
2941 {
2942         struct hci_conn_params *params;
2943
2944         list_for_each_entry(params, &hdev->le_conn_params, list) {
2945                 if (bacmp(&params->addr, addr) == 0 &&
2946                     params->addr_type == addr_type) {
2947                         return params;
2948                 }
2949         }
2950
2951         return NULL;
2952 }
2953
2954 /* This function requires the caller holds hdev->lock */
2955 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2956                                                   bdaddr_t *addr, u8 addr_type)
2957 {
2958         struct hci_conn_params *param;
2959
2960         list_for_each_entry(param, list, action) {
2961                 if (bacmp(&param->addr, addr) == 0 &&
2962                     param->addr_type == addr_type)
2963                         return param;
2964         }
2965
2966         return NULL;
2967 }
2968
2969 /* This function requires the caller holds hdev->lock */
2970 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2971                                             bdaddr_t *addr, u8 addr_type)
2972 {
2973         struct hci_conn_params *params;
2974
2975         params = hci_conn_params_lookup(hdev, addr, addr_type);
2976         if (params)
2977                 return params;
2978
2979         params = kzalloc(sizeof(*params), GFP_KERNEL);
2980         if (!params) {
2981                 bt_dev_err(hdev, "out of memory");
2982                 return NULL;
2983         }
2984
2985         bacpy(&params->addr, addr);
2986         params->addr_type = addr_type;
2987
2988         list_add(&params->list, &hdev->le_conn_params);
2989         INIT_LIST_HEAD(&params->action);
2990
2991         params->conn_min_interval = hdev->le_conn_min_interval;
2992         params->conn_max_interval = hdev->le_conn_max_interval;
2993         params->conn_latency = hdev->le_conn_latency;
2994         params->supervision_timeout = hdev->le_supv_timeout;
2995         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2996
2997         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2998
2999         return params;
3000 }
3001
3002 static void hci_conn_params_free(struct hci_conn_params *params)
3003 {
3004         if (params->conn) {
3005                 hci_conn_drop(params->conn);
3006                 hci_conn_put(params->conn);
3007         }
3008
3009         list_del(&params->action);
3010         list_del(&params->list);
3011         kfree(params);
3012 }
3013
3014 /* This function requires the caller holds hdev->lock */
3015 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3016 {
3017         struct hci_conn_params *params;
3018
3019         params = hci_conn_params_lookup(hdev, addr, addr_type);
3020         if (!params)
3021                 return;
3022
3023         hci_conn_params_free(params);
3024
3025         hci_update_background_scan(hdev);
3026
3027         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3028 }
3029
3030 /* This function requires the caller holds hdev->lock */
3031 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3032 {
3033         struct hci_conn_params *params, *tmp;
3034
3035         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3036                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3037                         continue;
3038
3039                 /* If trying to establish a one-time connection to a disabled
3040                  * device, leave the params but mark them as just once.
3041                  */
3042                 if (params->explicit_connect) {
3043                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3044                         continue;
3045                 }
3046
3047                 list_del(&params->list);
3048                 kfree(params);
3049         }
3050
3051         BT_DBG("All LE disabled connection parameters were removed");
3052 }
3053
3054 /* This function requires the caller holds hdev->lock */
3055 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3056 {
3057         struct hci_conn_params *params, *tmp;
3058
3059         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3060                 hci_conn_params_free(params);
3061
3062         BT_DBG("All LE connection parameters were removed");
3063 }
3064
3065 /* Copy the Identity Address of the controller.
3066  *
3067  * If the controller has a public BD_ADDR, then by default use that one.
3068  * If this is an LE-only controller without a public address, default to
3069  * the static random address.
3070  *
3071  * For debugging purposes it is possible to force controllers with a
3072  * public address to use the static random address instead.
3073  *
3074  * In case BR/EDR has been disabled on a dual-mode controller and
3075  * userspace has configured a static address, then that address
3076  * becomes the identity address instead of the public BR/EDR address.
3077  */
3078 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3079                                u8 *bdaddr_type)
3080 {
3081         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3082             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3083             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3084              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3085                 bacpy(bdaddr, &hdev->static_addr);
3086                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3087         } else {
3088                 bacpy(bdaddr, &hdev->bdaddr);
3089                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3090         }
3091 }
3092
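/* Illustrative sketch (hypothetical caller): fetching the identity
 * address into on-stack storage, e.g. when building an outgoing PDU
 * that carries the local address.
 */
static void example_identity(struct hci_dev *hdev)
{
        bdaddr_t bdaddr;
        u8 bdaddr_type;

        hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);

        BT_DBG("identity %pMR (type %u)", &bdaddr, bdaddr_type);
}
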
3093 /* Alloc HCI device */
3094 struct hci_dev *hci_alloc_dev(void)
3095 {
3096         struct hci_dev *hdev;
3097
3098         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3099         if (!hdev)
3100                 return NULL;
3101
3102         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3103         hdev->esco_type = (ESCO_HV1);
3104         hdev->link_mode = (HCI_LM_ACCEPT);
3105         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3106         hdev->io_capability = 0x03;     /* No Input No Output */
3107         hdev->manufacturer = 0xffff;    /* Default to internal use */
3108         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3109         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3110         hdev->adv_instance_cnt = 0;
3111         hdev->cur_adv_instance = 0x00;
3112         hdev->adv_instance_timeout = 0;
3113
3114         hdev->sniff_max_interval = 800;
3115         hdev->sniff_min_interval = 80;
3116
3117         hdev->le_adv_channel_map = 0x07;
3118         hdev->le_adv_min_interval = 0x0800;
3119         hdev->le_adv_max_interval = 0x0800;
3120 #ifdef TIZEN_BT
3121         hdev->adv_filter_policy = 0x00;
3122         hdev->adv_type = 0x00;
3123 #endif
3124         hdev->le_scan_interval = 0x0060;
3125         hdev->le_scan_window = 0x0030;
3126         hdev->le_conn_min_interval = 0x0018;
3127         hdev->le_conn_max_interval = 0x0028;
3128         hdev->le_conn_latency = 0x0000;
3129         hdev->le_supv_timeout = 0x002a;
3130         hdev->le_def_tx_len = 0x001b;
3131         hdev->le_def_tx_time = 0x0148;
3132         hdev->le_max_tx_len = 0x001b;
3133         hdev->le_max_tx_time = 0x0148;
3134         hdev->le_max_rx_len = 0x001b;
3135         hdev->le_max_rx_time = 0x0148;
3136         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3137         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3138         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3139         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3140
3141         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3142         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3143         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3144         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3145
3146         mutex_init(&hdev->lock);
3147         mutex_init(&hdev->req_lock);
3148
3149         INIT_LIST_HEAD(&hdev->mgmt_pending);
3150         INIT_LIST_HEAD(&hdev->blacklist);
3151         INIT_LIST_HEAD(&hdev->whitelist);
3152         INIT_LIST_HEAD(&hdev->uuids);
3153         INIT_LIST_HEAD(&hdev->link_keys);
3154         INIT_LIST_HEAD(&hdev->long_term_keys);
3155         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3156         INIT_LIST_HEAD(&hdev->remote_oob_data);
3157         INIT_LIST_HEAD(&hdev->le_white_list);
3158         INIT_LIST_HEAD(&hdev->le_resolv_list);
3159         INIT_LIST_HEAD(&hdev->le_conn_params);
3160         INIT_LIST_HEAD(&hdev->pend_le_conns);
3161         INIT_LIST_HEAD(&hdev->pend_le_reports);
3162         INIT_LIST_HEAD(&hdev->conn_hash.list);
3163         INIT_LIST_HEAD(&hdev->adv_instances);
3164
3165         INIT_WORK(&hdev->rx_work, hci_rx_work);
3166         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3167         INIT_WORK(&hdev->tx_work, hci_tx_work);
3168         INIT_WORK(&hdev->power_on, hci_power_on);
3169         INIT_WORK(&hdev->error_reset, hci_error_reset);
3170
3171         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3172
3173         skb_queue_head_init(&hdev->rx_q);
3174         skb_queue_head_init(&hdev->cmd_q);
3175         skb_queue_head_init(&hdev->raw_q);
3176
3177         init_waitqueue_head(&hdev->req_wait_q);
3178
3179         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3180
3181         hci_request_setup(hdev);
3182
3183         hci_init_sysfs(hdev);
3184         discovery_init(hdev);
3185
3186         return hdev;
3187 }
3188 EXPORT_SYMBOL(hci_alloc_dev);
3189
3190 /* Free HCI device */
3191 void hci_free_dev(struct hci_dev *hdev)
3192 {
3193         /* will free via device release */
3194         put_device(&hdev->dev);
3195 }
3196 EXPORT_SYMBOL(hci_free_dev);
3197
3198 /* Register HCI device */
3199 int hci_register_dev(struct hci_dev *hdev)
3200 {
3201         int id, error;
3202
3203         if (!hdev->open || !hdev->close || !hdev->send)
3204                 return -EINVAL;
3205
3206         /* Do not allow HCI_AMP devices to register at index 0,
3207          * so the index can be used as the AMP controller ID.
3208          */
3209         switch (hdev->dev_type) {
3210         case HCI_PRIMARY:
3211                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3212                 break;
3213         case HCI_AMP:
3214                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3215                 break;
3216         default:
3217                 return -EINVAL;
3218         }
3219
3220         if (id < 0)
3221                 return id;
3222
3223         sprintf(hdev->name, "hci%d", id);
3224         hdev->id = id;
3225
3226         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3227
3228         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3229         if (!hdev->workqueue) {
3230                 error = -ENOMEM;
3231                 goto err;
3232         }
3233
3234         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3235                                                       hdev->name);
3236         if (!hdev->req_workqueue) {
3237                 destroy_workqueue(hdev->workqueue);
3238                 error = -ENOMEM;
3239                 goto err;
3240         }
3241
3242         if (!IS_ERR_OR_NULL(bt_debugfs))
3243                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3244
3245         dev_set_name(&hdev->dev, "%s", hdev->name);
3246
3247         error = device_add(&hdev->dev);
3248         if (error < 0)
3249                 goto err_wqueue;
3250
3251         hci_leds_init(hdev);
3252
3253         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3254                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3255                                     hdev);
3256         if (hdev->rfkill) {
3257                 if (rfkill_register(hdev->rfkill) < 0) {
3258                         rfkill_destroy(hdev->rfkill);
3259                         hdev->rfkill = NULL;
3260                 }
3261         }
3262
3263         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3264                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3265
3266         hci_dev_set_flag(hdev, HCI_SETUP);
3267         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3268
3269         if (hdev->dev_type == HCI_PRIMARY) {
3270                 /* Assume BR/EDR support until proven otherwise (such as
3271                  * through reading supported features during init).
3272                  */
3273                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3274         }
3275
3276         write_lock(&hci_dev_list_lock);
3277         list_add(&hdev->list, &hci_dev_list);
3278         write_unlock(&hci_dev_list_lock);
3279
3280         /* Devices that are marked for raw-only usage are unconfigured
3281          * and should not be included in normal operation.
3282          */
3283         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3284                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3285
3286         hci_sock_dev_event(hdev, HCI_DEV_REG);
3287         hci_dev_hold(hdev);
3288
3289         queue_work(hdev->req_workqueue, &hdev->power_on);
3290
3291         return id;
3292
3293 err_wqueue:
3294         destroy_workqueue(hdev->workqueue);
3295         destroy_workqueue(hdev->req_workqueue);
3296 err:
3297         ida_simple_remove(&hci_index_ida, hdev->id);
3298
3299         return error;
3300 }
3301 EXPORT_SYMBOL(hci_register_dev);
3302
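/* Illustrative sketch (hypothetical transport driver, not part of this
 * file): the minimal allocate/register sequence. The open, close and
 * send callbacks are mandatory; hci_register_dev() rejects a device
 * that lacks any of them.
 */
static int example_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}

static int example_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus   = HCI_VIRTUAL;
        hdev->open  = example_open;
        hdev->close = example_close;
        hdev->send  = example_send;

        err = hci_register_dev(hdev);
        if (err < 0)
                hci_free_dev(hdev);

        return err;
}
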
3303 /* Unregister HCI device */
3304 void hci_unregister_dev(struct hci_dev *hdev)
3305 {
3306         int id;
3307
3308         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3309
3310         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3311
3312         id = hdev->id;
3313
3314         write_lock(&hci_dev_list_lock);
3315         list_del(&hdev->list);
3316         write_unlock(&hci_dev_list_lock);
3317
3318         cancel_work_sync(&hdev->power_on);
3319
3320         hci_dev_do_close(hdev);
3321
3322         if (!test_bit(HCI_INIT, &hdev->flags) &&
3323             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3324             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3325                 hci_dev_lock(hdev);
3326                 mgmt_index_removed(hdev);
3327                 hci_dev_unlock(hdev);
3328         }
3329
3330         /* mgmt_index_removed should take care of emptying the
3331          * pending list */
3332         BUG_ON(!list_empty(&hdev->mgmt_pending));
3333
3334         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3335
3336         if (hdev->rfkill) {
3337                 rfkill_unregister(hdev->rfkill);
3338                 rfkill_destroy(hdev->rfkill);
3339         }
3340
3341         device_del(&hdev->dev);
3342
3343         debugfs_remove_recursive(hdev->debugfs);
3344         kfree_const(hdev->hw_info);
3345         kfree_const(hdev->fw_info);
3346
3347         destroy_workqueue(hdev->workqueue);
3348         destroy_workqueue(hdev->req_workqueue);
3349
3350         hci_dev_lock(hdev);
3351         hci_bdaddr_list_clear(&hdev->blacklist);
3352         hci_bdaddr_list_clear(&hdev->whitelist);
3353         hci_uuids_clear(hdev);
3354         hci_link_keys_clear(hdev);
3355         hci_smp_ltks_clear(hdev);
3356         hci_smp_irks_clear(hdev);
3357         hci_remote_oob_data_clear(hdev);
3358         hci_adv_instances_clear(hdev);
3359         hci_bdaddr_list_clear(&hdev->le_white_list);
3360         hci_bdaddr_list_clear(&hdev->le_resolv_list);
3361         hci_conn_params_clear_all(hdev);
3362         hci_discovery_filter_clear(hdev);
3363         hci_dev_unlock(hdev);
3364
3365         hci_dev_put(hdev);
3366
3367         ida_simple_remove(&hci_index_ida, id);
3368 }
3369 EXPORT_SYMBOL(hci_unregister_dev);
3370
3371 /* Suspend HCI device */
3372 int hci_suspend_dev(struct hci_dev *hdev)
3373 {
3374         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3375         return 0;
3376 }
3377 EXPORT_SYMBOL(hci_suspend_dev);
3378
3379 /* Resume HCI device */
3380 int hci_resume_dev(struct hci_dev *hdev)
3381 {
3382         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3383         return 0;
3384 }
3385 EXPORT_SYMBOL(hci_resume_dev);
3386
3387 /* Reset HCI device */
3388 int hci_reset_dev(struct hci_dev *hdev)
3389 {
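        /* Synthetic Hardware Error event: event code 0x10, one byte of
         * parameters, hardware code 0x00.
         */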
3390         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3391         struct sk_buff *skb;
3392
3393         skb = bt_skb_alloc(3, GFP_ATOMIC);
3394         if (!skb)
3395                 return -ENOMEM;
3396
3397         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3398         skb_put_data(skb, hw_err, 3);
3399
3400         /* Send Hardware Error to upper stack */
3401         return hci_recv_frame(hdev, skb);
3402 }
3403 EXPORT_SYMBOL(hci_reset_dev);
3404
3405 /* Receive frame from HCI drivers */
3406 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3407 {
3408         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3409                       && !test_bit(HCI_INIT, &hdev->flags))) {
3410                 kfree_skb(skb);
3411                 return -ENXIO;
3412         }
3413
3414         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3415             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3416             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3417                 kfree_skb(skb);
3418                 return -EINVAL;
3419         }
3420
3421         /* Incoming skb */
3422         bt_cb(skb)->incoming = 1;
3423
3424         /* Time stamp */
3425         __net_timestamp(skb);
3426
3427         skb_queue_tail(&hdev->rx_q, skb);
3428         queue_work(hdev->workqueue, &hdev->rx_work);
3429
3430         return 0;
3431 }
3432 EXPORT_SYMBOL(hci_recv_frame);
3433
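/* Illustrative sketch (hypothetical driver RX path): the driver sets
 * the packet type before handing the frame up; anything other than an
 * event, ACL or SCO data packet is rejected with -EINVAL above.
 */
static int example_driver_rx_event(struct hci_dev *hdev, const void *data,
                                   size_t len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
        skb_put_data(skb, data, len);

        return hci_recv_frame(hdev, skb);
}
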
3434 /* Receive diagnostic message from HCI drivers */
3435 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3436 {
3437         /* Mark as diagnostic packet */
3438         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3439
3440         /* Time stamp */
3441         __net_timestamp(skb);
3442
3443         skb_queue_tail(&hdev->rx_q, skb);
3444         queue_work(hdev->workqueue, &hdev->rx_work);
3445
3446         return 0;
3447 }
3448 EXPORT_SYMBOL(hci_recv_diag);
3449
3450 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3451 {
3452         va_list vargs;
3453
3454         va_start(vargs, fmt);
3455         kfree_const(hdev->hw_info);
3456         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3457         va_end(vargs);
3458 }
3459 EXPORT_SYMBOL(hci_set_hw_info);
3460
3461 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3462 {
3463         va_list vargs;
3464
3465         va_start(vargs, fmt);
3466         kfree_const(hdev->fw_info);
3467         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3468         va_end(vargs);
3469 }
3470 EXPORT_SYMBOL(hci_set_fw_info);
3471
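/* Illustrative sketch (made-up strings): drivers typically call these
 * once during setup(); the resulting strings are exposed read-only
 * through debugfs.
 */
static void example_set_info(struct hci_dev *hdev)
{
        hci_set_hw_info(hdev, "board rev %u", 2);
        hci_set_fw_info(hdev, "build %s", "example");
}
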
3472 /* ---- Interface to upper protocols ---- */
3473
3474 int hci_register_cb(struct hci_cb *cb)
3475 {
3476         BT_DBG("%p name %s", cb, cb->name);
3477
3478         mutex_lock(&hci_cb_list_lock);
3479         list_add_tail(&cb->list, &hci_cb_list);
3480         mutex_unlock(&hci_cb_list_lock);
3481
3482         return 0;
3483 }
3484 EXPORT_SYMBOL(hci_register_cb);
3485
3486 int hci_unregister_cb(struct hci_cb *cb)
3487 {
3488         BT_DBG("%p name %s", cb, cb->name);
3489
3490         mutex_lock(&hci_cb_list_lock);
3491         list_del(&cb->list);
3492         mutex_unlock(&hci_cb_list_lock);
3493
3494         return 0;
3495 }
3496 EXPORT_SYMBOL(hci_unregister_cb);
3497
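/* Illustrative sketch (hypothetical upper protocol): hci_cb users such
 * as L2CAP register a named callback block; only the hooks they care
 * about need to be filled in.
 */
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
        BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb = {
        .name        = "example",
        .connect_cfm = example_connect_cfm,
};

/* Paired calls: hci_register_cb(&example_cb) on module init and
 * hci_unregister_cb(&example_cb) on module exit.
 */
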
3498 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3499 {
3500         int err;
3501
3502         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3503                skb->len);
3504
3505         /* Time stamp */
3506         __net_timestamp(skb);
3507
3508         /* Send copy to monitor */
3509         hci_send_to_monitor(hdev, skb);
3510
3511         if (atomic_read(&hdev->promisc)) {
3512                 /* Send copy to the sockets */
3513                 hci_send_to_sock(hdev, skb);
3514         }
3515
3516         /* Get rid of skb owner, prior to sending to the driver. */
3517         skb_orphan(skb);
3518
3519         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3520                 kfree_skb(skb);
3521                 return;
3522         }
3523
3524         err = hdev->send(hdev, skb);
3525         if (err < 0) {
3526                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3527                 kfree_skb(skb);
3528         }
3529 }
3530
3531 /* Send HCI command */
3532 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3533                  const void *param)
3534 {
3535         struct sk_buff *skb;
3536
3537         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3538
3539         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3540         if (!skb) {
3541                 bt_dev_err(hdev, "no memory for command");
3542                 return -ENOMEM;
3543         }
3544
3545         /* Stand-alone HCI commands must be flagged as
3546          * single-command requests.
3547          */
3548         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3549
3550         skb_queue_tail(&hdev->cmd_q, skb);
3551         queue_work(hdev->workqueue, &hdev->cmd_work);
3552
3553         return 0;
3554 }
3555
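/* Illustrative sketch (assumed parameter value): queuing a stand-alone
 * command. hci_prepare_cmd() copies the parameter block into the skb,
 * so passing on-stack data is safe.
 */
static int example_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
        return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
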
3556 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3557                    const void *param)
3558 {
3559         struct sk_buff *skb;
3560
3561         if (hci_opcode_ogf(opcode) != 0x3f) {
3562                 /* A controller receiving a command shall respond with either
3563                  * a Command Status Event or a Command Complete Event.
3564                  * Therefore, all standard HCI commands must be sent via the
3565                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3566                  * Some vendors do not comply with this rule for vendor-specific
3567                  * commands and do not return any event. We want to support
3568                  * unresponded commands for such cases only.
3569                  */
3570                 bt_dev_err(hdev, "unresponded command not supported");
3571                 return -EINVAL;
3572         }
3573
3574         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3575         if (!skb) {
3576                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3577                            opcode);
3578                 return -ENOMEM;
3579         }
3580
3581         hci_send_frame(hdev, skb);
3582
3583         return 0;
3584 }
3585 EXPORT_SYMBOL(__hci_cmd_send);
3586
3587 /* Get data from the previously sent command */
3588 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3589 {
3590         struct hci_command_hdr *hdr;
3591
3592         if (!hdev->sent_cmd)
3593                 return NULL;
3594
3595         hdr = (void *) hdev->sent_cmd->data;
3596
3597         if (hdr->opcode != cpu_to_le16(opcode))
3598                 return NULL;
3599
3600         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3601
3602         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3603 }
3604
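/* Illustrative sketch (event-handler context assumed): recovering the
 * parameters of the command that a Command Complete event refers to,
 * mirroring how the event handlers in hci_event.c use this helper.
 */
static void example_cc_write_scan_enable(struct hci_dev *hdev)
{
        u8 *sent;

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
        if (!sent)
                return;

        BT_DBG("sent scan enable 0x%2.2x", *sent);
}
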
3605 /* Send HCI command and wait for command complete event */
3606 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3607                              const void *param, u32 timeout)
3608 {
3609         struct sk_buff *skb;
3610
3611         if (!test_bit(HCI_UP, &hdev->flags))
3612                 return ERR_PTR(-ENETDOWN);
3613
3614         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3615
3616         hci_req_sync_lock(hdev);
3617         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3618         hci_req_sync_unlock(hdev);
3619
3620         return skb;
3621 }
3622 EXPORT_SYMBOL(hci_cmd_sync);
3623
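/* Illustrative sketch (assumed opcode): a blocking command round trip.
 * On success the returned skb holds the Command Complete parameters
 * and must be freed by the caller.
 */
static int example_read_voice_setting(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = hci_cmd_sync(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL,
                           HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* ... parse skb->data here ... */

        kfree_skb(skb);
        return 0;
}
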
3624 /* Send ACL data */
3625 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3626 {
3627         struct hci_acl_hdr *hdr;
3628         int len = skb->len;
3629
3630         skb_push(skb, HCI_ACL_HDR_SIZE);
3631         skb_reset_transport_header(skb);
3632         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3633         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3634         hdr->dlen   = cpu_to_le16(len);
3635 }
3636
3637 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3638                           struct sk_buff *skb, __u16 flags)
3639 {
3640         struct hci_conn *conn = chan->conn;
3641         struct hci_dev *hdev = conn->hdev;
3642         struct sk_buff *list;
3643
3644         skb->len = skb_headlen(skb);
3645         skb->data_len = 0;
3646
3647         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3648
3649         switch (hdev->dev_type) {
3650         case HCI_PRIMARY:
3651                 hci_add_acl_hdr(skb, conn->handle, flags);
3652                 break;
3653         case HCI_AMP:
3654                 hci_add_acl_hdr(skb, chan->handle, flags);
3655                 break;
3656         default:
3657                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3658                 return;
3659         }
3660
3661         list = skb_shinfo(skb)->frag_list;
3662         if (!list) {
3663                 /* Non-fragmented */
3664                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3665
3666                 skb_queue_tail(queue, skb);
3667         } else {
3668                 /* Fragmented */
3669                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3670
3671                 skb_shinfo(skb)->frag_list = NULL;
3672
3673                 /* Queue all fragments atomically. We need to use spin_lock_bh
3674                  * here because of 6LoWPAN links, as there this function is
3675                  * called from softirq and using normal spin lock could cause
3676                  * deadlocks.
3677                  */
3678                 spin_lock_bh(&queue->lock);
3679
3680                 __skb_queue_tail(queue, skb);
3681
3682                 flags &= ~ACL_START;
3683                 flags |= ACL_CONT;
3684                 do {
3685                         skb = list;
3686                         list = list->next;
3686
3687                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3688                         hci_add_acl_hdr(skb, conn->handle, flags);
3689
3690                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3691
3692                         __skb_queue_tail(queue, skb);
3693                 } while (list);
3694
3695                 spin_unlock_bh(&queue->lock);
3696         }
3697 }
3698
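/* Worked example for the queueing above (assuming the caller passed
 * ACL_START): a PDU split across a head skb plus two frag_list skbs is
 * queued as three ACL packets flagged [ACL_START, ACL_CONT, ACL_CONT],
 * each with its own ACL header, all under one atomic queue operation.
 */
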
3699 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3700 {
3701         struct hci_dev *hdev = chan->conn->hdev;
3702
3703         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3704
3705         hci_queue_acl(chan, &chan->data_q, skb, flags);
3706
3707         queue_work(hdev->workqueue, &hdev->tx_work);
3708 }
3709
3710 /* Send SCO data */
3711 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3712 {
3713         struct hci_dev *hdev = conn->hdev;
3714         struct hci_sco_hdr hdr;
3715
3716         BT_DBG("%s len %d", hdev->name, skb->len);
3717
3718         hdr.handle = cpu_to_le16(conn->handle);
3719         hdr.dlen   = skb->len;
3720
3721         skb_push(skb, HCI_SCO_HDR_SIZE);
3722         skb_reset_transport_header(skb);
3723         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3724
3725         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3726
3727         skb_queue_tail(&conn->data_q, skb);
3728         queue_work(hdev->workqueue, &hdev->tx_work);
3729 }
3730
3731 /* ---- HCI TX task (outgoing data) ---- */
3732
3733 /* HCI Connection scheduler */
3734 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3735                                      int *quote)
3736 {
3737         struct hci_conn_hash *h = &hdev->conn_hash;
3738         struct hci_conn *conn = NULL, *c;
3739         unsigned int num = 0, min = ~0;
3740
3741         /* We don't have to lock the device here. Connections are always
3742          * added and removed with the TX task disabled. */
3743
3744         rcu_read_lock();
3745
3746         list_for_each_entry_rcu(c, &h->list, list) {
3747                 if (c->type != type || skb_queue_empty(&c->data_q))
3748                         continue;
3749
3750                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3751                         continue;
3752
3753                 num++;
3754
3755                 if (c->sent < min) {
3756                         min  = c->sent;
3757                         conn = c;
3758                 }
3759
3760                 if (hci_conn_num(hdev, type) == num)
3761                         break;
3762         }
3763
3764         rcu_read_unlock();
3765
3766         if (conn) {
3767                 int cnt, q;
3768
3769                 switch (conn->type) {
3770                 case ACL_LINK:
3771                         cnt = hdev->acl_cnt;
3772                         break;
3773                 case SCO_LINK:
3774                 case ESCO_LINK:
3775                         cnt = hdev->sco_cnt;
3776                         break;
3777                 case LE_LINK:
3778                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3779                         break;
3780                 default:
3781                         cnt = 0;
3782                         bt_dev_err(hdev, "unknown link type %d", conn->type);
3783                 }
3784
3785                 q = cnt / num;
3786                 *quote = q ? q : 1;
3787         } else {
3788                 *quote = 0;
3789         }
3789
3790         BT_DBG("conn %p quote %d", conn, *quote);
3791         return conn;
3792 }
3793
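/* Worked example for the quote above: with hdev->acl_cnt == 8 and
 * three eligible ACL connections, the least-loaded connection gets a
 * quote of 8 / 3 == 2 packets for this round; once cnt < num the
 * quotient is 0 and is bumped to 1 so the connection still makes
 * progress.
 */
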
3794 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3795 {
3796         struct hci_conn_hash *h = &hdev->conn_hash;
3797         struct hci_conn *c;
3798
3799         bt_dev_err(hdev, "link tx timeout");
3800
3801         rcu_read_lock();
3802
3803         /* Kill stalled connections */
3804         list_for_each_entry_rcu(c, &h->list, list) {
3805                 if (c->type == type && c->sent) {
3806                         bt_dev_err(hdev, "killing stalled connection %pMR",
3807                                    &c->dst);
3808                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3809                 }
3810         }
3811
3812         rcu_read_unlock();
3813 }
3814
3815 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3816                                       int *quote)
3817 {
3818         struct hci_conn_hash *h = &hdev->conn_hash;
3819         struct hci_chan *chan = NULL;
3820         unsigned int num = 0, min = ~0, cur_prio = 0;
3821         struct hci_conn *conn;
3822         int cnt, q, conn_num = 0;
3823
3824         BT_DBG("%s", hdev->name);
3825
3826         rcu_read_lock();
3827
3828         list_for_each_entry_rcu(conn, &h->list, list) {
3829                 struct hci_chan *tmp;
3830
3831                 if (conn->type != type)
3832                         continue;
3833
3834                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3835                         continue;
3836
3837                 conn_num++;
3838
3839                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3840                         struct sk_buff *skb;
3841
3842                         if (skb_queue_empty(&tmp->data_q))
3843                                 continue;
3844
3845                         skb = skb_peek(&tmp->data_q);
3846                         if (skb->priority < cur_prio)
3847                                 continue;
3848
3849                         if (skb->priority > cur_prio) {
3850                                 num = 0;
3851                                 min = ~0;
3852                                 cur_prio = skb->priority;
3853                         }
3854
3855                         num++;
3856
3857                         if (conn->sent < min) {
3858                                 min  = conn->sent;
3859                                 chan = tmp;
3860                         }
3861                 }
3862
3863                 if (hci_conn_num(hdev, type) == conn_num)
3864                         break;
3865         }
3866
3867         rcu_read_unlock();
3868
3869         if (!chan)
3870                 return NULL;
3871
3872         switch (chan->conn->type) {
3873         case ACL_LINK:
3874                 cnt = hdev->acl_cnt;
3875                 break;
3876         case AMP_LINK:
3877                 cnt = hdev->block_cnt;
3878                 break;
3879         case SCO_LINK:
3880         case ESCO_LINK:
3881                 cnt = hdev->sco_cnt;
3882                 break;
3883         case LE_LINK:
3884                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3885                 break;
3886         default:
3887                 cnt = 0;
3888                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3889         }
3890
3891         q = cnt / num;
3892         *quote = q ? q : 1;
3893         BT_DBG("chan %p quote %d", chan, *quote);
3894         return chan;
3895 }
3896
3897 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3898 {
3899         struct hci_conn_hash *h = &hdev->conn_hash;
3900         struct hci_conn *conn;
3901         int num = 0;
3902
3903         BT_DBG("%s", hdev->name);
3904
3905         rcu_read_lock();
3906
3907         list_for_each_entry_rcu(conn, &h->list, list) {
3908                 struct hci_chan *chan;
3909
3910                 if (conn->type != type)
3911                         continue;
3912
3913                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3914                         continue;
3915
3916                 num++;
3917
3918                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3919                         struct sk_buff *skb;
3920
3921                         if (chan->sent) {
3922                                 chan->sent = 0;
3923                                 continue;
3924                         }
3925
3926                         if (skb_queue_empty(&chan->data_q))
3927                                 continue;
3928
3929                         skb = skb_peek(&chan->data_q);
3930                         if (skb->priority >= HCI_PRIO_MAX - 1)
3931                                 continue;
3932
3933                         skb->priority = HCI_PRIO_MAX - 1;
3934
3935                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3936                                skb->priority);
3937                 }
3938
3939                 if (hci_conn_num(hdev, type) == num)
3940                         break;
3941         }
3942
3943         rcu_read_unlock();
3944 }
3946
3947 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3948 {
3949         /* Calculate count of blocks used by this packet */
3950         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3951 }
3952
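/* Worked example: a 1004-byte skb carries 1000 bytes of payload after
 * the 4-byte ACL header; with hdev->block_len == 255 that occupies
 * DIV_ROUND_UP(1000, 255) == 4 controller buffer blocks.
 */
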
3953 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3954 {
3955         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3956                 /* ACL tx timeout must be longer than maximum
3957                  * link supervision timeout (40.9 seconds) */
3958                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3959                                        HCI_ACL_TX_TIMEOUT))
3960                         hci_link_tx_to(hdev, ACL_LINK);
3961         }
3962 }
3963
3964 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3965 {
3966         unsigned int cnt = hdev->acl_cnt;
3967         struct hci_chan *chan;
3968         struct sk_buff *skb;
3969         int quote;
3970
3971         __check_timeout(hdev, cnt);
3972
3973         while (hdev->acl_cnt &&
3974                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3975                 u32 priority = (skb_peek(&chan->data_q))->priority;
3976                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3977                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3978                                skb->len, skb->priority);
3979
3980                         /* Stop if priority has changed */
3981                         if (skb->priority < priority)
3982                                 break;
3983
3984                         skb = skb_dequeue(&chan->data_q);
3985
3986                         hci_conn_enter_active_mode(chan->conn,
3987                                                    bt_cb(skb)->force_active);
3988
3989                         hci_send_frame(hdev, skb);
3990                         hdev->acl_last_tx = jiffies;
3991
3992                         hdev->acl_cnt--;
3993                         chan->sent++;
3994                         chan->conn->sent++;
3995                 }
3996         }
3997
3998         if (cnt != hdev->acl_cnt)
3999                 hci_prio_recalculate(hdev, ACL_LINK);
4000 }
4001
4002 static void hci_sched_acl_blk(struct hci_dev *hdev)
4003 {
4004         unsigned int cnt = hdev->block_cnt;
4005         struct hci_chan *chan;
4006         struct sk_buff *skb;
4007         int quote;
4008         u8 type;
4009
4010         __check_timeout(hdev, cnt);
4011
4012         BT_DBG("%s", hdev->name);
4013
4014         if (hdev->dev_type == HCI_AMP)
4015                 type = AMP_LINK;
4016         else
4017                 type = ACL_LINK;
4018
4019         while (hdev->block_cnt > 0 &&
4020                (chan = hci_chan_sent(hdev, type, &quote))) {
4021                 u32 priority = (skb_peek(&chan->data_q))->priority;
4022                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4023                         int blocks;
4024
4025                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4026                                skb->len, skb->priority);
4027
4028                         /* Stop if priority has changed */
4029                         if (skb->priority < priority)
4030                                 break;
4031
4032                         skb = skb_dequeue(&chan->data_q);
4033
4034                         blocks = __get_blocks(hdev, skb);
4035                         if (blocks > hdev->block_cnt)
4036                                 return;
4037
4038                         hci_conn_enter_active_mode(chan->conn,
4039                                                    bt_cb(skb)->force_active);
4040
4041                         hci_send_frame(hdev, skb);
4042                         hdev->acl_last_tx = jiffies;
4043
4044                         hdev->block_cnt -= blocks;
4045                         quote -= blocks;
4046
4047                         chan->sent += blocks;
4048                         chan->conn->sent += blocks;
4049                 }
4050         }
4051
4052         if (cnt != hdev->block_cnt)
4053                 hci_prio_recalculate(hdev, type);
4054 }
4055
4056 static void hci_sched_acl(struct hci_dev *hdev)
4057 {
4058         BT_DBG("%s", hdev->name);
4059
4060         /* No ACL links to schedule on a BR/EDR controller */
4061         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4062                 return;
4063
4064         /* No AMP links to schedule on an AMP controller */
4065         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4066                 return;
4067
4068         switch (hdev->flow_ctl_mode) {
4069         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4070                 hci_sched_acl_pkt(hdev);
4071                 break;
4072
4073         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4074                 hci_sched_acl_blk(hdev);
4075                 break;
4076         }
4077 }
4078
4079 /* Schedule SCO */
4080 static void hci_sched_sco(struct hci_dev *hdev)
4081 {
4082         struct hci_conn *conn;
4083         struct sk_buff *skb;
4084         int quote;
4085
4086         BT_DBG("%s", hdev->name);
4087
4088         if (!hci_conn_num(hdev, SCO_LINK))
4089                 return;
4090
4091         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4092                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4093                         BT_DBG("skb %p len %d", skb, skb->len);
4094                         hci_send_frame(hdev, skb);
4095
4096                         conn->sent++;
4097                         if (conn->sent == ~0)
4098                                 conn->sent = 0;
4099                 }
4100         }
4101 }
4102
4103 static void hci_sched_esco(struct hci_dev *hdev)
4104 {
4105         struct hci_conn *conn;
4106         struct sk_buff *skb;
4107         int quote;
4108
4109         BT_DBG("%s", hdev->name);
4110
4111         if (!hci_conn_num(hdev, ESCO_LINK))
4112                 return;
4113
4114         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4115                                                      &quote))) {
4116                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4117                         BT_DBG("skb %p len %d", skb, skb->len);
4118                         hci_send_frame(hdev, skb);
4119
4120                         conn->sent++;
4121                         if (conn->sent == ~0)
4122                                 conn->sent = 0;
4123                 }
4124         }
4125 }
4126
4127 static void hci_sched_le(struct hci_dev *hdev)
4128 {
4129         struct hci_chan *chan;
4130         struct sk_buff *skb;
4131         int quote, cnt, tmp;
4132
4133         BT_DBG("%s", hdev->name);
4134
4135         if (!hci_conn_num(hdev, LE_LINK))
4136                 return;
4137
4138         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4139                 /* LE tx timeout must be longer than maximum
4140                  * link supervision timeout (40.9 seconds) */
4141                 if (!hdev->le_cnt && hdev->le_pkts &&
4142                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4143                         hci_link_tx_to(hdev, LE_LINK);
4144         }
4145
4146         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4147         tmp = cnt;
4148         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4149                 u32 priority = (skb_peek(&chan->data_q))->priority;
4150                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4151                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4152                                skb->len, skb->priority);
4153
4154                         /* Stop if priority has changed */
4155                         if (skb->priority < priority)
4156                                 break;
4157
4158                         skb = skb_dequeue(&chan->data_q);
4159
4160                         hci_send_frame(hdev, skb);
4161                         hdev->le_last_tx = jiffies;
4162
4163                         cnt--;
4164                         chan->sent++;
4165                         chan->conn->sent++;
4166                 }
4167         }
4168
4169         if (hdev->le_pkts)
4170                 hdev->le_cnt = cnt;
4171         else
4172                 hdev->acl_cnt = cnt;
4173
4174         if (cnt != tmp)
4175                 hci_prio_recalculate(hdev, LE_LINK);
4176 }
4177
4178 static void hci_tx_work(struct work_struct *work)
4179 {
4180         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4181         struct sk_buff *skb;
4182
4183         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4184                hdev->sco_cnt, hdev->le_cnt);
4185
4186         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4187                 /* Schedule queues and send stuff to HCI driver */
4188                 hci_sched_acl(hdev);
4189                 hci_sched_sco(hdev);
4190                 hci_sched_esco(hdev);
4191                 hci_sched_le(hdev);
4192         }
4193
4194         /* Send next queued raw (unknown type) packet */
4195         while ((skb = skb_dequeue(&hdev->raw_q)))
4196                 hci_send_frame(hdev, skb);
4197 }
4198
4199 /* ----- HCI RX task (incoming data processing) ----- */
4200
4201 /* ACL data packet */
4202 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4203 {
4204         struct hci_acl_hdr *hdr = (void *) skb->data;
4205         struct hci_conn *conn;
4206         __u16 handle, flags;
4207
4208         skb_pull(skb, HCI_ACL_HDR_SIZE);
4209
4210         handle = __le16_to_cpu(hdr->handle);
4211         flags  = hci_flags(handle);
4212         handle = hci_handle(handle);
4213
4214         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4215                handle, flags);
4216
4217         hdev->stat.acl_rx++;
4218
4219         hci_dev_lock(hdev);
4220         conn = hci_conn_hash_lookup_handle(hdev, handle);
4221         hci_dev_unlock(hdev);
4222
4223         if (conn) {
4224                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4225
4226                 /* Send to upper protocol */
4227                 l2cap_recv_acldata(conn, skb, flags);
4228                 return;
4229         }
4230
4231         bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4232                    handle);
4233
4234         kfree_skb(skb);
4235 }
4236
4237 /* SCO data packet */
4238 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4239 {
4240         struct hci_sco_hdr *hdr = (void *) skb->data;
4241         struct hci_conn *conn;
4242         __u16 handle;
4243
4244         skb_pull(skb, HCI_SCO_HDR_SIZE);
4245
4246         handle = __le16_to_cpu(hdr->handle);
4247
4248         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4249
4250         hdev->stat.sco_rx++;
4251
4252         hci_dev_lock(hdev);
4253         conn = hci_conn_hash_lookup_handle(hdev, handle);
4254         hci_dev_unlock(hdev);
4255
4256         if (conn) {
4257                 /* Send to upper protocol */
4258                 sco_recv_scodata(conn, skb);
4259                 return;
4260         }
4261
4262         bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4263                    handle);
4264
4265         kfree_skb(skb);
4266 }
4267
4268 static bool hci_req_is_complete(struct hci_dev *hdev)
4269 {
4270         struct sk_buff *skb;
4271
4272         skb = skb_peek(&hdev->cmd_q);
4273         if (!skb)
4274                 return true;
4275
4276         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4277 }
4278
4279 static void hci_resend_last(struct hci_dev *hdev)
4280 {
4281         struct hci_command_hdr *sent;
4282         struct sk_buff *skb;
4283         u16 opcode;
4284
4285         if (!hdev->sent_cmd)
4286                 return;
4287
4288         sent = (void *) hdev->sent_cmd->data;
4289         opcode = __le16_to_cpu(sent->opcode);
4290         if (opcode == HCI_OP_RESET)
4291                 return;
4292
4293         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4294         if (!skb)
4295                 return;
4296
4297         skb_queue_head(&hdev->cmd_q, skb);
4298         queue_work(hdev->workqueue, &hdev->cmd_work);
4299 }
4300
4301 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4302                           hci_req_complete_t *req_complete,
4303                           hci_req_complete_skb_t *req_complete_skb)
4304 {
4305         struct sk_buff *skb;
4306         unsigned long flags;
4307
4308         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4309
4310         /* If the completed command doesn't match the last one that was
4311          * sent we need to do special handling of it.
4312          */
4313         if (!hci_sent_cmd_data(hdev, opcode)) {
4314                 /* Some CSR based controllers generate a spontaneous
4315                  * reset complete event during init and any pending
4316                  * command will never be completed. In such a case we
4317                  * need to resend whatever was the last sent
4318                  * command.
4319                  */
4320                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4321                         hci_resend_last(hdev);
4322
4323                 return;
4324         }
4325
4326         /* If we reach this point this event matches the last command sent */
4327         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4328
4329         /* If the command succeeded and there's still more commands in
4330          * this request the request is not yet complete.
4331          */
4332         if (!status && !hci_req_is_complete(hdev))
4333                 return;
4334
4335         /* If this was the last command in a request the complete
4336          * callback would be found in hdev->sent_cmd instead of the
4337          * command queue (hdev->cmd_q).
4338          */
4339         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4340                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4341                 return;
4342         }
4343
4344         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4345                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4346                 return;
4347         }
4348
4349         /* Remove all pending commands belonging to this request */
4350         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4351         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4352                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4353                         __skb_queue_head(&hdev->cmd_q, skb);
4354                         break;
4355                 }
4356
4357                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4358                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4359                 else
4360                         *req_complete = bt_cb(skb)->hci.req_complete;
4361                 kfree_skb(skb);
4362         }
4363         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4364 }
4365
4366 static void hci_rx_work(struct work_struct *work)
4367 {
4368         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4369         struct sk_buff *skb;
4370
4371         BT_DBG("%s", hdev->name);
4372
4373         while ((skb = skb_dequeue(&hdev->rx_q))) {
4374                 /* Send copy to monitor */
4375                 hci_send_to_monitor(hdev, skb);
4376
4377                 if (atomic_read(&hdev->promisc)) {
4378                         /* Send copy to the sockets */
4379                         hci_send_to_sock(hdev, skb);
4380                 }
4381
4382                 /* If the device has been opened in HCI_USER_CHANNEL,
4383                  * userspace has exclusive access to the device.
4384                  * When the device is in HCI_INIT, we still need to
4385                  * pass the packets to the driver so that it can
4386                  * complete its setup().
4387                  */
4388                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4389                     !test_bit(HCI_INIT, &hdev->flags)) {
4390                         kfree_skb(skb);
4391                         continue;
4392                 }
4393
4394                 if (test_bit(HCI_INIT, &hdev->flags)) {
4395                         /* Don't process data packets in this state. */
4396                         switch (hci_skb_pkt_type(skb)) {
4397                         case HCI_ACLDATA_PKT:
4398                         case HCI_SCODATA_PKT:
4399                                 kfree_skb(skb);
4400                                 continue;
4401                         }
4402                 }
4403
4404                 /* Process frame */
4405                 switch (hci_skb_pkt_type(skb)) {
4406                 case HCI_EVENT_PKT:
4407                         BT_DBG("%s Event packet", hdev->name);
4408                         hci_event_packet(hdev, skb);
4409                         break;
4410
4411                 case HCI_ACLDATA_PKT:
4412                         BT_DBG("%s ACL data packet", hdev->name);
4413                         hci_acldata_packet(hdev, skb);
4414                         break;
4415
4416                 case HCI_SCODATA_PKT:
4417                         BT_DBG("%s SCO data packet", hdev->name);
4418                         hci_scodata_packet(hdev, skb);
4419                         break;
4420
4421                 default:
4422                         kfree_skb(skb);
4423                         break;
4424                 }
4425         }
4426 }
4427
4428 static void hci_cmd_work(struct work_struct *work)
4429 {
4430         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4431         struct sk_buff *skb;
4432
4433         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4434                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4435
4436         /* Send queued commands */
4437         if (atomic_read(&hdev->cmd_cnt)) {
4438                 skb = skb_dequeue(&hdev->cmd_q);
4439                 if (!skb)
4440                         return;
4441
4442                 kfree_skb(hdev->sent_cmd);
4443
4444                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4445                 if (hdev->sent_cmd) {
4446                         if (hci_req_status_pend(hdev))
4447                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4448                         atomic_dec(&hdev->cmd_cnt);
4449                         hci_send_frame(hdev, skb);
4450                         if (test_bit(HCI_RESET, &hdev->flags))
4451                                 cancel_delayed_work(&hdev->cmd_timer);
4452                         else
4453                                 schedule_delayed_work(&hdev->cmd_timer,
4454                                                       HCI_CMD_TIMEOUT);
4455                 } else {
4456                         skb_queue_head(&hdev->cmd_q, skb);
4457                         queue_work(hdev->workqueue, &hdev->cmd_work);
4458                 }
4459         }
4460 }