bb84ff5fb98a27877310a902ebbedb21877010d7
[platform/kernel/linux-rpi.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/property.h>
33 #include <linux/suspend.h>
34 #include <linux/wait.h>
35 #include <asm/unaligned.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 #include <net/bluetooth/mgmt.h>
41
42 #include "hci_request.h"
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47 #include "aosp.h"
48
49 static void hci_rx_work(struct work_struct *work);
50 static void hci_cmd_work(struct work_struct *work);
51 static void hci_tx_work(struct work_struct *work);
52
53 /* HCI device list */
54 LIST_HEAD(hci_dev_list);
55 DEFINE_RWLOCK(hci_dev_list_lock);
56
57 /* HCI callback list */
58 LIST_HEAD(hci_cb_list);
59 DEFINE_MUTEX(hci_cb_list_lock);
60
61 /* HCI ID Numbering */
62 static DEFINE_IDA(hci_index_ida);
63
64 /* ---- HCI debugfs entries ---- */
65
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67                              size_t count, loff_t *ppos)
68 {
69         struct hci_dev *hdev = file->private_data;
70         char buf[3];
71
72         buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
73         buf[1] = '\n';
74         buf[2] = '\0';
75         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76 }
77
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79                               size_t count, loff_t *ppos)
80 {
81         struct hci_dev *hdev = file->private_data;
82         struct sk_buff *skb;
83         bool enable;
84         int err;
85
86         if (!test_bit(HCI_UP, &hdev->flags))
87                 return -ENETDOWN;
88
89         err = kstrtobool_from_user(user_buf, count, &enable);
90         if (err)
91                 return err;
92
93         if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
94                 return -EALREADY;
95
96         hci_req_sync_lock(hdev);
97         if (enable)
98                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99                                      HCI_CMD_TIMEOUT);
100         else
101                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102                                      HCI_CMD_TIMEOUT);
103         hci_req_sync_unlock(hdev);
104
105         if (IS_ERR(skb))
106                 return PTR_ERR(skb);
107
108         kfree_skb(skb);
109
110         hci_dev_change_flag(hdev, HCI_DUT_MODE);
111
112         return count;
113 }
114
115 static const struct file_operations dut_mode_fops = {
116         .open           = simple_open,
117         .read           = dut_mode_read,
118         .write          = dut_mode_write,
119         .llseek         = default_llseek,
120 };
121
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123                                 size_t count, loff_t *ppos)
124 {
125         struct hci_dev *hdev = file->private_data;
126         char buf[3];
127
128         buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
129         buf[1] = '\n';
130         buf[2] = '\0';
131         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132 }
133
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135                                  size_t count, loff_t *ppos)
136 {
137         struct hci_dev *hdev = file->private_data;
138         bool enable;
139         int err;
140
141         err = kstrtobool_from_user(user_buf, count, &enable);
142         if (err)
143                 return err;
144
145         /* When the diagnostic flags are not persistent and the transport
146          * is not active or in user channel operation, then there is no need
147          * for the vendor callback. Instead just store the desired value and
148          * the setting will be programmed when the controller gets powered on.
149          */
150         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
151             (!test_bit(HCI_RUNNING, &hdev->flags) ||
152              hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
153                 goto done;
154
155         hci_req_sync_lock(hdev);
156         err = hdev->set_diag(hdev, enable);
157         hci_req_sync_unlock(hdev);
158
159         if (err < 0)
160                 return err;
161
162 done:
163         if (enable)
164                 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
165         else
166                 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
167
168         return count;
169 }
170
171 static const struct file_operations vendor_diag_fops = {
172         .open           = simple_open,
173         .read           = vendor_diag_read,
174         .write          = vendor_diag_write,
175         .llseek         = default_llseek,
176 };
177
178 static void hci_debugfs_create_basic(struct hci_dev *hdev)
179 {
180         debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
181                             &dut_mode_fops);
182
183         if (hdev->set_diag)
184                 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
185                                     &vendor_diag_fops);
186 }
187
188 static int hci_reset_req(struct hci_request *req, unsigned long opt)
189 {
190         BT_DBG("%s %ld", req->hdev->name, opt);
191
192         /* Reset device */
193         set_bit(HCI_RESET, &req->hdev->flags);
194         hci_req_add(req, HCI_OP_RESET, 0, NULL);
195         return 0;
196 }
197
198 static void bredr_init(struct hci_request *req)
199 {
200         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
202         /* Read Local Supported Features */
203         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
204
205         /* Read Local Version */
206         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
207
208         /* Read BD Address */
209         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
210 }
211
212 static void amp_init1(struct hci_request *req)
213 {
214         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
215
216         /* Read Local Version */
217         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
218
219         /* Read Local Supported Commands */
220         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
221
222         /* Read Local AMP Info */
223         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
224
225         /* Read Data Blk size */
226         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
227
228         /* Read Flow Control Mode */
229         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
230
231         /* Read Location Data */
232         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
233 }
234
235 static int amp_init2(struct hci_request *req)
236 {
237         /* Read Local Supported Features. Not all AMP controllers
238          * support this so it's placed conditionally in the second
239          * stage init.
240          */
241         if (req->hdev->commands[14] & 0x20)
242                 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
243
244         return 0;
245 }
246
247 static int hci_init1_req(struct hci_request *req, unsigned long opt)
248 {
249         struct hci_dev *hdev = req->hdev;
250
251         BT_DBG("%s %ld", hdev->name, opt);
252
253         /* Reset */
254         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
255                 hci_reset_req(req, 0);
256
257         switch (hdev->dev_type) {
258         case HCI_PRIMARY:
259                 bredr_init(req);
260                 break;
261         case HCI_AMP:
262                 amp_init1(req);
263                 break;
264         default:
265                 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
266                 break;
267         }
268
269         return 0;
270 }
271
272 static void bredr_setup(struct hci_request *req)
273 {
274         __le16 param;
275         __u8 flt_type;
276
277         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
278         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
279
280         /* Read Class of Device */
281         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
282
283         /* Read Local Name */
284         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
285
286         /* Read Voice Setting */
287         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
288
289         /* Read Number of Supported IAC */
290         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
291
292         /* Read Current IAC LAP */
293         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
294
295         /* Clear Event Filters */
296         flt_type = HCI_FLT_CLEAR_ALL;
297         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
298
299         /* Connection accept timeout ~20 secs */
300         param = cpu_to_le16(0x7d00);
301         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
302 }
303
304 static void le_setup(struct hci_request *req)
305 {
306         struct hci_dev *hdev = req->hdev;
307
308         /* Read LE Buffer Size */
309         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
310
311         /* Read LE Local Supported Features */
312         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
313
314         /* Read LE Supported States */
315         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
316
317         /* LE-only controllers have LE implicitly enabled */
318         if (!lmp_bredr_capable(hdev))
319                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
320 }
321
322 static void hci_setup_event_mask(struct hci_request *req)
323 {
324         struct hci_dev *hdev = req->hdev;
325
326         /* The second byte is 0xff instead of 0x9f (two reserved bits
327          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
328          * command otherwise.
329          */
330         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
331
332         /* CSR 1.1 dongles does not accept any bitfield so don't try to set
333          * any event mask for pre 1.2 devices.
334          */
335         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
336                 return;
337
338         if (lmp_bredr_capable(hdev)) {
339                 events[4] |= 0x01; /* Flow Specification Complete */
340         } else {
341                 /* Use a different default for LE-only devices */
342                 memset(events, 0, sizeof(events));
343                 events[1] |= 0x20; /* Command Complete */
344                 events[1] |= 0x40; /* Command Status */
345                 events[1] |= 0x80; /* Hardware Error */
346
347                 /* If the controller supports the Disconnect command, enable
348                  * the corresponding event. In addition enable packet flow
349                  * control related events.
350                  */
351                 if (hdev->commands[0] & 0x20) {
352                         events[0] |= 0x10; /* Disconnection Complete */
353                         events[2] |= 0x04; /* Number of Completed Packets */
354                         events[3] |= 0x02; /* Data Buffer Overflow */
355                 }
356
357                 /* If the controller supports the Read Remote Version
358                  * Information command, enable the corresponding event.
359                  */
360                 if (hdev->commands[2] & 0x80)
361                         events[1] |= 0x08; /* Read Remote Version Information
362                                             * Complete
363                                             */
364
365                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
366                         events[0] |= 0x80; /* Encryption Change */
367                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
368                 }
369         }
370
371         if (lmp_inq_rssi_capable(hdev) ||
372             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
373                 events[4] |= 0x02; /* Inquiry Result with RSSI */
374
375         if (lmp_ext_feat_capable(hdev))
376                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
377
378         if (lmp_esco_capable(hdev)) {
379                 events[5] |= 0x08; /* Synchronous Connection Complete */
380                 events[5] |= 0x10; /* Synchronous Connection Changed */
381         }
382
383         if (lmp_sniffsubr_capable(hdev))
384                 events[5] |= 0x20; /* Sniff Subrating */
385
386         if (lmp_pause_enc_capable(hdev))
387                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
388
389         if (lmp_ext_inq_capable(hdev))
390                 events[5] |= 0x40; /* Extended Inquiry Result */
391
392         if (lmp_no_flush_capable(hdev))
393                 events[7] |= 0x01; /* Enhanced Flush Complete */
394
395         if (lmp_lsto_capable(hdev))
396                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
397
398         if (lmp_ssp_capable(hdev)) {
399                 events[6] |= 0x01;      /* IO Capability Request */
400                 events[6] |= 0x02;      /* IO Capability Response */
401                 events[6] |= 0x04;      /* User Confirmation Request */
402                 events[6] |= 0x08;      /* User Passkey Request */
403                 events[6] |= 0x10;      /* Remote OOB Data Request */
404                 events[6] |= 0x20;      /* Simple Pairing Complete */
405                 events[7] |= 0x04;      /* User Passkey Notification */
406                 events[7] |= 0x08;      /* Keypress Notification */
407                 events[7] |= 0x10;      /* Remote Host Supported
408                                          * Features Notification
409                                          */
410         }
411
412         if (lmp_le_capable(hdev))
413                 events[7] |= 0x20;      /* LE Meta-Event */
414
415         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
416 }
417
418 static int hci_init2_req(struct hci_request *req, unsigned long opt)
419 {
420         struct hci_dev *hdev = req->hdev;
421
422         if (hdev->dev_type == HCI_AMP)
423                 return amp_init2(req);
424
425         if (lmp_bredr_capable(hdev))
426                 bredr_setup(req);
427         else
428                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
429
430         if (lmp_le_capable(hdev))
431                 le_setup(req);
432
433         /* All Bluetooth 1.2 and later controllers should support the
434          * HCI command for reading the local supported commands.
435          *
436          * Unfortunately some controllers indicate Bluetooth 1.2 support,
437          * but do not have support for this command. If that is the case,
438          * the driver can quirk the behavior and skip reading the local
439          * supported commands.
440          */
441         if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
442             !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
443                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
444
445         if (lmp_ssp_capable(hdev)) {
446                 /* When SSP is available, then the host features page
447                  * should also be available as well. However some
448                  * controllers list the max_page as 0 as long as SSP
449                  * has not been enabled. To achieve proper debugging
450                  * output, force the minimum max_page to 1 at least.
451                  */
452                 hdev->max_page = 0x01;
453
454                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
455                         u8 mode = 0x01;
456
457                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
458                                     sizeof(mode), &mode);
459                 } else {
460                         struct hci_cp_write_eir cp;
461
462                         memset(hdev->eir, 0, sizeof(hdev->eir));
463                         memset(&cp, 0, sizeof(cp));
464
465                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
466                 }
467         }
468
469         if (lmp_inq_rssi_capable(hdev) ||
470             test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
471                 u8 mode;
472
473                 /* If Extended Inquiry Result events are supported, then
474                  * they are clearly preferred over Inquiry Result with RSSI
475                  * events.
476                  */
477                 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
478
479                 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
480         }
481
482         if (lmp_inq_tx_pwr_capable(hdev))
483                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
484
485         if (lmp_ext_feat_capable(hdev)) {
486                 struct hci_cp_read_local_ext_features cp;
487
488                 cp.page = 0x01;
489                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
490                             sizeof(cp), &cp);
491         }
492
493         if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
494                 u8 enable = 1;
495                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
496                             &enable);
497         }
498
499         return 0;
500 }
501
502 static void hci_setup_link_policy(struct hci_request *req)
503 {
504         struct hci_dev *hdev = req->hdev;
505         struct hci_cp_write_def_link_policy cp;
506         u16 link_policy = 0;
507
508         if (lmp_rswitch_capable(hdev))
509                 link_policy |= HCI_LP_RSWITCH;
510         if (lmp_hold_capable(hdev))
511                 link_policy |= HCI_LP_HOLD;
512         if (lmp_sniff_capable(hdev))
513                 link_policy |= HCI_LP_SNIFF;
514         if (lmp_park_capable(hdev))
515                 link_policy |= HCI_LP_PARK;
516
517         cp.policy = cpu_to_le16(link_policy);
518         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
519 }
520
521 static void hci_set_le_support(struct hci_request *req)
522 {
523         struct hci_dev *hdev = req->hdev;
524         struct hci_cp_write_le_host_supported cp;
525
526         /* LE-only devices do not support explicit enablement */
527         if (!lmp_bredr_capable(hdev))
528                 return;
529
530         memset(&cp, 0, sizeof(cp));
531
532         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
533                 cp.le = 0x01;
534                 cp.simul = 0x00;
535         }
536
537         if (cp.le != lmp_host_le_capable(hdev))
538                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
539                             &cp);
540 }
541
542 static void hci_set_event_mask_page_2(struct hci_request *req)
543 {
544         struct hci_dev *hdev = req->hdev;
545         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
546         bool changed = false;
547
548         /* If Connectionless Peripheral Broadcast central role is supported
549          * enable all necessary events for it.
550          */
551         if (lmp_cpb_central_capable(hdev)) {
552                 events[1] |= 0x40;      /* Triggered Clock Capture */
553                 events[1] |= 0x80;      /* Synchronization Train Complete */
554                 events[2] |= 0x10;      /* Peripheral Page Response Timeout */
555                 events[2] |= 0x20;      /* CPB Channel Map Change */
556                 changed = true;
557         }
558
559         /* If Connectionless Peripheral Broadcast peripheral role is supported
560          * enable all necessary events for it.
561          */
562         if (lmp_cpb_peripheral_capable(hdev)) {
563                 events[2] |= 0x01;      /* Synchronization Train Received */
564                 events[2] |= 0x02;      /* CPB Receive */
565                 events[2] |= 0x04;      /* CPB Timeout */
566                 events[2] |= 0x08;      /* Truncated Page Complete */
567                 changed = true;
568         }
569
570         /* Enable Authenticated Payload Timeout Expired event if supported */
571         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
572                 events[2] |= 0x80;
573                 changed = true;
574         }
575
576         /* Some Broadcom based controllers indicate support for Set Event
577          * Mask Page 2 command, but then actually do not support it. Since
578          * the default value is all bits set to zero, the command is only
579          * required if the event mask has to be changed. In case no change
580          * to the event mask is needed, skip this command.
581          */
582         if (changed)
583                 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
584                             sizeof(events), events);
585 }
586
587 static int hci_init3_req(struct hci_request *req, unsigned long opt)
588 {
589         struct hci_dev *hdev = req->hdev;
590         u8 p;
591
592         hci_setup_event_mask(req);
593
594         if (hdev->commands[6] & 0x20 &&
595             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
596                 struct hci_cp_read_stored_link_key cp;
597
598                 bacpy(&cp.bdaddr, BDADDR_ANY);
599                 cp.read_all = 0x01;
600                 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
601         }
602
603         if (hdev->commands[5] & 0x10)
604                 hci_setup_link_policy(req);
605
606         if (hdev->commands[8] & 0x01)
607                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
608
609         if (hdev->commands[18] & 0x04 &&
610             !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
611                 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
612
613         /* Some older Broadcom based Bluetooth 1.2 controllers do not
614          * support the Read Page Scan Type command. Check support for
615          * this command in the bit mask of supported commands.
616          */
617         if (hdev->commands[13] & 0x01)
618                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
619
620         if (lmp_le_capable(hdev)) {
621                 u8 events[8];
622
623                 memset(events, 0, sizeof(events));
624
625                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
626                         events[0] |= 0x10;      /* LE Long Term Key Request */
627
628                 /* If controller supports the Connection Parameters Request
629                  * Link Layer Procedure, enable the corresponding event.
630                  */
631                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
632                         events[0] |= 0x20;      /* LE Remote Connection
633                                                  * Parameter Request
634                                                  */
635
636                 /* If the controller supports the Data Length Extension
637                  * feature, enable the corresponding event.
638                  */
639                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
640                         events[0] |= 0x40;      /* LE Data Length Change */
641
642                 /* If the controller supports LL Privacy feature, enable
643                  * the corresponding event.
644                  */
645                 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
646                         events[1] |= 0x02;      /* LE Enhanced Connection
647                                                  * Complete
648                                                  */
649
650                 /* If the controller supports Extended Scanner Filter
651                  * Policies, enable the corresponding event.
652                  */
653                 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
654                         events[1] |= 0x04;      /* LE Direct Advertising
655                                                  * Report
656                                                  */
657
658                 /* If the controller supports Channel Selection Algorithm #2
659                  * feature, enable the corresponding event.
660                  */
661                 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
662                         events[2] |= 0x08;      /* LE Channel Selection
663                                                  * Algorithm
664                                                  */
665
666                 /* If the controller supports the LE Set Scan Enable command,
667                  * enable the corresponding advertising report event.
668                  */
669                 if (hdev->commands[26] & 0x08)
670                         events[0] |= 0x02;      /* LE Advertising Report */
671
672                 /* If the controller supports the LE Create Connection
673                  * command, enable the corresponding event.
674                  */
675                 if (hdev->commands[26] & 0x10)
676                         events[0] |= 0x01;      /* LE Connection Complete */
677
678                 /* If the controller supports the LE Connection Update
679                  * command, enable the corresponding event.
680                  */
681                 if (hdev->commands[27] & 0x04)
682                         events[0] |= 0x04;      /* LE Connection Update
683                                                  * Complete
684                                                  */
685
686                 /* If the controller supports the LE Read Remote Used Features
687                  * command, enable the corresponding event.
688                  */
689                 if (hdev->commands[27] & 0x20)
690                         events[0] |= 0x08;      /* LE Read Remote Used
691                                                  * Features Complete
692                                                  */
693
694                 /* If the controller supports the LE Read Local P-256
695                  * Public Key command, enable the corresponding event.
696                  */
697                 if (hdev->commands[34] & 0x02)
698                         events[0] |= 0x80;      /* LE Read Local P-256
699                                                  * Public Key Complete
700                                                  */
701
702                 /* If the controller supports the LE Generate DHKey
703                  * command, enable the corresponding event.
704                  */
705                 if (hdev->commands[34] & 0x04)
706                         events[1] |= 0x01;      /* LE Generate DHKey Complete */
707
708                 /* If the controller supports the LE Set Default PHY or
709                  * LE Set PHY commands, enable the corresponding event.
710                  */
711                 if (hdev->commands[35] & (0x20 | 0x40))
712                         events[1] |= 0x08;        /* LE PHY Update Complete */
713
714                 /* If the controller supports LE Set Extended Scan Parameters
715                  * and LE Set Extended Scan Enable commands, enable the
716                  * corresponding event.
717                  */
718                 if (use_ext_scan(hdev))
719                         events[1] |= 0x10;      /* LE Extended Advertising
720                                                  * Report
721                                                  */
722
723                 /* If the controller supports the LE Extended Advertising
724                  * command, enable the corresponding event.
725                  */
726                 if (ext_adv_capable(hdev))
727                         events[2] |= 0x02;      /* LE Advertising Set
728                                                  * Terminated
729                                                  */
730
731                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
732                             events);
733
734                 /* Read LE Advertising Channel TX Power */
735                 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
736                         /* HCI TS spec forbids mixing of legacy and extended
737                          * advertising commands wherein READ_ADV_TX_POWER is
738                          * also included. So do not call it if extended adv
739                          * is supported otherwise controller will return
740                          * COMMAND_DISALLOWED for extended commands.
741                          */
742                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
743                 }
744
745                 if ((hdev->commands[38] & 0x80) &&
746                     !test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) {
747                         /* Read LE Min/Max Tx Power*/
748                         hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
749                                     0, NULL);
750                 }
751
752                 if (hdev->commands[26] & 0x40) {
753                         /* Read LE Accept List Size */
754                         hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
755                                     0, NULL);
756                 }
757
758                 if (hdev->commands[26] & 0x80) {
759                         /* Clear LE Accept List */
760                         hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
761                 }
762
763                 if (hdev->commands[34] & 0x40) {
764                         /* Read LE Resolving List Size */
765                         hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
766                                     0, NULL);
767                 }
768
769                 if (hdev->commands[34] & 0x20) {
770                         /* Clear LE Resolving List */
771                         hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
772                 }
773
774                 if (hdev->commands[35] & 0x04) {
775                         __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
776
777                         /* Set RPA timeout */
778                         hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
779                                     &rpa_timeout);
780                 }
781
782                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
783                         /* Read LE Maximum Data Length */
784                         hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
785
786                         /* Read LE Suggested Default Data Length */
787                         hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
788                 }
789
790                 if (ext_adv_capable(hdev)) {
791                         /* Read LE Number of Supported Advertising Sets */
792                         hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
793                                     0, NULL);
794                 }
795
796                 hci_set_le_support(req);
797         }
798
799         /* Read features beyond page 1 if available */
800         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
801                 struct hci_cp_read_local_ext_features cp;
802
803                 cp.page = p;
804                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
805                             sizeof(cp), &cp);
806         }
807
808         return 0;
809 }
810
811 static int hci_init4_req(struct hci_request *req, unsigned long opt)
812 {
813         struct hci_dev *hdev = req->hdev;
814
815         /* Some Broadcom based Bluetooth controllers do not support the
816          * Delete Stored Link Key command. They are clearly indicating its
817          * absence in the bit mask of supported commands.
818          *
819          * Check the supported commands and only if the command is marked
820          * as supported send it. If not supported assume that the controller
821          * does not have actual support for stored link keys which makes this
822          * command redundant anyway.
823          *
824          * Some controllers indicate that they support handling deleting
825          * stored link keys, but they don't. The quirk lets a driver
826          * just disable this command.
827          */
828         if (hdev->commands[6] & 0x80 &&
829             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
830                 struct hci_cp_delete_stored_link_key cp;
831
832                 bacpy(&cp.bdaddr, BDADDR_ANY);
833                 cp.delete_all = 0x01;
834                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
835                             sizeof(cp), &cp);
836         }
837
838         /* Set event mask page 2 if the HCI command for it is supported */
839         if (hdev->commands[22] & 0x04)
840                 hci_set_event_mask_page_2(req);
841
842         /* Read local codec list if the HCI command is supported */
843         if (hdev->commands[29] & 0x20)
844                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
845
846         /* Read local pairing options if the HCI command is supported */
847         if (hdev->commands[41] & 0x08)
848                 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
849
850         /* Get MWS transport configuration if the HCI command is supported */
851         if (hdev->commands[30] & 0x08)
852                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
853
854         /* Check for Synchronization Train support */
855         if (lmp_sync_train_capable(hdev))
856                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
857
858         /* Enable Secure Connections if supported and configured */
859         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
860             bredr_sc_enabled(hdev)) {
861                 u8 support = 0x01;
862
863                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
864                             sizeof(support), &support);
865         }
866
867         /* Set erroneous data reporting if supported to the wideband speech
868          * setting value
869          */
870         if (hdev->commands[18] & 0x08 &&
871             !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
872                 bool enabled = hci_dev_test_flag(hdev,
873                                                  HCI_WIDEBAND_SPEECH_ENABLED);
874
875                 if (enabled !=
876                     (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
877                         struct hci_cp_write_def_err_data_reporting cp;
878
879                         cp.err_data_reporting = enabled ?
880                                                 ERR_DATA_REPORTING_ENABLED :
881                                                 ERR_DATA_REPORTING_DISABLED;
882
883                         hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
884                                     sizeof(cp), &cp);
885                 }
886         }
887
888         /* Set Suggested Default Data Length to maximum if supported */
889         if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
890                 struct hci_cp_le_write_def_data_len cp;
891
892                 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
893                 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
894                 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
895         }
896
897         /* Set Default PHY parameters if command is supported */
898         if (hdev->commands[35] & 0x20) {
899                 struct hci_cp_le_set_default_phy cp;
900
901                 cp.all_phys = 0x00;
902                 cp.tx_phys = hdev->le_tx_def_phys;
903                 cp.rx_phys = hdev->le_rx_def_phys;
904
905                 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
906         }
907
908         return 0;
909 }
910
911 static int __hci_init(struct hci_dev *hdev)
912 {
913         int err;
914
915         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
916         if (err < 0)
917                 return err;
918
919         if (hci_dev_test_flag(hdev, HCI_SETUP))
920                 hci_debugfs_create_basic(hdev);
921
922         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
923         if (err < 0)
924                 return err;
925
926         /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
927          * BR/EDR/LE type controllers. AMP controllers only need the
928          * first two stages of init.
929          */
930         if (hdev->dev_type != HCI_PRIMARY)
931                 return 0;
932
933         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
934         if (err < 0)
935                 return err;
936
937         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
938         if (err < 0)
939                 return err;
940
941         /* This function is only called when the controller is actually in
942          * configured state. When the controller is marked as unconfigured,
943          * this initialization procedure is not run.
944          *
945          * It means that it is possible that a controller runs through its
946          * setup phase and then discovers missing settings. If that is the
947          * case, then this function will not be called. It then will only
948          * be called during the config phase.
949          *
950          * So only when in setup phase or config phase, create the debugfs
951          * entries and register the SMP channels.
952          */
953         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
954             !hci_dev_test_flag(hdev, HCI_CONFIG))
955                 return 0;
956
957         hci_debugfs_create_common(hdev);
958
959         if (lmp_bredr_capable(hdev))
960                 hci_debugfs_create_bredr(hdev);
961
962         if (lmp_le_capable(hdev))
963                 hci_debugfs_create_le(hdev);
964
965         return 0;
966 }
967
968 static int hci_init0_req(struct hci_request *req, unsigned long opt)
969 {
970         struct hci_dev *hdev = req->hdev;
971
972         BT_DBG("%s %ld", hdev->name, opt);
973
974         /* Reset */
975         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
976                 hci_reset_req(req, 0);
977
978         /* Read Local Version */
979         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
980
981         /* Read BD Address */
982         if (hdev->set_bdaddr)
983                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
984
985         return 0;
986 }
987
988 static int __hci_unconf_init(struct hci_dev *hdev)
989 {
990         int err;
991
992         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
993                 return 0;
994
995         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
996         if (err < 0)
997                 return err;
998
999         if (hci_dev_test_flag(hdev, HCI_SETUP))
1000                 hci_debugfs_create_basic(hdev);
1001
1002         return 0;
1003 }
1004
1005 static int hci_scan_req(struct hci_request *req, unsigned long opt)
1006 {
1007         __u8 scan = opt;
1008
1009         BT_DBG("%s %x", req->hdev->name, scan);
1010
1011         /* Inquiry and Page scans */
1012         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1013         return 0;
1014 }
1015
1016 static int hci_auth_req(struct hci_request *req, unsigned long opt)
1017 {
1018         __u8 auth = opt;
1019
1020         BT_DBG("%s %x", req->hdev->name, auth);
1021
1022         /* Authentication */
1023         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1024         return 0;
1025 }
1026
1027 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1028 {
1029         __u8 encrypt = opt;
1030
1031         BT_DBG("%s %x", req->hdev->name, encrypt);
1032
1033         /* Encryption */
1034         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1035         return 0;
1036 }
1037
1038 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1039 {
1040         __le16 policy = cpu_to_le16(opt);
1041
1042         BT_DBG("%s %x", req->hdev->name, policy);
1043
1044         /* Default link policy */
1045         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1046         return 0;
1047 }
1048
1049 /* Get HCI device by index.
1050  * Device is held on return. */
1051 struct hci_dev *hci_dev_get(int index)
1052 {
1053         struct hci_dev *hdev = NULL, *d;
1054
1055         BT_DBG("%d", index);
1056
1057         if (index < 0)
1058                 return NULL;
1059
1060         read_lock(&hci_dev_list_lock);
1061         list_for_each_entry(d, &hci_dev_list, list) {
1062                 if (d->id == index) {
1063                         hdev = hci_dev_hold(d);
1064                         break;
1065                 }
1066         }
1067         read_unlock(&hci_dev_list_lock);
1068         return hdev;
1069 }
1070
1071 /* ---- Inquiry support ---- */
1072
1073 bool hci_discovery_active(struct hci_dev *hdev)
1074 {
1075         struct discovery_state *discov = &hdev->discovery;
1076
1077         switch (discov->state) {
1078         case DISCOVERY_FINDING:
1079         case DISCOVERY_RESOLVING:
1080                 return true;
1081
1082         default:
1083                 return false;
1084         }
1085 }
1086
1087 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1088 {
1089         int old_state = hdev->discovery.state;
1090
1091         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1092
1093         if (old_state == state)
1094                 return;
1095
1096         hdev->discovery.state = state;
1097
1098         switch (state) {
1099         case DISCOVERY_STOPPED:
1100                 hci_update_background_scan(hdev);
1101
1102                 if (old_state != DISCOVERY_STARTING)
1103                         mgmt_discovering(hdev, 0);
1104                 break;
1105         case DISCOVERY_STARTING:
1106                 break;
1107         case DISCOVERY_FINDING:
1108                 mgmt_discovering(hdev, 1);
1109                 break;
1110         case DISCOVERY_RESOLVING:
1111                 break;
1112         case DISCOVERY_STOPPING:
1113                 break;
1114         }
1115 }
1116
1117 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1118 {
1119         struct discovery_state *cache = &hdev->discovery;
1120         struct inquiry_entry *p, *n;
1121
1122         list_for_each_entry_safe(p, n, &cache->all, all) {
1123                 list_del(&p->all);
1124                 kfree(p);
1125         }
1126
1127         INIT_LIST_HEAD(&cache->unknown);
1128         INIT_LIST_HEAD(&cache->resolve);
1129 }
1130
1131 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1132                                                bdaddr_t *bdaddr)
1133 {
1134         struct discovery_state *cache = &hdev->discovery;
1135         struct inquiry_entry *e;
1136
1137         BT_DBG("cache %p, %pMR", cache, bdaddr);
1138
1139         list_for_each_entry(e, &cache->all, all) {
1140                 if (!bacmp(&e->data.bdaddr, bdaddr))
1141                         return e;
1142         }
1143
1144         return NULL;
1145 }
1146
1147 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1148                                                        bdaddr_t *bdaddr)
1149 {
1150         struct discovery_state *cache = &hdev->discovery;
1151         struct inquiry_entry *e;
1152
1153         BT_DBG("cache %p, %pMR", cache, bdaddr);
1154
1155         list_for_each_entry(e, &cache->unknown, list) {
1156                 if (!bacmp(&e->data.bdaddr, bdaddr))
1157                         return e;
1158         }
1159
1160         return NULL;
1161 }
1162
1163 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1164                                                        bdaddr_t *bdaddr,
1165                                                        int state)
1166 {
1167         struct discovery_state *cache = &hdev->discovery;
1168         struct inquiry_entry *e;
1169
1170         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1171
1172         list_for_each_entry(e, &cache->resolve, list) {
1173                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1174                         return e;
1175                 if (!bacmp(&e->data.bdaddr, bdaddr))
1176                         return e;
1177         }
1178
1179         return NULL;
1180 }
1181
1182 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1183                                       struct inquiry_entry *ie)
1184 {
1185         struct discovery_state *cache = &hdev->discovery;
1186         struct list_head *pos = &cache->resolve;
1187         struct inquiry_entry *p;
1188
1189         list_del(&ie->list);
1190
1191         list_for_each_entry(p, &cache->resolve, list) {
1192                 if (p->name_state != NAME_PENDING &&
1193                     abs(p->data.rssi) >= abs(ie->data.rssi))
1194                         break;
1195                 pos = &p->list;
1196         }
1197
1198         list_add(&ie->list, pos);
1199 }
1200
1201 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1202                              bool name_known)
1203 {
1204         struct discovery_state *cache = &hdev->discovery;
1205         struct inquiry_entry *ie;
1206         u32 flags = 0;
1207
1208         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1209
1210         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1211
1212         if (!data->ssp_mode)
1213                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1214
1215         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1216         if (ie) {
1217                 if (!ie->data.ssp_mode)
1218                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1219
1220                 if (ie->name_state == NAME_NEEDED &&
1221                     data->rssi != ie->data.rssi) {
1222                         ie->data.rssi = data->rssi;
1223                         hci_inquiry_cache_update_resolve(hdev, ie);
1224                 }
1225
1226                 goto update;
1227         }
1228
1229         /* Entry not in the cache. Add new one. */
1230         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1231         if (!ie) {
1232                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1233                 goto done;
1234         }
1235
1236         list_add(&ie->all, &cache->all);
1237
1238         if (name_known) {
1239                 ie->name_state = NAME_KNOWN;
1240         } else {
1241                 ie->name_state = NAME_NOT_KNOWN;
1242                 list_add(&ie->list, &cache->unknown);
1243         }
1244
1245 update:
1246         if (name_known && ie->name_state != NAME_KNOWN &&
1247             ie->name_state != NAME_PENDING) {
1248                 ie->name_state = NAME_KNOWN;
1249                 list_del(&ie->list);
1250         }
1251
1252         memcpy(&ie->data, data, sizeof(*data));
1253         ie->timestamp = jiffies;
1254         cache->timestamp = jiffies;
1255
1256         if (ie->name_state == NAME_NOT_KNOWN)
1257                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1258
1259 done:
1260         return flags;
1261 }
1262
1263 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1264 {
1265         struct discovery_state *cache = &hdev->discovery;
1266         struct inquiry_info *info = (struct inquiry_info *) buf;
1267         struct inquiry_entry *e;
1268         int copied = 0;
1269
1270         list_for_each_entry(e, &cache->all, all) {
1271                 struct inquiry_data *data = &e->data;
1272
1273                 if (copied >= num)
1274                         break;
1275
1276                 bacpy(&info->bdaddr, &data->bdaddr);
1277                 info->pscan_rep_mode    = data->pscan_rep_mode;
1278                 info->pscan_period_mode = data->pscan_period_mode;
1279                 info->pscan_mode        = data->pscan_mode;
1280                 memcpy(info->dev_class, data->dev_class, 3);
1281                 info->clock_offset      = data->clock_offset;
1282
1283                 info++;
1284                 copied++;
1285         }
1286
1287         BT_DBG("cache %p, copied %d", cache, copied);
1288         return copied;
1289 }
1290
1291 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1292 {
1293         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1294         struct hci_dev *hdev = req->hdev;
1295         struct hci_cp_inquiry cp;
1296
1297         BT_DBG("%s", hdev->name);
1298
1299         if (test_bit(HCI_INQUIRY, &hdev->flags))
1300                 return 0;
1301
1302         /* Start Inquiry */
1303         memcpy(&cp.lap, &ir->lap, 3);
1304         cp.length  = ir->length;
1305         cp.num_rsp = ir->num_rsp;
1306         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1307
1308         return 0;
1309 }
1310
1311 int hci_inquiry(void __user *arg)
1312 {
1313         __u8 __user *ptr = arg;
1314         struct hci_inquiry_req ir;
1315         struct hci_dev *hdev;
1316         int err = 0, do_inquiry = 0, max_rsp;
1317         long timeo;
1318         __u8 *buf;
1319
1320         if (copy_from_user(&ir, ptr, sizeof(ir)))
1321                 return -EFAULT;
1322
1323         hdev = hci_dev_get(ir.dev_id);
1324         if (!hdev)
1325                 return -ENODEV;
1326
1327         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1328                 err = -EBUSY;
1329                 goto done;
1330         }
1331
1332         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1333                 err = -EOPNOTSUPP;
1334                 goto done;
1335         }
1336
1337         if (hdev->dev_type != HCI_PRIMARY) {
1338                 err = -EOPNOTSUPP;
1339                 goto done;
1340         }
1341
1342         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1343                 err = -EOPNOTSUPP;
1344                 goto done;
1345         }
1346
1347         /* Restrict maximum inquiry length to 60 seconds */
1348         if (ir.length > 60) {
1349                 err = -EINVAL;
1350                 goto done;
1351         }
1352
1353         hci_dev_lock(hdev);
1354         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1355             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1356                 hci_inquiry_cache_flush(hdev);
1357                 do_inquiry = 1;
1358         }
1359         hci_dev_unlock(hdev);
1360
1361         timeo = ir.length * msecs_to_jiffies(2000);
1362
1363         if (do_inquiry) {
1364                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1365                                    timeo, NULL);
1366                 if (err < 0)
1367                         goto done;
1368
1369                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1370                  * cleared). If it is interrupted by a signal, return -EINTR.
1371                  */
1372                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1373                                 TASK_INTERRUPTIBLE)) {
1374                         err = -EINTR;
1375                         goto done;
1376                 }
1377         }
1378
1379         /* for unlimited number of responses we will use buffer with
1380          * 255 entries
1381          */
1382         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1383
1384         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1385          * copy it to the user space.
1386          */
1387         buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1388         if (!buf) {
1389                 err = -ENOMEM;
1390                 goto done;
1391         }
1392
1393         hci_dev_lock(hdev);
1394         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1395         hci_dev_unlock(hdev);
1396
1397         BT_DBG("num_rsp %d", ir.num_rsp);
1398
1399         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1400                 ptr += sizeof(ir);
1401                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1402                                  ir.num_rsp))
1403                         err = -EFAULT;
1404         } else
1405                 err = -EFAULT;
1406
1407         kfree(buf);
1408
1409 done:
1410         hci_dev_put(hdev);
1411         return err;
1412 }
1413
1414 /**
1415  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1416  *                                     (BD_ADDR) for a HCI device from
1417  *                                     a firmware node property.
1418  * @hdev:       The HCI device
1419  *
1420  * Search the firmware node for 'local-bd-address'.
1421  *
1422  * All-zero BD addresses are rejected, because those could be properties
1423  * that exist in the firmware tables, but were not updated by the firmware. For
1424  * example, the DTS could define 'local-bd-address', with zero BD addresses.
1425  */
1426 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1427 {
1428         struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1429         bdaddr_t ba;
1430         int ret;
1431
1432         ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1433                                             (u8 *)&ba, sizeof(ba));
1434         if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1435                 return;
1436
1437         bacpy(&hdev->public_addr, &ba);
1438 }
1439
1440 static int hci_dev_do_open(struct hci_dev *hdev)
1441 {
1442         int ret = 0;
1443
1444         BT_DBG("%s %p", hdev->name, hdev);
1445
1446         hci_req_sync_lock(hdev);
1447
1448         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1449                 ret = -ENODEV;
1450                 goto done;
1451         }
1452
1453         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1454             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1455                 /* Check for rfkill but allow the HCI setup stage to
1456                  * proceed (which in itself doesn't cause any RF activity).
1457                  */
1458                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1459                         ret = -ERFKILL;
1460                         goto done;
1461                 }
1462
1463                 /* Check for valid public address or a configured static
1464                  * random address, but let the HCI setup proceed to
1465                  * be able to determine if there is a public address
1466                  * or not.
1467                  *
1468                  * In case of user channel usage, it is not important
1469                  * if a public address or static random address is
1470                  * available.
1471                  *
1472                  * This check is only valid for BR/EDR controllers
1473                  * since AMP controllers do not have an address.
1474                  */
1475                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1476                     hdev->dev_type == HCI_PRIMARY &&
1477                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1478                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1479                         ret = -EADDRNOTAVAIL;
1480                         goto done;
1481                 }
1482         }
1483
1484         if (test_bit(HCI_UP, &hdev->flags)) {
1485                 ret = -EALREADY;
1486                 goto done;
1487         }
1488
1489         if (hdev->open(hdev)) {
1490                 ret = -EIO;
1491                 goto done;
1492         }
1493
1494         set_bit(HCI_RUNNING, &hdev->flags);
1495         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1496
1497         atomic_set(&hdev->cmd_cnt, 1);
1498         set_bit(HCI_INIT, &hdev->flags);
1499
1500         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1501             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1502                 bool invalid_bdaddr;
1503
1504                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1505
1506                 if (hdev->setup)
1507                         ret = hdev->setup(hdev);
1508
1509                 /* The transport driver can set the quirk to mark the
1510                  * BD_ADDR invalid before creating the HCI device or in
1511                  * its setup callback.
1512                  */
1513                 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1514                                           &hdev->quirks);
1515
1516                 if (ret)
1517                         goto setup_failed;
1518
1519                 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1520                         if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1521                                 hci_dev_get_bd_addr_from_property(hdev);
1522
1523                         if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1524                             hdev->set_bdaddr) {
1525                                 ret = hdev->set_bdaddr(hdev,
1526                                                        &hdev->public_addr);
1527
1528                                 /* If setting of the BD_ADDR from the device
1529                                  * property succeeds, then treat the address
1530                                  * as valid even if the invalid BD_ADDR
1531                                  * quirk indicates otherwise.
1532                                  */
1533                                 if (!ret)
1534                                         invalid_bdaddr = false;
1535                         }
1536                 }
1537
1538 setup_failed:
1539                 /* The transport driver can set these quirks before
1540                  * creating the HCI device or in its setup callback.
1541                  *
1542                  * For the invalid BD_ADDR quirk it is possible that
1543                  * it becomes a valid address if the bootloader does
1544                  * provide it (see above).
1545                  *
1546                  * In case any of them is set, the controller has to
1547                  * start up as unconfigured.
1548                  */
1549                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1550                     invalid_bdaddr)
1551                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1552
1553                 /* For an unconfigured controller it is required to
1554                  * read at least the version information provided by
1555                  * the Read Local Version Information command.
1556                  *
1557                  * If the set_bdaddr driver callback is provided, then
1558                  * also the original Bluetooth public device address
1559                  * will be read using the Read BD Address command.
1560                  */
1561                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1562                         ret = __hci_unconf_init(hdev);
1563         }
1564
1565         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1566                 /* If public address change is configured, ensure that
1567                  * the address gets programmed. If the driver does not
1568                  * support changing the public address, fail the power
1569                  * on procedure.
1570                  */
1571                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1572                     hdev->set_bdaddr)
1573                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1574                 else
1575                         ret = -EADDRNOTAVAIL;
1576         }
1577
1578         if (!ret) {
1579                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1580                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1581                         ret = __hci_init(hdev);
1582                         if (!ret && hdev->post_init)
1583                                 ret = hdev->post_init(hdev);
1584                 }
1585         }
1586
1587         /* If the HCI Reset command is clearing all diagnostic settings,
1588          * then they need to be reprogrammed after the init procedure
1589          * completed.
1590          */
1591         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1592             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1593             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1594                 ret = hdev->set_diag(hdev, true);
1595
1596         msft_do_open(hdev);
1597         aosp_do_open(hdev);
1598
1599         clear_bit(HCI_INIT, &hdev->flags);
1600
1601         if (!ret) {
1602                 hci_dev_hold(hdev);
1603                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1604                 hci_adv_instances_set_rpa_expired(hdev, true);
1605                 set_bit(HCI_UP, &hdev->flags);
1606                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1607                 hci_leds_update_powered(hdev, true);
1608                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1609                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1610                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1611                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1612                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1613                     hdev->dev_type == HCI_PRIMARY) {
1614                         ret = __hci_req_hci_power_on(hdev);
1615                         mgmt_power_on(hdev, ret);
1616                 }
1617         } else {
1618                 /* Init failed, cleanup */
1619                 flush_work(&hdev->tx_work);
1620
1621                 /* Since hci_rx_work() is possible to awake new cmd_work
1622                  * it should be flushed first to avoid unexpected call of
1623                  * hci_cmd_work()
1624                  */
1625                 flush_work(&hdev->rx_work);
1626                 flush_work(&hdev->cmd_work);
1627
1628                 skb_queue_purge(&hdev->cmd_q);
1629                 skb_queue_purge(&hdev->rx_q);
1630
1631                 if (hdev->flush)
1632                         hdev->flush(hdev);
1633
1634                 if (hdev->sent_cmd) {
1635                         kfree_skb(hdev->sent_cmd);
1636                         hdev->sent_cmd = NULL;
1637                 }
1638
1639                 clear_bit(HCI_RUNNING, &hdev->flags);
1640                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1641
1642                 hdev->close(hdev);
1643                 hdev->flags &= BIT(HCI_RAW);
1644         }
1645
1646 done:
1647         hci_req_sync_unlock(hdev);
1648         return ret;
1649 }
1650
1651 /* ---- HCI ioctl helpers ---- */
1652
1653 int hci_dev_open(__u16 dev)
1654 {
1655         struct hci_dev *hdev;
1656         int err;
1657
1658         hdev = hci_dev_get(dev);
1659         if (!hdev)
1660                 return -ENODEV;
1661
1662         /* Devices that are marked as unconfigured can only be powered
1663          * up as user channel. Trying to bring them up as normal devices
1664          * will result into a failure. Only user channel operation is
1665          * possible.
1666          *
1667          * When this function is called for a user channel, the flag
1668          * HCI_USER_CHANNEL will be set first before attempting to
1669          * open the device.
1670          */
1671         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1672             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1673                 err = -EOPNOTSUPP;
1674                 goto done;
1675         }
1676
1677         /* We need to ensure that no other power on/off work is pending
1678          * before proceeding to call hci_dev_do_open. This is
1679          * particularly important if the setup procedure has not yet
1680          * completed.
1681          */
1682         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1683                 cancel_delayed_work(&hdev->power_off);
1684
1685         /* After this call it is guaranteed that the setup procedure
1686          * has finished. This means that error conditions like RFKILL
1687          * or no valid public or static random address apply.
1688          */
1689         flush_workqueue(hdev->req_workqueue);
1690
1691         /* For controllers not using the management interface and that
1692          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1693          * so that pairing works for them. Once the management interface
1694          * is in use this bit will be cleared again and userspace has
1695          * to explicitly enable it.
1696          */
1697         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1698             !hci_dev_test_flag(hdev, HCI_MGMT))
1699                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1700
1701         err = hci_dev_do_open(hdev);
1702
1703 done:
1704         hci_dev_put(hdev);
1705         return err;
1706 }
1707
1708 /* This function requires the caller holds hdev->lock */
1709 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1710 {
1711         struct hci_conn_params *p;
1712
1713         list_for_each_entry(p, &hdev->le_conn_params, list) {
1714                 if (p->conn) {
1715                         hci_conn_drop(p->conn);
1716                         hci_conn_put(p->conn);
1717                         p->conn = NULL;
1718                 }
1719                 list_del_init(&p->action);
1720         }
1721
1722         BT_DBG("All LE pending actions cleared");
1723 }
1724
1725 int hci_dev_do_close(struct hci_dev *hdev)
1726 {
1727         bool auto_off;
1728         int err = 0;
1729
1730         BT_DBG("%s %p", hdev->name, hdev);
1731
1732         cancel_delayed_work(&hdev->power_off);
1733         cancel_delayed_work(&hdev->ncmd_timer);
1734
1735         hci_request_cancel_all(hdev);
1736         hci_req_sync_lock(hdev);
1737
1738         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1739             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1740             test_bit(HCI_UP, &hdev->flags)) {
1741                 /* Execute vendor specific shutdown routine */
1742                 if (hdev->shutdown)
1743                         err = hdev->shutdown(hdev);
1744         }
1745
1746         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1747                 cancel_delayed_work_sync(&hdev->cmd_timer);
1748                 hci_req_sync_unlock(hdev);
1749                 return err;
1750         }
1751
1752         hci_leds_update_powered(hdev, false);
1753
1754         /* Flush RX and TX works */
1755         flush_work(&hdev->tx_work);
1756         flush_work(&hdev->rx_work);
1757
1758         if (hdev->discov_timeout > 0) {
1759                 hdev->discov_timeout = 0;
1760                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1761                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1762         }
1763
1764         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1765                 cancel_delayed_work(&hdev->service_cache);
1766
1767         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1768                 struct adv_info *adv_instance;
1769
1770                 cancel_delayed_work_sync(&hdev->rpa_expired);
1771
1772                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1773                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1774         }
1775
1776         /* Avoid potential lockdep warnings from the *_flush() calls by
1777          * ensuring the workqueue is empty up front.
1778          */
1779         drain_workqueue(hdev->workqueue);
1780
1781         hci_dev_lock(hdev);
1782
1783         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1784
1785         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1786
1787         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1788             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1789             hci_dev_test_flag(hdev, HCI_MGMT))
1790                 __mgmt_power_off(hdev);
1791
1792         hci_inquiry_cache_flush(hdev);
1793         hci_pend_le_actions_clear(hdev);
1794         hci_conn_hash_flush(hdev);
1795         hci_dev_unlock(hdev);
1796
1797         smp_unregister(hdev);
1798
1799         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1800
1801         aosp_do_close(hdev);
1802         msft_do_close(hdev);
1803
1804         if (hdev->flush)
1805                 hdev->flush(hdev);
1806
1807         /* Reset device */
1808         skb_queue_purge(&hdev->cmd_q);
1809         atomic_set(&hdev->cmd_cnt, 1);
1810         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1811             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1812                 set_bit(HCI_INIT, &hdev->flags);
1813                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1814                 clear_bit(HCI_INIT, &hdev->flags);
1815         }
1816
1817         /* flush cmd  work */
1818         flush_work(&hdev->cmd_work);
1819
1820         /* Drop queues */
1821         skb_queue_purge(&hdev->rx_q);
1822         skb_queue_purge(&hdev->cmd_q);
1823         skb_queue_purge(&hdev->raw_q);
1824
1825         /* Drop last sent command */
1826         if (hdev->sent_cmd) {
1827                 cancel_delayed_work_sync(&hdev->cmd_timer);
1828                 kfree_skb(hdev->sent_cmd);
1829                 hdev->sent_cmd = NULL;
1830         }
1831
1832         clear_bit(HCI_RUNNING, &hdev->flags);
1833         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1834
1835         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1836                 wake_up(&hdev->suspend_wait_q);
1837
1838         /* After this point our queues are empty
1839          * and no tasks are scheduled. */
1840         hdev->close(hdev);
1841
1842         /* Clear flags */
1843         hdev->flags &= BIT(HCI_RAW);
1844         hci_dev_clear_volatile_flags(hdev);
1845
1846         /* Controller radio is available but is currently powered down */
1847         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1848
1849         memset(hdev->eir, 0, sizeof(hdev->eir));
1850         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1851         bacpy(&hdev->random_addr, BDADDR_ANY);
1852
1853         hci_req_sync_unlock(hdev);
1854
1855         hci_dev_put(hdev);
1856         return err;
1857 }
1858
1859 int hci_dev_close(__u16 dev)
1860 {
1861         struct hci_dev *hdev;
1862         int err;
1863
1864         hdev = hci_dev_get(dev);
1865         if (!hdev)
1866                 return -ENODEV;
1867
1868         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1869                 err = -EBUSY;
1870                 goto done;
1871         }
1872
1873         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1874                 cancel_delayed_work(&hdev->power_off);
1875
1876         err = hci_dev_do_close(hdev);
1877
1878 done:
1879         hci_dev_put(hdev);
1880         return err;
1881 }
1882
1883 static int hci_dev_do_reset(struct hci_dev *hdev)
1884 {
1885         int ret;
1886
1887         BT_DBG("%s %p", hdev->name, hdev);
1888
1889         hci_req_sync_lock(hdev);
1890
1891         /* Drop queues */
1892         skb_queue_purge(&hdev->rx_q);
1893         skb_queue_purge(&hdev->cmd_q);
1894
1895         /* Avoid potential lockdep warnings from the *_flush() calls by
1896          * ensuring the workqueue is empty up front.
1897          */
1898         drain_workqueue(hdev->workqueue);
1899
1900         hci_dev_lock(hdev);
1901         hci_inquiry_cache_flush(hdev);
1902         hci_conn_hash_flush(hdev);
1903         hci_dev_unlock(hdev);
1904
1905         if (hdev->flush)
1906                 hdev->flush(hdev);
1907
1908         atomic_set(&hdev->cmd_cnt, 1);
1909         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1910
1911         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1912
1913         hci_req_sync_unlock(hdev);
1914         return ret;
1915 }
1916
1917 int hci_dev_reset(__u16 dev)
1918 {
1919         struct hci_dev *hdev;
1920         int err;
1921
1922         hdev = hci_dev_get(dev);
1923         if (!hdev)
1924                 return -ENODEV;
1925
1926         if (!test_bit(HCI_UP, &hdev->flags)) {
1927                 err = -ENETDOWN;
1928                 goto done;
1929         }
1930
1931         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1932                 err = -EBUSY;
1933                 goto done;
1934         }
1935
1936         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1937                 err = -EOPNOTSUPP;
1938                 goto done;
1939         }
1940
1941         err = hci_dev_do_reset(hdev);
1942
1943 done:
1944         hci_dev_put(hdev);
1945         return err;
1946 }
1947
1948 int hci_dev_reset_stat(__u16 dev)
1949 {
1950         struct hci_dev *hdev;
1951         int ret = 0;
1952
1953         hdev = hci_dev_get(dev);
1954         if (!hdev)
1955                 return -ENODEV;
1956
1957         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1958                 ret = -EBUSY;
1959                 goto done;
1960         }
1961
1962         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1963                 ret = -EOPNOTSUPP;
1964                 goto done;
1965         }
1966
1967         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1968
1969 done:
1970         hci_dev_put(hdev);
1971         return ret;
1972 }
1973
1974 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1975 {
1976         bool conn_changed, discov_changed;
1977
1978         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1979
1980         if ((scan & SCAN_PAGE))
1981                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1982                                                           HCI_CONNECTABLE);
1983         else
1984                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1985                                                            HCI_CONNECTABLE);
1986
1987         if ((scan & SCAN_INQUIRY)) {
1988                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1989                                                             HCI_DISCOVERABLE);
1990         } else {
1991                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1992                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1993                                                              HCI_DISCOVERABLE);
1994         }
1995
1996         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1997                 return;
1998
1999         if (conn_changed || discov_changed) {
2000                 /* In case this was disabled through mgmt */
2001                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2002
2003                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2004                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
2005
2006                 mgmt_new_settings(hdev);
2007         }
2008 }
2009
2010 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2011 {
2012         struct hci_dev *hdev;
2013         struct hci_dev_req dr;
2014         int err = 0;
2015
2016         if (copy_from_user(&dr, arg, sizeof(dr)))
2017                 return -EFAULT;
2018
2019         hdev = hci_dev_get(dr.dev_id);
2020         if (!hdev)
2021                 return -ENODEV;
2022
2023         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2024                 err = -EBUSY;
2025                 goto done;
2026         }
2027
2028         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2029                 err = -EOPNOTSUPP;
2030                 goto done;
2031         }
2032
2033         if (hdev->dev_type != HCI_PRIMARY) {
2034                 err = -EOPNOTSUPP;
2035                 goto done;
2036         }
2037
2038         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2039                 err = -EOPNOTSUPP;
2040                 goto done;
2041         }
2042
2043         switch (cmd) {
2044         case HCISETAUTH:
2045                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2046                                    HCI_INIT_TIMEOUT, NULL);
2047                 break;
2048
2049         case HCISETENCRYPT:
2050                 if (!lmp_encrypt_capable(hdev)) {
2051                         err = -EOPNOTSUPP;
2052                         break;
2053                 }
2054
2055                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2056                         /* Auth must be enabled first */
2057                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2058                                            HCI_INIT_TIMEOUT, NULL);
2059                         if (err)
2060                                 break;
2061                 }
2062
2063                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2064                                    HCI_INIT_TIMEOUT, NULL);
2065                 break;
2066
2067         case HCISETSCAN:
2068                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2069                                    HCI_INIT_TIMEOUT, NULL);
2070
2071                 /* Ensure that the connectable and discoverable states
2072                  * get correctly modified as this was a non-mgmt change.
2073                  */
2074                 if (!err)
2075                         hci_update_scan_state(hdev, dr.dev_opt);
2076                 break;
2077
2078         case HCISETLINKPOL:
2079                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2080                                    HCI_INIT_TIMEOUT, NULL);
2081                 break;
2082
2083         case HCISETLINKMODE:
2084                 hdev->link_mode = ((__u16) dr.dev_opt) &
2085                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2086                 break;
2087
2088         case HCISETPTYPE:
2089                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2090                         break;
2091
2092                 hdev->pkt_type = (__u16) dr.dev_opt;
2093                 mgmt_phy_configuration_changed(hdev, NULL);
2094                 break;
2095
2096         case HCISETACLMTU:
2097                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2098                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2099                 break;
2100
2101         case HCISETSCOMTU:
2102                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2103                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2104                 break;
2105
2106         default:
2107                 err = -EINVAL;
2108                 break;
2109         }
2110
2111 done:
2112         hci_dev_put(hdev);
2113         return err;
2114 }
2115
2116 int hci_get_dev_list(void __user *arg)
2117 {
2118         struct hci_dev *hdev;
2119         struct hci_dev_list_req *dl;
2120         struct hci_dev_req *dr;
2121         int n = 0, size, err;
2122         __u16 dev_num;
2123
2124         if (get_user(dev_num, (__u16 __user *) arg))
2125                 return -EFAULT;
2126
2127         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2128                 return -EINVAL;
2129
2130         size = sizeof(*dl) + dev_num * sizeof(*dr);
2131
2132         dl = kzalloc(size, GFP_KERNEL);
2133         if (!dl)
2134                 return -ENOMEM;
2135
2136         dr = dl->dev_req;
2137
2138         read_lock(&hci_dev_list_lock);
2139         list_for_each_entry(hdev, &hci_dev_list, list) {
2140                 unsigned long flags = hdev->flags;
2141
2142                 /* When the auto-off is configured it means the transport
2143                  * is running, but in that case still indicate that the
2144                  * device is actually down.
2145                  */
2146                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2147                         flags &= ~BIT(HCI_UP);
2148
2149                 (dr + n)->dev_id  = hdev->id;
2150                 (dr + n)->dev_opt = flags;
2151
2152                 if (++n >= dev_num)
2153                         break;
2154         }
2155         read_unlock(&hci_dev_list_lock);
2156
2157         dl->dev_num = n;
2158         size = sizeof(*dl) + n * sizeof(*dr);
2159
2160         err = copy_to_user(arg, dl, size);
2161         kfree(dl);
2162
2163         return err ? -EFAULT : 0;
2164 }
2165
2166 int hci_get_dev_info(void __user *arg)
2167 {
2168         struct hci_dev *hdev;
2169         struct hci_dev_info di;
2170         unsigned long flags;
2171         int err = 0;
2172
2173         if (copy_from_user(&di, arg, sizeof(di)))
2174                 return -EFAULT;
2175
2176         hdev = hci_dev_get(di.dev_id);
2177         if (!hdev)
2178                 return -ENODEV;
2179
2180         /* When the auto-off is configured it means the transport
2181          * is running, but in that case still indicate that the
2182          * device is actually down.
2183          */
2184         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2185                 flags = hdev->flags & ~BIT(HCI_UP);
2186         else
2187                 flags = hdev->flags;
2188
2189         strcpy(di.name, hdev->name);
2190         di.bdaddr   = hdev->bdaddr;
2191         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2192         di.flags    = flags;
2193         di.pkt_type = hdev->pkt_type;
2194         if (lmp_bredr_capable(hdev)) {
2195                 di.acl_mtu  = hdev->acl_mtu;
2196                 di.acl_pkts = hdev->acl_pkts;
2197                 di.sco_mtu  = hdev->sco_mtu;
2198                 di.sco_pkts = hdev->sco_pkts;
2199         } else {
2200                 di.acl_mtu  = hdev->le_mtu;
2201                 di.acl_pkts = hdev->le_pkts;
2202                 di.sco_mtu  = 0;
2203                 di.sco_pkts = 0;
2204         }
2205         di.link_policy = hdev->link_policy;
2206         di.link_mode   = hdev->link_mode;
2207
2208         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2209         memcpy(&di.features, &hdev->features, sizeof(di.features));
2210
2211         if (copy_to_user(arg, &di, sizeof(di)))
2212                 err = -EFAULT;
2213
2214         hci_dev_put(hdev);
2215
2216         return err;
2217 }
2218
2219 /* ---- Interface to HCI drivers ---- */
2220
2221 static int hci_rfkill_set_block(void *data, bool blocked)
2222 {
2223         struct hci_dev *hdev = data;
2224
2225         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2226
2227         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2228                 return -EBUSY;
2229
2230         if (blocked) {
2231                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2232                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2233                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2234                         hci_dev_do_close(hdev);
2235         } else {
2236                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2237         }
2238
2239         return 0;
2240 }
2241
2242 static const struct rfkill_ops hci_rfkill_ops = {
2243         .set_block = hci_rfkill_set_block,
2244 };
2245
2246 static void hci_power_on(struct work_struct *work)
2247 {
2248         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2249         int err;
2250
2251         BT_DBG("%s", hdev->name);
2252
2253         if (test_bit(HCI_UP, &hdev->flags) &&
2254             hci_dev_test_flag(hdev, HCI_MGMT) &&
2255             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2256                 cancel_delayed_work(&hdev->power_off);
2257                 hci_req_sync_lock(hdev);
2258                 err = __hci_req_hci_power_on(hdev);
2259                 hci_req_sync_unlock(hdev);
2260                 mgmt_power_on(hdev, err);
2261                 return;
2262         }
2263
2264         err = hci_dev_do_open(hdev);
2265         if (err < 0) {
2266                 hci_dev_lock(hdev);
2267                 mgmt_set_powered_failed(hdev, err);
2268                 hci_dev_unlock(hdev);
2269                 return;
2270         }
2271
2272         /* During the HCI setup phase, a few error conditions are
2273          * ignored and they need to be checked now. If they are still
2274          * valid, it is important to turn the device back off.
2275          */
2276         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2277             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2278             (hdev->dev_type == HCI_PRIMARY &&
2279              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2280              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2281                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2282                 hci_dev_do_close(hdev);
2283         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2284                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2285                                    HCI_AUTO_OFF_TIMEOUT);
2286         }
2287
2288         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2289                 /* For unconfigured devices, set the HCI_RAW flag
2290                  * so that userspace can easily identify them.
2291                  */
2292                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2293                         set_bit(HCI_RAW, &hdev->flags);
2294
2295                 /* For fully configured devices, this will send
2296                  * the Index Added event. For unconfigured devices,
2297                  * it will send Unconfigued Index Added event.
2298                  *
2299                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2300                  * and no event will be send.
2301                  */
2302                 mgmt_index_added(hdev);
2303         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2304                 /* When the controller is now configured, then it
2305                  * is important to clear the HCI_RAW flag.
2306                  */
2307                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2308                         clear_bit(HCI_RAW, &hdev->flags);
2309
2310                 /* Powering on the controller with HCI_CONFIG set only
2311                  * happens with the transition from unconfigured to
2312                  * configured. This will send the Index Added event.
2313                  */
2314                 mgmt_index_added(hdev);
2315         }
2316 }
2317
2318 static void hci_power_off(struct work_struct *work)
2319 {
2320         struct hci_dev *hdev = container_of(work, struct hci_dev,
2321                                             power_off.work);
2322
2323         BT_DBG("%s", hdev->name);
2324
2325         hci_dev_do_close(hdev);
2326 }
2327
2328 static void hci_error_reset(struct work_struct *work)
2329 {
2330         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2331
2332         BT_DBG("%s", hdev->name);
2333
2334         if (hdev->hw_error)
2335                 hdev->hw_error(hdev, hdev->hw_error_code);
2336         else
2337                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2338
2339         if (hci_dev_do_close(hdev))
2340                 return;
2341
2342         hci_dev_do_open(hdev);
2343 }
2344
2345 void hci_uuids_clear(struct hci_dev *hdev)
2346 {
2347         struct bt_uuid *uuid, *tmp;
2348
2349         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2350                 list_del(&uuid->list);
2351                 kfree(uuid);
2352         }
2353 }
2354
2355 void hci_link_keys_clear(struct hci_dev *hdev)
2356 {
2357         struct link_key *key;
2358
2359         list_for_each_entry(key, &hdev->link_keys, list) {
2360                 list_del_rcu(&key->list);
2361                 kfree_rcu(key, rcu);
2362         }
2363 }
2364
2365 void hci_smp_ltks_clear(struct hci_dev *hdev)
2366 {
2367         struct smp_ltk *k;
2368
2369         list_for_each_entry(k, &hdev->long_term_keys, list) {
2370                 list_del_rcu(&k->list);
2371                 kfree_rcu(k, rcu);
2372         }
2373 }
2374
2375 void hci_smp_irks_clear(struct hci_dev *hdev)
2376 {
2377         struct smp_irk *k;
2378
2379         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2380                 list_del_rcu(&k->list);
2381                 kfree_rcu(k, rcu);
2382         }
2383 }
2384
2385 void hci_blocked_keys_clear(struct hci_dev *hdev)
2386 {
2387         struct blocked_key *b;
2388
2389         list_for_each_entry(b, &hdev->blocked_keys, list) {
2390                 list_del_rcu(&b->list);
2391                 kfree_rcu(b, rcu);
2392         }
2393 }
2394
2395 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2396 {
2397         bool blocked = false;
2398         struct blocked_key *b;
2399
2400         rcu_read_lock();
2401         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2402                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2403                         blocked = true;
2404                         break;
2405                 }
2406         }
2407
2408         rcu_read_unlock();
2409         return blocked;
2410 }
2411
2412 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2413 {
2414         struct link_key *k;
2415
2416         rcu_read_lock();
2417         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2418                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2419                         rcu_read_unlock();
2420
2421                         if (hci_is_blocked_key(hdev,
2422                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2423                                                k->val)) {
2424                                 bt_dev_warn_ratelimited(hdev,
2425                                                         "Link key blocked for %pMR",
2426                                                         &k->bdaddr);
2427                                 return NULL;
2428                         }
2429
2430                         return k;
2431                 }
2432         }
2433         rcu_read_unlock();
2434
2435         return NULL;
2436 }
2437
2438 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2439                                u8 key_type, u8 old_key_type)
2440 {
2441         /* Legacy key */
2442         if (key_type < 0x03)
2443                 return true;
2444
2445         /* Debug keys are insecure so don't store them persistently */
2446         if (key_type == HCI_LK_DEBUG_COMBINATION)
2447                 return false;
2448
2449         /* Changed combination key and there's no previous one */
2450         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2451                 return false;
2452
2453         /* Security mode 3 case */
2454         if (!conn)
2455                 return true;
2456
2457         /* BR/EDR key derived using SC from an LE link */
2458         if (conn->type == LE_LINK)
2459                 return true;
2460
2461         /* Neither local nor remote side had no-bonding as requirement */
2462         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2463                 return true;
2464
2465         /* Local side had dedicated bonding as requirement */
2466         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2467                 return true;
2468
2469         /* Remote side had dedicated bonding as requirement */
2470         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2471                 return true;
2472
2473         /* If none of the above criteria match, then don't store the key
2474          * persistently */
2475         return false;
2476 }
2477
2478 static u8 ltk_role(u8 type)
2479 {
2480         if (type == SMP_LTK)
2481                 return HCI_ROLE_MASTER;
2482
2483         return HCI_ROLE_SLAVE;
2484 }
2485
2486 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2487                              u8 addr_type, u8 role)
2488 {
2489         struct smp_ltk *k;
2490
2491         rcu_read_lock();
2492         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2493                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2494                         continue;
2495
2496                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2497                         rcu_read_unlock();
2498
2499                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2500                                                k->val)) {
2501                                 bt_dev_warn_ratelimited(hdev,
2502                                                         "LTK blocked for %pMR",
2503                                                         &k->bdaddr);
2504                                 return NULL;
2505                         }
2506
2507                         return k;
2508                 }
2509         }
2510         rcu_read_unlock();
2511
2512         return NULL;
2513 }
2514
2515 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2516 {
2517         struct smp_irk *irk_to_return = NULL;
2518         struct smp_irk *irk;
2519
2520         rcu_read_lock();
2521         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2522                 if (!bacmp(&irk->rpa, rpa)) {
2523                         irk_to_return = irk;
2524                         goto done;
2525                 }
2526         }
2527
2528         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2529                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2530                         bacpy(&irk->rpa, rpa);
2531                         irk_to_return = irk;
2532                         goto done;
2533                 }
2534         }
2535
2536 done:
2537         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2538                                                 irk_to_return->val)) {
2539                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2540                                         &irk_to_return->bdaddr);
2541                 irk_to_return = NULL;
2542         }
2543
2544         rcu_read_unlock();
2545
2546         return irk_to_return;
2547 }
2548
2549 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2550                                      u8 addr_type)
2551 {
2552         struct smp_irk *irk_to_return = NULL;
2553         struct smp_irk *irk;
2554
2555         /* Identity Address must be public or static random */
2556         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2557                 return NULL;
2558
2559         rcu_read_lock();
2560         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2561                 if (addr_type == irk->addr_type &&
2562                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2563                         irk_to_return = irk;
2564                         goto done;
2565                 }
2566         }
2567
2568 done:
2569
2570         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2571                                                 irk_to_return->val)) {
2572                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2573                                         &irk_to_return->bdaddr);
2574                 irk_to_return = NULL;
2575         }
2576
2577         rcu_read_unlock();
2578
2579         return irk_to_return;
2580 }
2581
2582 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2583                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2584                                   u8 pin_len, bool *persistent)
2585 {
2586         struct link_key *key, *old_key;
2587         u8 old_key_type;
2588
2589         old_key = hci_find_link_key(hdev, bdaddr);
2590         if (old_key) {
2591                 old_key_type = old_key->type;
2592                 key = old_key;
2593         } else {
2594                 old_key_type = conn ? conn->key_type : 0xff;
2595                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2596                 if (!key)
2597                         return NULL;
2598                 list_add_rcu(&key->list, &hdev->link_keys);
2599         }
2600
2601         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2602
2603         /* Some buggy controller combinations generate a changed
2604          * combination key for legacy pairing even when there's no
2605          * previous key */
2606         if (type == HCI_LK_CHANGED_COMBINATION &&
2607             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2608                 type = HCI_LK_COMBINATION;
2609                 if (conn)
2610                         conn->key_type = type;
2611         }
2612
2613         bacpy(&key->bdaddr, bdaddr);
2614         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2615         key->pin_len = pin_len;
2616
2617         if (type == HCI_LK_CHANGED_COMBINATION)
2618                 key->type = old_key_type;
2619         else
2620                 key->type = type;
2621
2622         if (persistent)
2623                 *persistent = hci_persistent_key(hdev, conn, type,
2624                                                  old_key_type);
2625
2626         return key;
2627 }
2628
2629 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2630                             u8 addr_type, u8 type, u8 authenticated,
2631                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2632 {
2633         struct smp_ltk *key, *old_key;
2634         u8 role = ltk_role(type);
2635
2636         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2637         if (old_key)
2638                 key = old_key;
2639         else {
2640                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2641                 if (!key)
2642                         return NULL;
2643                 list_add_rcu(&key->list, &hdev->long_term_keys);
2644         }
2645
2646         bacpy(&key->bdaddr, bdaddr);
2647         key->bdaddr_type = addr_type;
2648         memcpy(key->val, tk, sizeof(key->val));
2649         key->authenticated = authenticated;
2650         key->ediv = ediv;
2651         key->rand = rand;
2652         key->enc_size = enc_size;
2653         key->type = type;
2654
2655         return key;
2656 }
2657
2658 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2659                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2660 {
2661         struct smp_irk *irk;
2662
2663         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2664         if (!irk) {
2665                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2666                 if (!irk)
2667                         return NULL;
2668
2669                 bacpy(&irk->bdaddr, bdaddr);
2670                 irk->addr_type = addr_type;
2671
2672                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2673         }
2674
2675         memcpy(irk->val, val, 16);
2676         bacpy(&irk->rpa, rpa);
2677
2678         return irk;
2679 }
2680
2681 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2682 {
2683         struct link_key *key;
2684
2685         key = hci_find_link_key(hdev, bdaddr);
2686         if (!key)
2687                 return -ENOENT;
2688
2689         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2690
2691         list_del_rcu(&key->list);
2692         kfree_rcu(key, rcu);
2693
2694         return 0;
2695 }
2696
2697 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2698 {
2699         struct smp_ltk *k;
2700         int removed = 0;
2701
2702         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2703                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2704                         continue;
2705
2706                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2707
2708                 list_del_rcu(&k->list);
2709                 kfree_rcu(k, rcu);
2710                 removed++;
2711         }
2712
2713         return removed ? 0 : -ENOENT;
2714 }
2715
2716 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2717 {
2718         struct smp_irk *k;
2719
2720         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2721                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2722                         continue;
2723
2724                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2725
2726                 list_del_rcu(&k->list);
2727                 kfree_rcu(k, rcu);
2728         }
2729 }
2730
2731 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2732 {
2733         struct smp_ltk *k;
2734         struct smp_irk *irk;
2735         u8 addr_type;
2736
2737         if (type == BDADDR_BREDR) {
2738                 if (hci_find_link_key(hdev, bdaddr))
2739                         return true;
2740                 return false;
2741         }
2742
2743         /* Convert to HCI addr type which struct smp_ltk uses */
2744         if (type == BDADDR_LE_PUBLIC)
2745                 addr_type = ADDR_LE_DEV_PUBLIC;
2746         else
2747                 addr_type = ADDR_LE_DEV_RANDOM;
2748
2749         irk = hci_get_irk(hdev, bdaddr, addr_type);
2750         if (irk) {
2751                 bdaddr = &irk->bdaddr;
2752                 addr_type = irk->addr_type;
2753         }
2754
2755         rcu_read_lock();
2756         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2757                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2758                         rcu_read_unlock();
2759                         return true;
2760                 }
2761         }
2762         rcu_read_unlock();
2763
2764         return false;
2765 }
2766
2767 /* HCI command timer function */
2768 static void hci_cmd_timeout(struct work_struct *work)
2769 {
2770         struct hci_dev *hdev = container_of(work, struct hci_dev,
2771                                             cmd_timer.work);
2772
2773         if (hdev->sent_cmd) {
2774                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2775                 u16 opcode = __le16_to_cpu(sent->opcode);
2776
2777                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2778         } else {
2779                 bt_dev_err(hdev, "command tx timeout");
2780         }
2781
2782         if (hdev->cmd_timeout)
2783                 hdev->cmd_timeout(hdev);
2784
2785         atomic_set(&hdev->cmd_cnt, 1);
2786         queue_work(hdev->workqueue, &hdev->cmd_work);
2787 }
2788
2789 /* HCI ncmd timer function */
2790 static void hci_ncmd_timeout(struct work_struct *work)
2791 {
2792         struct hci_dev *hdev = container_of(work, struct hci_dev,
2793                                             ncmd_timer.work);
2794
2795         bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2796
2797         /* During HCI_INIT phase no events can be injected if the ncmd timer
2798          * triggers since the procedure has its own timeout handling.
2799          */
2800         if (test_bit(HCI_INIT, &hdev->flags))
2801                 return;
2802
2803         /* This is an irrecoverable state, inject hardware error event */
2804         hci_reset_dev(hdev);
2805 }
2806
2807 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2808                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2809 {
2810         struct oob_data *data;
2811
2812         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2813                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2814                         continue;
2815                 if (data->bdaddr_type != bdaddr_type)
2816                         continue;
2817                 return data;
2818         }
2819
2820         return NULL;
2821 }
2822
2823 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2824                                u8 bdaddr_type)
2825 {
2826         struct oob_data *data;
2827
2828         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2829         if (!data)
2830                 return -ENOENT;
2831
2832         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2833
2834         list_del(&data->list);
2835         kfree(data);
2836
2837         return 0;
2838 }
2839
2840 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2841 {
2842         struct oob_data *data, *n;
2843
2844         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2845                 list_del(&data->list);
2846                 kfree(data);
2847         }
2848 }
2849
2850 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2851                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2852                             u8 *hash256, u8 *rand256)
2853 {
2854         struct oob_data *data;
2855
2856         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2857         if (!data) {
2858                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2859                 if (!data)
2860                         return -ENOMEM;
2861
2862                 bacpy(&data->bdaddr, bdaddr);
2863                 data->bdaddr_type = bdaddr_type;
2864                 list_add(&data->list, &hdev->remote_oob_data);
2865         }
2866
2867         if (hash192 && rand192) {
2868                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2869                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2870                 if (hash256 && rand256)
2871                         data->present = 0x03;
2872         } else {
2873                 memset(data->hash192, 0, sizeof(data->hash192));
2874                 memset(data->rand192, 0, sizeof(data->rand192));
2875                 if (hash256 && rand256)
2876                         data->present = 0x02;
2877                 else
2878                         data->present = 0x00;
2879         }
2880
2881         if (hash256 && rand256) {
2882                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2883                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2884         } else {
2885                 memset(data->hash256, 0, sizeof(data->hash256));
2886                 memset(data->rand256, 0, sizeof(data->rand256));
2887                 if (hash192 && rand192)
2888                         data->present = 0x01;
2889         }
2890
2891         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2892
2893         return 0;
2894 }
2895
2896 /* This function requires the caller holds hdev->lock */
2897 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2898 {
2899         struct adv_info *adv_instance;
2900
2901         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2902                 if (adv_instance->instance == instance)
2903                         return adv_instance;
2904         }
2905
2906         return NULL;
2907 }
2908
2909 /* This function requires the caller holds hdev->lock */
2910 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2911 {
2912         struct adv_info *cur_instance;
2913
2914         cur_instance = hci_find_adv_instance(hdev, instance);
2915         if (!cur_instance)
2916                 return NULL;
2917
2918         if (cur_instance == list_last_entry(&hdev->adv_instances,
2919                                             struct adv_info, list))
2920                 return list_first_entry(&hdev->adv_instances,
2921                                                  struct adv_info, list);
2922         else
2923                 return list_next_entry(cur_instance, list);
2924 }
2925
2926 /* This function requires the caller holds hdev->lock */
2927 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2928 {
2929         struct adv_info *adv_instance;
2930
2931         adv_instance = hci_find_adv_instance(hdev, instance);
2932         if (!adv_instance)
2933                 return -ENOENT;
2934
2935         BT_DBG("%s removing %dMR", hdev->name, instance);
2936
2937         if (hdev->cur_adv_instance == instance) {
2938                 if (hdev->adv_instance_timeout) {
2939                         cancel_delayed_work(&hdev->adv_instance_expire);
2940                         hdev->adv_instance_timeout = 0;
2941                 }
2942                 hdev->cur_adv_instance = 0x00;
2943         }
2944
2945         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2946
2947         list_del(&adv_instance->list);
2948         kfree(adv_instance);
2949
2950         hdev->adv_instance_cnt--;
2951
2952         return 0;
2953 }
2954
2955 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2956 {
2957         struct adv_info *adv_instance, *n;
2958
2959         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2960                 adv_instance->rpa_expired = rpa_expired;
2961 }
2962
2963 /* This function requires the caller holds hdev->lock */
2964 void hci_adv_instances_clear(struct hci_dev *hdev)
2965 {
2966         struct adv_info *adv_instance, *n;
2967
2968         if (hdev->adv_instance_timeout) {
2969                 cancel_delayed_work(&hdev->adv_instance_expire);
2970                 hdev->adv_instance_timeout = 0;
2971         }
2972
2973         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2974                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2975                 list_del(&adv_instance->list);
2976                 kfree(adv_instance);
2977         }
2978
2979         hdev->adv_instance_cnt = 0;
2980         hdev->cur_adv_instance = 0x00;
2981 }
2982
2983 static void adv_instance_rpa_expired(struct work_struct *work)
2984 {
2985         struct adv_info *adv_instance = container_of(work, struct adv_info,
2986                                                      rpa_expired_cb.work);
2987
2988         BT_DBG("");
2989
2990         adv_instance->rpa_expired = true;
2991 }
2992
2993 /* This function requires the caller holds hdev->lock */
2994 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2995                          u16 adv_data_len, u8 *adv_data,
2996                          u16 scan_rsp_len, u8 *scan_rsp_data,
2997                          u16 timeout, u16 duration, s8 tx_power,
2998                          u32 min_interval, u32 max_interval)
2999 {
3000         struct adv_info *adv_instance;
3001
3002         adv_instance = hci_find_adv_instance(hdev, instance);
3003         if (adv_instance) {
3004                 memset(adv_instance->adv_data, 0,
3005                        sizeof(adv_instance->adv_data));
3006                 memset(adv_instance->scan_rsp_data, 0,
3007                        sizeof(adv_instance->scan_rsp_data));
3008         } else {
3009                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
3010                     instance < 1 || instance > hdev->le_num_of_adv_sets)
3011                         return -EOVERFLOW;
3012
3013                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
3014                 if (!adv_instance)
3015                         return -ENOMEM;
3016
3017                 adv_instance->pending = true;
3018                 adv_instance->instance = instance;
3019                 list_add(&adv_instance->list, &hdev->adv_instances);
3020                 hdev->adv_instance_cnt++;
3021         }
3022
3023         adv_instance->flags = flags;
3024         adv_instance->adv_data_len = adv_data_len;
3025         adv_instance->scan_rsp_len = scan_rsp_len;
3026         adv_instance->min_interval = min_interval;
3027         adv_instance->max_interval = max_interval;
3028         adv_instance->tx_power = tx_power;
3029
3030         if (adv_data_len)
3031                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3032
3033         if (scan_rsp_len)
3034                 memcpy(adv_instance->scan_rsp_data,
3035                        scan_rsp_data, scan_rsp_len);
3036
3037         adv_instance->timeout = timeout;
3038         adv_instance->remaining_time = timeout;
3039
3040         if (duration == 0)
3041                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3042         else
3043                 adv_instance->duration = duration;
3044
3045         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3046                           adv_instance_rpa_expired);
3047
3048         BT_DBG("%s for %dMR", hdev->name, instance);
3049
3050         return 0;
3051 }
3052
3053 /* This function requires the caller holds hdev->lock */
3054 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3055                               u16 adv_data_len, u8 *adv_data,
3056                               u16 scan_rsp_len, u8 *scan_rsp_data)
3057 {
3058         struct adv_info *adv_instance;
3059
3060         adv_instance = hci_find_adv_instance(hdev, instance);
3061
3062         /* If advertisement doesn't exist, we can't modify its data */
3063         if (!adv_instance)
3064                 return -ENOENT;
3065
3066         if (adv_data_len) {
3067                 memset(adv_instance->adv_data, 0,
3068                        sizeof(adv_instance->adv_data));
3069                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3070                 adv_instance->adv_data_len = adv_data_len;
3071         }
3072
3073         if (scan_rsp_len) {
3074                 memset(adv_instance->scan_rsp_data, 0,
3075                        sizeof(adv_instance->scan_rsp_data));
3076                 memcpy(adv_instance->scan_rsp_data,
3077                        scan_rsp_data, scan_rsp_len);
3078                 adv_instance->scan_rsp_len = scan_rsp_len;
3079         }
3080
3081         return 0;
3082 }
3083
3084 /* This function requires the caller holds hdev->lock */
3085 void hci_adv_monitors_clear(struct hci_dev *hdev)
3086 {
3087         struct adv_monitor *monitor;
3088         int handle;
3089
3090         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3091                 hci_free_adv_monitor(hdev, monitor);
3092
3093         idr_destroy(&hdev->adv_monitors_idr);
3094 }
3095
3096 /* Frees the monitor structure and do some bookkeepings.
3097  * This function requires the caller holds hdev->lock.
3098  */
3099 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3100 {
3101         struct adv_pattern *pattern;
3102         struct adv_pattern *tmp;
3103
3104         if (!monitor)
3105                 return;
3106
3107         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3108                 list_del(&pattern->list);
3109                 kfree(pattern);
3110         }
3111
3112         if (monitor->handle)
3113                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3114
3115         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3116                 hdev->adv_monitors_cnt--;
3117                 mgmt_adv_monitor_removed(hdev, monitor->handle);
3118         }
3119
3120         kfree(monitor);
3121 }
3122
3123 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3124 {
3125         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3126 }
3127
3128 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3129 {
3130         return mgmt_remove_adv_monitor_complete(hdev, status);
3131 }
3132
3133 /* Assigns handle to a monitor, and if offloading is supported and power is on,
3134  * also attempts to forward the request to the controller.
3135  * Returns true if request is forwarded (result is pending), false otherwise.
3136  * This function requires the caller holds hdev->lock.
3137  */
3138 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3139                          int *err)
3140 {
3141         int min, max, handle;
3142
3143         *err = 0;
3144
3145         if (!monitor) {
3146                 *err = -EINVAL;
3147                 return false;
3148         }
3149
3150         min = HCI_MIN_ADV_MONITOR_HANDLE;
3151         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3152         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3153                            GFP_KERNEL);
3154         if (handle < 0) {
3155                 *err = handle;
3156                 return false;
3157         }
3158
3159         monitor->handle = handle;
3160
3161         if (!hdev_is_powered(hdev))
3162                 return false;
3163
3164         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3165         case HCI_ADV_MONITOR_EXT_NONE:
3166                 hci_update_background_scan(hdev);
3167                 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
3168                 /* Message was not forwarded to controller - not an error */
3169                 return false;
3170         case HCI_ADV_MONITOR_EXT_MSFT:
3171                 *err = msft_add_monitor_pattern(hdev, monitor);
3172                 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
3173                            *err);
3174                 break;
3175         }
3176
3177         return (*err == 0);
3178 }
3179
3180 /* Attempts to tell the controller and free the monitor. If somehow the
3181  * controller doesn't have a corresponding handle, remove anyway.
3182  * Returns true if request is forwarded (result is pending), false otherwise.
3183  * This function requires the caller holds hdev->lock.
3184  */
3185 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3186                                    struct adv_monitor *monitor,
3187                                    u16 handle, int *err)
3188 {
3189         *err = 0;
3190
3191         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3192         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3193                 goto free_monitor;
3194         case HCI_ADV_MONITOR_EXT_MSFT:
3195                 *err = msft_remove_monitor(hdev, monitor, handle);
3196                 break;
3197         }
3198
3199         /* In case no matching handle registered, just free the monitor */
3200         if (*err == -ENOENT)
3201                 goto free_monitor;
3202
3203         return (*err == 0);
3204
3205 free_monitor:
3206         if (*err == -ENOENT)
3207                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3208                             monitor->handle);
3209         hci_free_adv_monitor(hdev, monitor);
3210
3211         *err = 0;
3212         return false;
3213 }
3214
3215 /* Returns true if request is forwarded (result is pending), false otherwise.
3216  * This function requires the caller holds hdev->lock.
3217  */
3218 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3219 {
3220         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3221         bool pending;
3222
3223         if (!monitor) {
3224                 *err = -EINVAL;
3225                 return false;
3226         }
3227
3228         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3229         if (!*err && !pending)
3230                 hci_update_background_scan(hdev);
3231
3232         bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
3233                    hdev->name, handle, *err, pending ? "" : "not ");
3234
3235         return pending;
3236 }
3237
3238 /* Returns true if request is forwarded (result is pending), false otherwise.
3239  * This function requires the caller holds hdev->lock.
3240  */
3241 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3242 {
3243         struct adv_monitor *monitor;
3244         int idr_next_id = 0;
3245         bool pending = false;
3246         bool update = false;
3247
3248         *err = 0;
3249
3250         while (!*err && !pending) {
3251                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3252                 if (!monitor)
3253                         break;
3254
3255                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3256
3257                 if (!*err && !pending)
3258                         update = true;
3259         }
3260
3261         if (update)
3262                 hci_update_background_scan(hdev);
3263
3264         bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
3265                    hdev->name, *err, pending ? "" : "not ");
3266
3267         return pending;
3268 }
3269
3270 /* This function requires the caller holds hdev->lock */
3271 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3272 {
3273         return !idr_is_empty(&hdev->adv_monitors_idr);
3274 }
3275
3276 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3277 {
3278         if (msft_monitor_supported(hdev))
3279                 return HCI_ADV_MONITOR_EXT_MSFT;
3280
3281         return HCI_ADV_MONITOR_EXT_NONE;
3282 }
3283
3284 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3285                                          bdaddr_t *bdaddr, u8 type)
3286 {
3287         struct bdaddr_list *b;
3288
3289         list_for_each_entry(b, bdaddr_list, list) {
3290                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3291                         return b;
3292         }
3293
3294         return NULL;
3295 }
3296
3297 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3298                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3299                                 u8 type)
3300 {
3301         struct bdaddr_list_with_irk *b;
3302
3303         list_for_each_entry(b, bdaddr_list, list) {
3304                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3305                         return b;
3306         }
3307
3308         return NULL;
3309 }
3310
3311 struct bdaddr_list_with_flags *
3312 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3313                                   bdaddr_t *bdaddr, u8 type)
3314 {
3315         struct bdaddr_list_with_flags *b;
3316
3317         list_for_each_entry(b, bdaddr_list, list) {
3318                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3319                         return b;
3320         }
3321
3322         return NULL;
3323 }
3324
3325 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3326 {
3327         struct bdaddr_list *b, *n;
3328
3329         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3330                 list_del(&b->list);
3331                 kfree(b);
3332         }
3333 }
3334
3335 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3336 {
3337         struct bdaddr_list *entry;
3338
3339         if (!bacmp(bdaddr, BDADDR_ANY))
3340                 return -EBADF;
3341
3342         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3343                 return -EEXIST;
3344
3345         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3346         if (!entry)
3347                 return -ENOMEM;
3348
3349         bacpy(&entry->bdaddr, bdaddr);
3350         entry->bdaddr_type = type;
3351
3352         list_add(&entry->list, list);
3353
3354         return 0;
3355 }
3356
3357 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3358                                         u8 type, u8 *peer_irk, u8 *local_irk)
3359 {
3360         struct bdaddr_list_with_irk *entry;
3361
3362         if (!bacmp(bdaddr, BDADDR_ANY))
3363                 return -EBADF;
3364
3365         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3366                 return -EEXIST;
3367
3368         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3369         if (!entry)
3370                 return -ENOMEM;
3371
3372         bacpy(&entry->bdaddr, bdaddr);
3373         entry->bdaddr_type = type;
3374
3375         if (peer_irk)
3376                 memcpy(entry->peer_irk, peer_irk, 16);
3377
3378         if (local_irk)
3379                 memcpy(entry->local_irk, local_irk, 16);
3380
3381         list_add(&entry->list, list);
3382
3383         return 0;
3384 }
3385
3386 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3387                                    u8 type, u32 flags)
3388 {
3389         struct bdaddr_list_with_flags *entry;
3390
3391         if (!bacmp(bdaddr, BDADDR_ANY))
3392                 return -EBADF;
3393
3394         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3395                 return -EEXIST;
3396
3397         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3398         if (!entry)
3399                 return -ENOMEM;
3400
3401         bacpy(&entry->bdaddr, bdaddr);
3402         entry->bdaddr_type = type;
3403         entry->current_flags = flags;
3404
3405         list_add(&entry->list, list);
3406
3407         return 0;
3408 }
3409
3410 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3411 {
3412         struct bdaddr_list *entry;
3413
3414         if (!bacmp(bdaddr, BDADDR_ANY)) {
3415                 hci_bdaddr_list_clear(list);
3416                 return 0;
3417         }
3418
3419         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3420         if (!entry)
3421                 return -ENOENT;
3422
3423         list_del(&entry->list);
3424         kfree(entry);
3425
3426         return 0;
3427 }
3428
3429 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3430                                                         u8 type)
3431 {
3432         struct bdaddr_list_with_irk *entry;
3433
3434         if (!bacmp(bdaddr, BDADDR_ANY)) {
3435                 hci_bdaddr_list_clear(list);
3436                 return 0;
3437         }
3438
3439         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3440         if (!entry)
3441                 return -ENOENT;
3442
3443         list_del(&entry->list);
3444         kfree(entry);
3445
3446         return 0;
3447 }
3448
3449 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3450                                    u8 type)
3451 {
3452         struct bdaddr_list_with_flags *entry;
3453
3454         if (!bacmp(bdaddr, BDADDR_ANY)) {
3455                 hci_bdaddr_list_clear(list);
3456                 return 0;
3457         }
3458
3459         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3460         if (!entry)
3461                 return -ENOENT;
3462
3463         list_del(&entry->list);
3464         kfree(entry);
3465
3466         return 0;
3467 }
3468
3469 /* This function requires the caller holds hdev->lock */
3470 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3471                                                bdaddr_t *addr, u8 addr_type)
3472 {
3473         struct hci_conn_params *params;
3474
3475         list_for_each_entry(params, &hdev->le_conn_params, list) {
3476                 if (bacmp(&params->addr, addr) == 0 &&
3477                     params->addr_type == addr_type) {
3478                         return params;
3479                 }
3480         }
3481
3482         return NULL;
3483 }
3484
3485 /* This function requires the caller holds hdev->lock */
3486 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3487                                                   bdaddr_t *addr, u8 addr_type)
3488 {
3489         struct hci_conn_params *param;
3490
3491         switch (addr_type) {
3492         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3493                 addr_type = ADDR_LE_DEV_PUBLIC;
3494                 break;
3495         case ADDR_LE_DEV_RANDOM_RESOLVED:
3496                 addr_type = ADDR_LE_DEV_RANDOM;
3497                 break;
3498         }
3499
3500         list_for_each_entry(param, list, action) {
3501                 if (bacmp(&param->addr, addr) == 0 &&
3502                     param->addr_type == addr_type)
3503                         return param;
3504         }
3505
3506         return NULL;
3507 }
3508
3509 /* This function requires the caller holds hdev->lock */
3510 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3511                                             bdaddr_t *addr, u8 addr_type)
3512 {
3513         struct hci_conn_params *params;
3514
3515         params = hci_conn_params_lookup(hdev, addr, addr_type);
3516         if (params)
3517                 return params;
3518
3519         params = kzalloc(sizeof(*params), GFP_KERNEL);
3520         if (!params) {
3521                 bt_dev_err(hdev, "out of memory");
3522                 return NULL;
3523         }
3524
3525         bacpy(&params->addr, addr);
3526         params->addr_type = addr_type;
3527
3528         list_add(&params->list, &hdev->le_conn_params);
3529         INIT_LIST_HEAD(&params->action);
3530
3531         params->conn_min_interval = hdev->le_conn_min_interval;
3532         params->conn_max_interval = hdev->le_conn_max_interval;
3533         params->conn_latency = hdev->le_conn_latency;
3534         params->supervision_timeout = hdev->le_supv_timeout;
3535         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3536
3537         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3538
3539         return params;
3540 }
3541
3542 static void hci_conn_params_free(struct hci_conn_params *params)
3543 {
3544         if (params->conn) {
3545                 hci_conn_drop(params->conn);
3546                 hci_conn_put(params->conn);
3547         }
3548
3549         list_del(&params->action);
3550         list_del(&params->list);
3551         kfree(params);
3552 }
3553
3554 /* This function requires the caller holds hdev->lock */
3555 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3556 {
3557         struct hci_conn_params *params;
3558
3559         params = hci_conn_params_lookup(hdev, addr, addr_type);
3560         if (!params)
3561                 return;
3562
3563         hci_conn_params_free(params);
3564
3565         hci_update_background_scan(hdev);
3566
3567         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3568 }
3569
3570 /* This function requires the caller holds hdev->lock */
3571 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3572 {
3573         struct hci_conn_params *params, *tmp;
3574
3575         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3576                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3577                         continue;
3578
3579                 /* If trying to establish one time connection to disabled
3580                  * device, leave the params, but mark them as just once.
3581                  */
3582                 if (params->explicit_connect) {
3583                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3584                         continue;
3585                 }
3586
3587                 list_del(&params->list);
3588                 kfree(params);
3589         }
3590
3591         BT_DBG("All LE disabled connection parameters were removed");
3592 }
3593
3594 /* This function requires the caller holds hdev->lock */
3595 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3596 {
3597         struct hci_conn_params *params, *tmp;
3598
3599         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3600                 hci_conn_params_free(params);
3601
3602         BT_DBG("All LE connection parameters were removed");
3603 }
3604
3605 /* Copy the Identity Address of the controller.
3606  *
3607  * If the controller has a public BD_ADDR, then by default use that one.
3608  * If this is a LE only controller without a public address, default to
3609  * the static random address.
3610  *
3611  * For debugging purposes it is possible to force controllers with a
3612  * public address to use the static random address instead.
3613  *
3614  * In case BR/EDR has been disabled on a dual-mode controller and
3615  * userspace has configured a static address, then that address
3616  * becomes the identity address instead of the public BR/EDR address.
3617  */
3618 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3619                                u8 *bdaddr_type)
3620 {
3621         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3622             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3623             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3624              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3625                 bacpy(bdaddr, &hdev->static_addr);
3626                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3627         } else {
3628                 bacpy(bdaddr, &hdev->bdaddr);
3629                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3630         }
3631 }
3632
3633 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3634 {
3635         int i;
3636
3637         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3638                 clear_bit(i, hdev->suspend_tasks);
3639
3640         wake_up(&hdev->suspend_wait_q);
3641 }
3642
3643 static int hci_suspend_wait_event(struct hci_dev *hdev)
3644 {
3645 #define WAKE_COND                                                              \
3646         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3647          __SUSPEND_NUM_TASKS)
3648
3649         int i;
3650         int ret = wait_event_timeout(hdev->suspend_wait_q,
3651                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3652
3653         if (ret == 0) {
3654                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3655                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3656                         if (test_bit(i, hdev->suspend_tasks))
3657                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3658                         clear_bit(i, hdev->suspend_tasks);
3659                 }
3660
3661                 ret = -ETIMEDOUT;
3662         } else {
3663                 ret = 0;
3664         }
3665
3666         return ret;
3667 }
3668
3669 static void hci_prepare_suspend(struct work_struct *work)
3670 {
3671         struct hci_dev *hdev =
3672                 container_of(work, struct hci_dev, suspend_prepare);
3673
3674         hci_dev_lock(hdev);
3675         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3676         hci_dev_unlock(hdev);
3677 }
3678
3679 static int hci_change_suspend_state(struct hci_dev *hdev,
3680                                     enum suspended_state next)
3681 {
3682         hdev->suspend_state_next = next;
3683         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3684         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3685         return hci_suspend_wait_event(hdev);
3686 }
3687
3688 static void hci_clear_wake_reason(struct hci_dev *hdev)
3689 {
3690         hci_dev_lock(hdev);
3691
3692         hdev->wake_reason = 0;
3693         bacpy(&hdev->wake_addr, BDADDR_ANY);
3694         hdev->wake_addr_type = 0;
3695
3696         hci_dev_unlock(hdev);
3697 }
3698
3699 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3700                                 void *data)
3701 {
3702         struct hci_dev *hdev =
3703                 container_of(nb, struct hci_dev, suspend_notifier);
3704         int ret = 0;
3705         u8 state = BT_RUNNING;
3706
3707         /* If powering down, wait for completion. */
3708         if (mgmt_powering_down(hdev)) {
3709                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3710                 ret = hci_suspend_wait_event(hdev);
3711                 if (ret)
3712                         goto done;
3713         }
3714
3715         /* Suspend notifier should only act on events when powered. */
3716         if (!hdev_is_powered(hdev) ||
3717             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3718                 goto done;
3719
3720         if (action == PM_SUSPEND_PREPARE) {
3721                 /* Suspend consists of two actions:
3722                  *  - First, disconnect everything and make the controller not
3723                  *    connectable (disabling scanning)
3724                  *  - Second, program event filter/accept list and enable scan
3725                  */
3726                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3727                 if (!ret)
3728                         state = BT_SUSPEND_DISCONNECT;
3729
3730                 /* Only configure accept list if disconnect succeeded and wake
3731                  * isn't being prevented.
3732                  */
3733                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3734                         ret = hci_change_suspend_state(hdev,
3735                                                 BT_SUSPEND_CONFIGURE_WAKE);
3736                         if (!ret)
3737                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3738                 }
3739
3740                 hci_clear_wake_reason(hdev);
3741                 mgmt_suspending(hdev, state);
3742
3743         } else if (action == PM_POST_SUSPEND) {
3744                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3745
3746                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3747                               hdev->wake_addr_type);
3748         }
3749
3750 done:
3751         /* We always allow suspend even if suspend preparation failed and
3752          * attempt to recover in resume.
3753          */
3754         if (ret)
3755                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3756                            action, ret);
3757
3758         return NOTIFY_DONE;
3759 }
3760
3761 /* Alloc HCI device */
3762 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
3763 {
3764         struct hci_dev *hdev;
3765         unsigned int alloc_size;
3766
3767         alloc_size = sizeof(*hdev);
3768         if (sizeof_priv) {
3769                 /* Fixme: May need ALIGN-ment? */
3770                 alloc_size += sizeof_priv;
3771         }
3772
3773         hdev = kzalloc(alloc_size, GFP_KERNEL);
3774         if (!hdev)
3775                 return NULL;
3776
3777         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3778         hdev->esco_type = (ESCO_HV1);
3779         hdev->link_mode = (HCI_LM_ACCEPT);
3780         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3781         hdev->io_capability = 0x03;     /* No Input No Output */
3782         hdev->manufacturer = 0xffff;    /* Default to internal use */
3783         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3784         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3785         hdev->adv_instance_cnt = 0;
3786         hdev->cur_adv_instance = 0x00;
3787         hdev->adv_instance_timeout = 0;
3788
3789         hdev->advmon_allowlist_duration = 300;
3790         hdev->advmon_no_filter_duration = 500;
3791         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
3792
3793         hdev->sniff_max_interval = 800;
3794         hdev->sniff_min_interval = 80;
3795
3796         hdev->le_adv_channel_map = 0x07;
3797         hdev->le_adv_min_interval = 0x0800;
3798         hdev->le_adv_max_interval = 0x0800;
3799         hdev->le_scan_interval = 0x0060;
3800         hdev->le_scan_window = 0x0030;
3801         hdev->le_scan_int_suspend = 0x0400;
3802         hdev->le_scan_window_suspend = 0x0012;
3803         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3804         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3805         hdev->le_scan_int_adv_monitor = 0x0060;
3806         hdev->le_scan_window_adv_monitor = 0x0030;
3807         hdev->le_scan_int_connect = 0x0060;
3808         hdev->le_scan_window_connect = 0x0060;
3809         hdev->le_conn_min_interval = 0x0018;
3810         hdev->le_conn_max_interval = 0x0028;
3811         hdev->le_conn_latency = 0x0000;
3812         hdev->le_supv_timeout = 0x002a;
3813         hdev->le_def_tx_len = 0x001b;
3814         hdev->le_def_tx_time = 0x0148;
3815         hdev->le_max_tx_len = 0x001b;
3816         hdev->le_max_tx_time = 0x0148;
3817         hdev->le_max_rx_len = 0x001b;
3818         hdev->le_max_rx_time = 0x0148;
3819         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3820         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3821         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3822         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3823         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3824         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3825         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3826         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3827         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3828
3829         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3830         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3831         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3832         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3833         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3834         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3835
3836         /* default 1.28 sec page scan */
3837         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3838         hdev->def_page_scan_int = 0x0800;
3839         hdev->def_page_scan_window = 0x0012;
3840
3841         mutex_init(&hdev->lock);
3842         mutex_init(&hdev->req_lock);
3843
3844         INIT_LIST_HEAD(&hdev->mgmt_pending);
3845         INIT_LIST_HEAD(&hdev->reject_list);
3846         INIT_LIST_HEAD(&hdev->accept_list);
3847         INIT_LIST_HEAD(&hdev->uuids);
3848         INIT_LIST_HEAD(&hdev->link_keys);
3849         INIT_LIST_HEAD(&hdev->long_term_keys);
3850         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3851         INIT_LIST_HEAD(&hdev->remote_oob_data);
3852         INIT_LIST_HEAD(&hdev->le_accept_list);
3853         INIT_LIST_HEAD(&hdev->le_resolv_list);
3854         INIT_LIST_HEAD(&hdev->le_conn_params);
3855         INIT_LIST_HEAD(&hdev->pend_le_conns);
3856         INIT_LIST_HEAD(&hdev->pend_le_reports);
3857         INIT_LIST_HEAD(&hdev->conn_hash.list);
3858         INIT_LIST_HEAD(&hdev->adv_instances);
3859         INIT_LIST_HEAD(&hdev->blocked_keys);
3860
3861         INIT_WORK(&hdev->rx_work, hci_rx_work);
3862         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3863         INIT_WORK(&hdev->tx_work, hci_tx_work);
3864         INIT_WORK(&hdev->power_on, hci_power_on);
3865         INIT_WORK(&hdev->error_reset, hci_error_reset);
3866         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3867
3868         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3869
3870         skb_queue_head_init(&hdev->rx_q);
3871         skb_queue_head_init(&hdev->cmd_q);
3872         skb_queue_head_init(&hdev->raw_q);
3873
3874         init_waitqueue_head(&hdev->req_wait_q);
3875         init_waitqueue_head(&hdev->suspend_wait_q);
3876
3877         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3878         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3879
3880         hci_request_setup(hdev);
3881
3882         hci_init_sysfs(hdev);
3883         discovery_init(hdev);
3884
3885         return hdev;
3886 }
3887 EXPORT_SYMBOL(hci_alloc_dev_priv);
3888
3889 /* Free HCI device */
3890 void hci_free_dev(struct hci_dev *hdev)
3891 {
3892         /* will free via device release */
3893         put_device(&hdev->dev);
3894 }
3895 EXPORT_SYMBOL(hci_free_dev);
3896
3897 /* Register HCI device */
3898 int hci_register_dev(struct hci_dev *hdev)
3899 {
3900         int id, error;
3901
3902         if (!hdev->open || !hdev->close || !hdev->send)
3903                 return -EINVAL;
3904
3905         /* Do not allow HCI_AMP devices to register at index 0,
3906          * so the index can be used as the AMP controller ID.
3907          */
3908         switch (hdev->dev_type) {
3909         case HCI_PRIMARY:
3910                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3911                 break;
3912         case HCI_AMP:
3913                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3914                 break;
3915         default:
3916                 return -EINVAL;
3917         }
3918
3919         if (id < 0)
3920                 return id;
3921
3922         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3923         hdev->id = id;
3924
3925         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3926
3927         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3928         if (!hdev->workqueue) {
3929                 error = -ENOMEM;
3930                 goto err;
3931         }
3932
3933         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3934                                                       hdev->name);
3935         if (!hdev->req_workqueue) {
3936                 destroy_workqueue(hdev->workqueue);
3937                 error = -ENOMEM;
3938                 goto err;
3939         }
3940
3941         if (!IS_ERR_OR_NULL(bt_debugfs))
3942                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3943
3944         dev_set_name(&hdev->dev, "%s", hdev->name);
3945
3946         error = device_add(&hdev->dev);
3947         if (error < 0)
3948                 goto err_wqueue;
3949
3950         hci_leds_init(hdev);
3951
3952         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3953                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3954                                     hdev);
3955         if (hdev->rfkill) {
3956                 if (rfkill_register(hdev->rfkill) < 0) {
3957                         rfkill_destroy(hdev->rfkill);
3958                         hdev->rfkill = NULL;
3959                 }
3960         }
3961
3962         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3963                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3964
3965         hci_dev_set_flag(hdev, HCI_SETUP);
3966         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3967
3968         if (hdev->dev_type == HCI_PRIMARY) {
3969                 /* Assume BR/EDR support until proven otherwise (such as
3970                  * through reading supported features during init.
3971                  */
3972                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3973         }
3974
3975         write_lock(&hci_dev_list_lock);
3976         list_add(&hdev->list, &hci_dev_list);
3977         write_unlock(&hci_dev_list_lock);
3978
3979         /* Devices that are marked for raw-only usage are unconfigured
3980          * and should not be included in normal operation.
3981          */
3982         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3983                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3984
3985         hci_sock_dev_event(hdev, HCI_DEV_REG);
3986         hci_dev_hold(hdev);
3987
3988         if (!hdev->suspend_notifier.notifier_call &&
3989             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3990                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3991                 error = register_pm_notifier(&hdev->suspend_notifier);
3992                 if (error)
3993                         goto err_wqueue;
3994         }
3995
3996         queue_work(hdev->req_workqueue, &hdev->power_on);
3997
3998         idr_init(&hdev->adv_monitors_idr);
3999
4000         return id;
4001
4002 err_wqueue:
4003         debugfs_remove_recursive(hdev->debugfs);
4004         destroy_workqueue(hdev->workqueue);
4005         destroy_workqueue(hdev->req_workqueue);
4006 err:
4007         ida_simple_remove(&hci_index_ida, hdev->id);
4008
4009         return error;
4010 }
4011 EXPORT_SYMBOL(hci_register_dev);
4012
4013 /* Unregister HCI device */
4014 void hci_unregister_dev(struct hci_dev *hdev)
4015 {
4016         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4017
4018         hci_dev_set_flag(hdev, HCI_UNREGISTER);
4019
4020         write_lock(&hci_dev_list_lock);
4021         list_del(&hdev->list);
4022         write_unlock(&hci_dev_list_lock);
4023
4024         cancel_work_sync(&hdev->power_on);
4025
4026         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4027                 hci_suspend_clear_tasks(hdev);
4028                 unregister_pm_notifier(&hdev->suspend_notifier);
4029                 cancel_work_sync(&hdev->suspend_prepare);
4030         }
4031
4032         hci_dev_do_close(hdev);
4033
4034         if (!test_bit(HCI_INIT, &hdev->flags) &&
4035             !hci_dev_test_flag(hdev, HCI_SETUP) &&
4036             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4037                 hci_dev_lock(hdev);
4038                 mgmt_index_removed(hdev);
4039                 hci_dev_unlock(hdev);
4040         }
4041
4042         /* mgmt_index_removed should take care of emptying the
4043          * pending list */
4044         BUG_ON(!list_empty(&hdev->mgmt_pending));
4045
4046         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4047
4048         if (hdev->rfkill) {
4049                 rfkill_unregister(hdev->rfkill);
4050                 rfkill_destroy(hdev->rfkill);
4051         }
4052
4053         device_del(&hdev->dev);
4054         /* Actual cleanup is deferred until hci_release_dev(). */
4055         hci_dev_put(hdev);
4056 }
4057 EXPORT_SYMBOL(hci_unregister_dev);
4058
4059 /* Release HCI device */
4060 void hci_release_dev(struct hci_dev *hdev)
4061 {
4062         debugfs_remove_recursive(hdev->debugfs);
4063         kfree_const(hdev->hw_info);
4064         kfree_const(hdev->fw_info);
4065
4066         destroy_workqueue(hdev->workqueue);
4067         destroy_workqueue(hdev->req_workqueue);
4068
4069         hci_dev_lock(hdev);
4070         hci_bdaddr_list_clear(&hdev->reject_list);
4071         hci_bdaddr_list_clear(&hdev->accept_list);
4072         hci_uuids_clear(hdev);
4073         hci_link_keys_clear(hdev);
4074         hci_smp_ltks_clear(hdev);
4075         hci_smp_irks_clear(hdev);
4076         hci_remote_oob_data_clear(hdev);
4077         hci_adv_instances_clear(hdev);
4078         hci_adv_monitors_clear(hdev);
4079         hci_bdaddr_list_clear(&hdev->le_accept_list);
4080         hci_bdaddr_list_clear(&hdev->le_resolv_list);
4081         hci_conn_params_clear_all(hdev);
4082         hci_discovery_filter_clear(hdev);
4083         hci_blocked_keys_clear(hdev);
4084         hci_dev_unlock(hdev);
4085
4086         ida_simple_remove(&hci_index_ida, hdev->id);
4087         kfree_skb(hdev->sent_cmd);
4088         kfree(hdev);
4089 }
4090 EXPORT_SYMBOL(hci_release_dev);
4091
4092 /* Suspend HCI device */
4093 int hci_suspend_dev(struct hci_dev *hdev)
4094 {
4095         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4096         return 0;
4097 }
4098 EXPORT_SYMBOL(hci_suspend_dev);
4099
4100 /* Resume HCI device */
4101 int hci_resume_dev(struct hci_dev *hdev)
4102 {
4103         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4104         return 0;
4105 }
4106 EXPORT_SYMBOL(hci_resume_dev);
4107
4108 /* Reset HCI device */
4109 int hci_reset_dev(struct hci_dev *hdev)
4110 {
4111         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4112         struct sk_buff *skb;
4113
4114         skb = bt_skb_alloc(3, GFP_ATOMIC);
4115         if (!skb)
4116                 return -ENOMEM;
4117
4118         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4119         skb_put_data(skb, hw_err, 3);
4120
4121         bt_dev_err(hdev, "Injecting HCI hardware error event");
4122
4123         /* Send Hardware Error to upper stack */
4124         return hci_recv_frame(hdev, skb);
4125 }
4126 EXPORT_SYMBOL(hci_reset_dev);
4127
4128 /* Receive frame from HCI drivers */
4129 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4130 {
4131         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4132                       && !test_bit(HCI_INIT, &hdev->flags))) {
4133                 kfree_skb(skb);
4134                 return -ENXIO;
4135         }
4136
4137         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4138             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4139             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4140             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4141                 kfree_skb(skb);
4142                 return -EINVAL;
4143         }
4144
4145         /* Incoming skb */
4146         bt_cb(skb)->incoming = 1;
4147
4148         /* Time stamp */
4149         __net_timestamp(skb);
4150
4151         skb_queue_tail(&hdev->rx_q, skb);
4152         queue_work(hdev->workqueue, &hdev->rx_work);
4153
4154         return 0;
4155 }
4156 EXPORT_SYMBOL(hci_recv_frame);
4157
4158 /* Receive diagnostic message from HCI drivers */
4159 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4160 {
4161         /* Mark as diagnostic packet */
4162         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4163
4164         /* Time stamp */
4165         __net_timestamp(skb);
4166
4167         skb_queue_tail(&hdev->rx_q, skb);
4168         queue_work(hdev->workqueue, &hdev->rx_work);
4169
4170         return 0;
4171 }
4172 EXPORT_SYMBOL(hci_recv_diag);
4173
4174 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4175 {
4176         va_list vargs;
4177
4178         va_start(vargs, fmt);
4179         kfree_const(hdev->hw_info);
4180         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4181         va_end(vargs);
4182 }
4183 EXPORT_SYMBOL(hci_set_hw_info);
4184
4185 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4186 {
4187         va_list vargs;
4188
4189         va_start(vargs, fmt);
4190         kfree_const(hdev->fw_info);
4191         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4192         va_end(vargs);
4193 }
4194 EXPORT_SYMBOL(hci_set_fw_info);
4195
4196 /* ---- Interface to upper protocols ---- */
4197
4198 int hci_register_cb(struct hci_cb *cb)
4199 {
4200         BT_DBG("%p name %s", cb, cb->name);
4201
4202         mutex_lock(&hci_cb_list_lock);
4203         list_add_tail(&cb->list, &hci_cb_list);
4204         mutex_unlock(&hci_cb_list_lock);
4205
4206         return 0;
4207 }
4208 EXPORT_SYMBOL(hci_register_cb);
4209
4210 int hci_unregister_cb(struct hci_cb *cb)
4211 {
4212         BT_DBG("%p name %s", cb, cb->name);
4213
4214         mutex_lock(&hci_cb_list_lock);
4215         list_del(&cb->list);
4216         mutex_unlock(&hci_cb_list_lock);
4217
4218         return 0;
4219 }
4220 EXPORT_SYMBOL(hci_unregister_cb);
4221
4222 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4223 {
4224         int err;
4225
4226         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4227                skb->len);
4228
4229         /* Time stamp */
4230         __net_timestamp(skb);
4231
4232         /* Send copy to monitor */
4233         hci_send_to_monitor(hdev, skb);
4234
4235         if (atomic_read(&hdev->promisc)) {
4236                 /* Send copy to the sockets */
4237                 hci_send_to_sock(hdev, skb);
4238         }
4239
4240         /* Get rid of skb owner, prior to sending to the driver. */
4241         skb_orphan(skb);
4242
4243         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4244                 kfree_skb(skb);
4245                 return;
4246         }
4247
4248         err = hdev->send(hdev, skb);
4249         if (err < 0) {
4250                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4251                 kfree_skb(skb);
4252         }
4253 }
4254
4255 /* Send HCI command */
4256 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4257                  const void *param)
4258 {
4259         struct sk_buff *skb;
4260
4261         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4262
4263         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4264         if (!skb) {
4265                 bt_dev_err(hdev, "no memory for command");
4266                 return -ENOMEM;
4267         }
4268
4269         /* Stand-alone HCI commands must be flagged as
4270          * single-command requests.
4271          */
4272         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4273
4274         skb_queue_tail(&hdev->cmd_q, skb);
4275         queue_work(hdev->workqueue, &hdev->cmd_work);
4276
4277         return 0;
4278 }
4279
4280 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4281                    const void *param)
4282 {
4283         struct sk_buff *skb;
4284
4285         if (hci_opcode_ogf(opcode) != 0x3f) {
4286                 /* A controller receiving a command shall respond with either
4287                  * a Command Status Event or a Command Complete Event.
4288                  * Therefore, all standard HCI commands must be sent via the
4289                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4290                  * Some vendors do not comply with this rule for vendor-specific
4291                  * commands and do not return any event. We want to support
4292                  * unresponded commands for such cases only.
4293                  */
4294                 bt_dev_err(hdev, "unresponded command not supported");
4295                 return -EINVAL;
4296         }
4297
4298         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4299         if (!skb) {
4300                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4301                            opcode);
4302                 return -ENOMEM;
4303         }
4304
4305         hci_send_frame(hdev, skb);
4306
4307         return 0;
4308 }
4309 EXPORT_SYMBOL(__hci_cmd_send);
4310
4311 /* Get data from the previously sent command */
4312 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4313 {
4314         struct hci_command_hdr *hdr;
4315
4316         if (!hdev->sent_cmd)
4317                 return NULL;
4318
4319         hdr = (void *) hdev->sent_cmd->data;
4320
4321         if (hdr->opcode != cpu_to_le16(opcode))
4322                 return NULL;
4323
4324         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4325
4326         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4327 }
4328
4329 /* Send HCI command and wait for command complete event */
4330 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4331                              const void *param, u32 timeout)
4332 {
4333         struct sk_buff *skb;
4334
4335         if (!test_bit(HCI_UP, &hdev->flags))
4336                 return ERR_PTR(-ENETDOWN);
4337
4338         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4339
4340         hci_req_sync_lock(hdev);
4341         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4342         hci_req_sync_unlock(hdev);
4343
4344         return skb;
4345 }
4346 EXPORT_SYMBOL(hci_cmd_sync);
4347
4348 /* Send ACL data */
4349 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4350 {
4351         struct hci_acl_hdr *hdr;
4352         int len = skb->len;
4353
4354         skb_push(skb, HCI_ACL_HDR_SIZE);
4355         skb_reset_transport_header(skb);
4356         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4357         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4358         hdr->dlen   = cpu_to_le16(len);
4359 }
4360
4361 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4362                           struct sk_buff *skb, __u16 flags)
4363 {
4364         struct hci_conn *conn = chan->conn;
4365         struct hci_dev *hdev = conn->hdev;
4366         struct sk_buff *list;
4367
4368         skb->len = skb_headlen(skb);
4369         skb->data_len = 0;
4370
4371         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4372
4373         switch (hdev->dev_type) {
4374         case HCI_PRIMARY:
4375                 hci_add_acl_hdr(skb, conn->handle, flags);
4376                 break;
4377         case HCI_AMP:
4378                 hci_add_acl_hdr(skb, chan->handle, flags);
4379                 break;
4380         default:
4381                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4382                 return;
4383         }
4384
4385         list = skb_shinfo(skb)->frag_list;
4386         if (!list) {
4387                 /* Non fragmented */
4388                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4389
4390                 skb_queue_tail(queue, skb);
4391         } else {
4392                 /* Fragmented */
4393                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4394
4395                 skb_shinfo(skb)->frag_list = NULL;
4396
4397                 /* Queue all fragments atomically. We need to use spin_lock_bh
4398                  * here because of 6LoWPAN links, as there this function is
4399                  * called from softirq and using normal spin lock could cause
4400                  * deadlocks.
4401                  */
4402                 spin_lock_bh(&queue->lock);
4403
4404                 __skb_queue_tail(queue, skb);
4405
4406                 flags &= ~ACL_START;
4407                 flags |= ACL_CONT;
4408                 do {
4409                         skb = list; list = list->next;
4410
4411                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4412                         hci_add_acl_hdr(skb, conn->handle, flags);
4413
4414                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4415
4416                         __skb_queue_tail(queue, skb);
4417                 } while (list);
4418
4419                 spin_unlock_bh(&queue->lock);
4420         }
4421 }
4422
4423 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4424 {
4425         struct hci_dev *hdev = chan->conn->hdev;
4426
4427         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4428
4429         hci_queue_acl(chan, &chan->data_q, skb, flags);
4430
4431         queue_work(hdev->workqueue, &hdev->tx_work);
4432 }
4433
4434 /* Send SCO data */
4435 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4436 {
4437         struct hci_dev *hdev = conn->hdev;
4438         struct hci_sco_hdr hdr;
4439
4440         BT_DBG("%s len %d", hdev->name, skb->len);
4441
4442         hdr.handle = cpu_to_le16(conn->handle);
4443         hdr.dlen   = skb->len;
4444
4445         skb_push(skb, HCI_SCO_HDR_SIZE);
4446         skb_reset_transport_header(skb);
4447         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4448
4449         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4450
4451         skb_queue_tail(&conn->data_q, skb);
4452         queue_work(hdev->workqueue, &hdev->tx_work);
4453 }
4454
4455 /* ---- HCI TX task (outgoing data) ---- */
4456
4457 /* HCI Connection scheduler */
4458 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4459                                      int *quote)
4460 {
4461         struct hci_conn_hash *h = &hdev->conn_hash;
4462         struct hci_conn *conn = NULL, *c;
4463         unsigned int num = 0, min = ~0;
4464
4465         /* We don't have to lock device here. Connections are always
4466          * added and removed with TX task disabled. */
4467
4468         rcu_read_lock();
4469
4470         list_for_each_entry_rcu(c, &h->list, list) {
4471                 if (c->type != type || skb_queue_empty(&c->data_q))
4472                         continue;
4473
4474                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4475                         continue;
4476
4477                 num++;
4478
4479                 if (c->sent < min) {
4480                         min  = c->sent;
4481                         conn = c;
4482                 }
4483
4484                 if (hci_conn_num(hdev, type) == num)
4485                         break;
4486         }
4487
4488         rcu_read_unlock();
4489
4490         if (conn) {
4491                 int cnt, q;
4492
4493                 switch (conn->type) {
4494                 case ACL_LINK:
4495                         cnt = hdev->acl_cnt;
4496                         break;
4497                 case SCO_LINK:
4498                 case ESCO_LINK:
4499                         cnt = hdev->sco_cnt;
4500                         break;
4501                 case LE_LINK:
4502                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4503                         break;
4504                 default:
4505                         cnt = 0;
4506                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4507                 }
4508
4509                 q = cnt / num;
4510                 *quote = q ? q : 1;
4511         } else
4512                 *quote = 0;
4513
4514         BT_DBG("conn %p quote %d", conn, *quote);
4515         return conn;
4516 }
4517
4518 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4519 {
4520         struct hci_conn_hash *h = &hdev->conn_hash;
4521         struct hci_conn *c;
4522
4523         bt_dev_err(hdev, "link tx timeout");
4524
4525         rcu_read_lock();
4526
4527         /* Kill stalled connections */
4528         list_for_each_entry_rcu(c, &h->list, list) {
4529                 if (c->type == type && c->sent) {
4530                         bt_dev_err(hdev, "killing stalled connection %pMR",
4531                                    &c->dst);
4532                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4533                 }
4534         }
4535
4536         rcu_read_unlock();
4537 }
4538
4539 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4540                                       int *quote)
4541 {
4542         struct hci_conn_hash *h = &hdev->conn_hash;
4543         struct hci_chan *chan = NULL;
4544         unsigned int num = 0, min = ~0, cur_prio = 0;
4545         struct hci_conn *conn;
4546         int cnt, q, conn_num = 0;
4547
4548         BT_DBG("%s", hdev->name);
4549
4550         rcu_read_lock();
4551
4552         list_for_each_entry_rcu(conn, &h->list, list) {
4553                 struct hci_chan *tmp;
4554
4555                 if (conn->type != type)
4556                         continue;
4557
4558                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4559                         continue;
4560
4561                 conn_num++;
4562
4563                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4564                         struct sk_buff *skb;
4565
4566                         if (skb_queue_empty(&tmp->data_q))
4567                                 continue;
4568
4569                         skb = skb_peek(&tmp->data_q);
4570                         if (skb->priority < cur_prio)
4571                                 continue;
4572
4573                         if (skb->priority > cur_prio) {
4574                                 num = 0;
4575                                 min = ~0;
4576                                 cur_prio = skb->priority;
4577                         }
4578
4579                         num++;
4580
4581                         if (conn->sent < min) {
4582                                 min  = conn->sent;
4583                                 chan = tmp;
4584                         }
4585                 }
4586
4587                 if (hci_conn_num(hdev, type) == conn_num)
4588                         break;
4589         }
4590
4591         rcu_read_unlock();
4592
4593         if (!chan)
4594                 return NULL;
4595
4596         switch (chan->conn->type) {
4597         case ACL_LINK:
4598                 cnt = hdev->acl_cnt;
4599                 break;
4600         case AMP_LINK:
4601                 cnt = hdev->block_cnt;
4602                 break;
4603         case SCO_LINK:
4604         case ESCO_LINK:
4605                 cnt = hdev->sco_cnt;
4606                 break;
4607         case LE_LINK:
4608                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4609                 break;
4610         default:
4611                 cnt = 0;
4612                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4613         }
4614
4615         q = cnt / num;
4616         *quote = q ? q : 1;
4617         BT_DBG("chan %p quote %d", chan, *quote);
4618         return chan;
4619 }
4620
4621 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4622 {
4623         struct hci_conn_hash *h = &hdev->conn_hash;
4624         struct hci_conn *conn;
4625         int num = 0;
4626
4627         BT_DBG("%s", hdev->name);
4628
4629         rcu_read_lock();
4630
4631         list_for_each_entry_rcu(conn, &h->list, list) {
4632                 struct hci_chan *chan;
4633
4634                 if (conn->type != type)
4635                         continue;
4636
4637                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4638                         continue;
4639
4640                 num++;
4641
4642                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4643                         struct sk_buff *skb;
4644
4645                         if (chan->sent) {
4646                                 chan->sent = 0;
4647                                 continue;
4648                         }
4649
4650                         if (skb_queue_empty(&chan->data_q))
4651                                 continue;
4652
4653                         skb = skb_peek(&chan->data_q);
4654                         if (skb->priority >= HCI_PRIO_MAX - 1)
4655                                 continue;
4656
4657                         skb->priority = HCI_PRIO_MAX - 1;
4658
4659                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4660                                skb->priority);
4661                 }
4662
4663                 if (hci_conn_num(hdev, type) == num)
4664                         break;
4665         }
4666
4667         rcu_read_unlock();
4668
4669 }
4670
4671 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4672 {
4673         /* Calculate count of blocks used by this packet */
4674         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4675 }
4676
4677 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
4678 {
4679         unsigned long last_tx;
4680
4681         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4682                 return;
4683
4684         switch (type) {
4685         case LE_LINK:
4686                 last_tx = hdev->le_last_tx;
4687                 break;
4688         default:
4689                 last_tx = hdev->acl_last_tx;
4690                 break;
4691         }
4692
4693         /* tx timeout must be longer than maximum link supervision timeout
4694          * (40.9 seconds)
4695          */
4696         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
4697                 hci_link_tx_to(hdev, type);
4698 }
4699
4700 /* Schedule SCO */
4701 static void hci_sched_sco(struct hci_dev *hdev)
4702 {
4703         struct hci_conn *conn;
4704         struct sk_buff *skb;
4705         int quote;
4706
4707         BT_DBG("%s", hdev->name);
4708
4709         if (!hci_conn_num(hdev, SCO_LINK))
4710                 return;
4711
4712         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4713                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4714                         BT_DBG("skb %p len %d", skb, skb->len);
4715                         hci_send_frame(hdev, skb);
4716
4717                         conn->sent++;
4718                         if (conn->sent == ~0)
4719                                 conn->sent = 0;
4720                 }
4721         }
4722 }
4723
4724 static void hci_sched_esco(struct hci_dev *hdev)
4725 {
4726         struct hci_conn *conn;
4727         struct sk_buff *skb;
4728         int quote;
4729
4730         BT_DBG("%s", hdev->name);
4731
4732         if (!hci_conn_num(hdev, ESCO_LINK))
4733                 return;
4734
4735         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4736                                                      &quote))) {
4737                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4738                         BT_DBG("skb %p len %d", skb, skb->len);
4739                         hci_send_frame(hdev, skb);
4740
4741                         conn->sent++;
4742                         if (conn->sent == ~0)
4743                                 conn->sent = 0;
4744                 }
4745         }
4746 }
4747
4748 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4749 {
4750         unsigned int cnt = hdev->acl_cnt;
4751         struct hci_chan *chan;
4752         struct sk_buff *skb;
4753         int quote;
4754
4755         __check_timeout(hdev, cnt, ACL_LINK);
4756
4757         while (hdev->acl_cnt &&
4758                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4759                 u32 priority = (skb_peek(&chan->data_q))->priority;
4760                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4761                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4762                                skb->len, skb->priority);
4763
4764                         /* Stop if priority has changed */
4765                         if (skb->priority < priority)
4766                                 break;
4767
4768                         skb = skb_dequeue(&chan->data_q);
4769
4770                         hci_conn_enter_active_mode(chan->conn,
4771                                                    bt_cb(skb)->force_active);
4772
4773                         hci_send_frame(hdev, skb);
4774                         hdev->acl_last_tx = jiffies;
4775
4776                         hdev->acl_cnt--;
4777                         chan->sent++;
4778                         chan->conn->sent++;
4779
4780                         /* Send pending SCO packets right away */
4781                         hci_sched_sco(hdev);
4782                         hci_sched_esco(hdev);
4783                 }
4784         }
4785
4786         if (cnt != hdev->acl_cnt)
4787                 hci_prio_recalculate(hdev, ACL_LINK);
4788 }
4789
4790 static void hci_sched_acl_blk(struct hci_dev *hdev)
4791 {
4792         unsigned int cnt = hdev->block_cnt;
4793         struct hci_chan *chan;
4794         struct sk_buff *skb;
4795         int quote;
4796         u8 type;
4797
4798         BT_DBG("%s", hdev->name);
4799
4800         if (hdev->dev_type == HCI_AMP)
4801                 type = AMP_LINK;
4802         else
4803                 type = ACL_LINK;
4804
4805         __check_timeout(hdev, cnt, type);
4806
4807         while (hdev->block_cnt > 0 &&
4808                (chan = hci_chan_sent(hdev, type, &quote))) {
4809                 u32 priority = (skb_peek(&chan->data_q))->priority;
4810                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4811                         int blocks;
4812
4813                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4814                                skb->len, skb->priority);
4815
4816                         /* Stop if priority has changed */
4817                         if (skb->priority < priority)
4818                                 break;
4819
4820                         skb = skb_dequeue(&chan->data_q);
4821
4822                         blocks = __get_blocks(hdev, skb);
4823                         if (blocks > hdev->block_cnt)
4824                                 return;
4825
4826                         hci_conn_enter_active_mode(chan->conn,
4827                                                    bt_cb(skb)->force_active);
4828
4829                         hci_send_frame(hdev, skb);
4830                         hdev->acl_last_tx = jiffies;
4831
4832                         hdev->block_cnt -= blocks;
4833                         quote -= blocks;
4834
4835                         chan->sent += blocks;
4836                         chan->conn->sent += blocks;
4837                 }
4838         }
4839
4840         if (cnt != hdev->block_cnt)
4841                 hci_prio_recalculate(hdev, type);
4842 }
4843
4844 static void hci_sched_acl(struct hci_dev *hdev)
4845 {
4846         BT_DBG("%s", hdev->name);
4847
4848         /* No ACL link over BR/EDR controller */
4849         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4850                 return;
4851
4852         /* No AMP link over AMP controller */
4853         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4854                 return;
4855
4856         switch (hdev->flow_ctl_mode) {
4857         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4858                 hci_sched_acl_pkt(hdev);
4859                 break;
4860
4861         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4862                 hci_sched_acl_blk(hdev);
4863                 break;
4864         }
4865 }
4866
4867 static void hci_sched_le(struct hci_dev *hdev)
4868 {
4869         struct hci_chan *chan;
4870         struct sk_buff *skb;
4871         int quote, cnt, tmp;
4872
4873         BT_DBG("%s", hdev->name);
4874
4875         if (!hci_conn_num(hdev, LE_LINK))
4876                 return;
4877
4878         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4879
4880         __check_timeout(hdev, cnt, LE_LINK);
4881
4882         tmp = cnt;
4883         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4884                 u32 priority = (skb_peek(&chan->data_q))->priority;
4885                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4886                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4887                                skb->len, skb->priority);
4888
4889                         /* Stop if priority has changed */
4890                         if (skb->priority < priority)
4891                                 break;
4892
4893                         skb = skb_dequeue(&chan->data_q);
4894
4895                         hci_send_frame(hdev, skb);
4896                         hdev->le_last_tx = jiffies;
4897
4898                         cnt--;
4899                         chan->sent++;
4900                         chan->conn->sent++;
4901
4902                         /* Send pending SCO packets right away */
4903                         hci_sched_sco(hdev);
4904                         hci_sched_esco(hdev);
4905                 }
4906         }
4907
4908         if (hdev->le_pkts)
4909                 hdev->le_cnt = cnt;
4910         else
4911                 hdev->acl_cnt = cnt;
4912
4913         if (cnt != tmp)
4914                 hci_prio_recalculate(hdev, LE_LINK);
4915 }
4916
4917 static void hci_tx_work(struct work_struct *work)
4918 {
4919         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4920         struct sk_buff *skb;
4921
4922         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4923                hdev->sco_cnt, hdev->le_cnt);
4924
4925         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4926                 /* Schedule queues and send stuff to HCI driver */
4927                 hci_sched_sco(hdev);
4928                 hci_sched_esco(hdev);
4929                 hci_sched_acl(hdev);
4930                 hci_sched_le(hdev);
4931         }
4932
4933         /* Send next queued raw (unknown type) packet */
4934         while ((skb = skb_dequeue(&hdev->raw_q)))
4935                 hci_send_frame(hdev, skb);
4936 }
4937
4938 /* ----- HCI RX task (incoming data processing) ----- */
4939
4940 /* ACL data packet */
4941 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4942 {
4943         struct hci_acl_hdr *hdr = (void *) skb->data;
4944         struct hci_conn *conn;
4945         __u16 handle, flags;
4946
4947         skb_pull(skb, HCI_ACL_HDR_SIZE);
4948
4949         handle = __le16_to_cpu(hdr->handle);
4950         flags  = hci_flags(handle);
4951         handle = hci_handle(handle);
4952
4953         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4954                handle, flags);
4955
4956         hdev->stat.acl_rx++;
4957
4958         hci_dev_lock(hdev);
4959         conn = hci_conn_hash_lookup_handle(hdev, handle);
4960         hci_dev_unlock(hdev);
4961
4962         if (conn) {
4963                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4964
4965                 /* Send to upper protocol */
4966                 l2cap_recv_acldata(conn, skb, flags);
4967                 return;
4968         } else {
4969                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4970                            handle);
4971         }
4972
4973         kfree_skb(skb);
4974 }
4975
4976 /* SCO data packet */
4977 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4978 {
4979         struct hci_sco_hdr *hdr = (void *) skb->data;
4980         struct hci_conn *conn;
4981         __u16 handle, flags;
4982
4983         skb_pull(skb, HCI_SCO_HDR_SIZE);
4984
4985         handle = __le16_to_cpu(hdr->handle);
4986         flags  = hci_flags(handle);
4987         handle = hci_handle(handle);
4988
4989         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4990                handle, flags);
4991
4992         hdev->stat.sco_rx++;
4993
4994         hci_dev_lock(hdev);
4995         conn = hci_conn_hash_lookup_handle(hdev, handle);
4996         hci_dev_unlock(hdev);
4997
4998         if (conn) {
4999                 /* Send to upper protocol */
5000                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
5001                 sco_recv_scodata(conn, skb);
5002                 return;
5003         } else {
5004                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
5005                            handle);
5006         }
5007
5008         kfree_skb(skb);
5009 }
5010
5011 static bool hci_req_is_complete(struct hci_dev *hdev)
5012 {
5013         struct sk_buff *skb;
5014
5015         skb = skb_peek(&hdev->cmd_q);
5016         if (!skb)
5017                 return true;
5018
5019         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
5020 }
5021
5022 static void hci_resend_last(struct hci_dev *hdev)
5023 {
5024         struct hci_command_hdr *sent;
5025         struct sk_buff *skb;
5026         u16 opcode;
5027
5028         if (!hdev->sent_cmd)
5029                 return;
5030
5031         sent = (void *) hdev->sent_cmd->data;
5032         opcode = __le16_to_cpu(sent->opcode);
5033         if (opcode == HCI_OP_RESET)
5034                 return;
5035
5036         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5037         if (!skb)
5038                 return;
5039
5040         skb_queue_head(&hdev->cmd_q, skb);
5041         queue_work(hdev->workqueue, &hdev->cmd_work);
5042 }
5043
5044 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
5045                           hci_req_complete_t *req_complete,
5046                           hci_req_complete_skb_t *req_complete_skb)
5047 {
5048         struct sk_buff *skb;
5049         unsigned long flags;
5050
5051         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5052
5053         /* If the completed command doesn't match the last one that was
5054          * sent we need to do special handling of it.
5055          */
5056         if (!hci_sent_cmd_data(hdev, opcode)) {
5057                 /* Some CSR based controllers generate a spontaneous
5058                  * reset complete event during init and any pending
5059                  * command will never be completed. In such a case we
5060                  * need to resend whatever was the last sent
5061                  * command.
5062                  */
5063                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5064                         hci_resend_last(hdev);
5065
5066                 return;
5067         }
5068
5069         /* If we reach this point this event matches the last command sent */
5070         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5071
5072         /* If the command succeeded and there's still more commands in
5073          * this request the request is not yet complete.
5074          */
5075         if (!status && !hci_req_is_complete(hdev))
5076                 return;
5077
5078         /* If this was the last command in a request the complete
5079          * callback would be found in hdev->sent_cmd instead of the
5080          * command queue (hdev->cmd_q).
5081          */
5082         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5083                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5084                 return;
5085         }
5086
5087         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5088                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5089                 return;
5090         }
5091
5092         /* Remove all pending commands belonging to this request */
5093         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5094         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5095                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5096                         __skb_queue_head(&hdev->cmd_q, skb);
5097                         break;
5098                 }
5099
5100                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5101                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5102                 else
5103                         *req_complete = bt_cb(skb)->hci.req_complete;
5104                 kfree_skb(skb);
5105         }
5106         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5107 }
5108
5109 static void hci_rx_work(struct work_struct *work)
5110 {
5111         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5112         struct sk_buff *skb;
5113
5114         BT_DBG("%s", hdev->name);
5115
5116         while ((skb = skb_dequeue(&hdev->rx_q))) {
5117                 /* Send copy to monitor */
5118                 hci_send_to_monitor(hdev, skb);
5119
5120                 if (atomic_read(&hdev->promisc)) {
5121                         /* Send copy to the sockets */
5122                         hci_send_to_sock(hdev, skb);
5123                 }
5124
5125                 /* If the device has been opened in HCI_USER_CHANNEL,
5126                  * the userspace has exclusive access to device.
5127                  * When device is HCI_INIT, we still need to process
5128                  * the data packets to the driver in order
5129                  * to complete its setup().
5130                  */
5131                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5132                     !test_bit(HCI_INIT, &hdev->flags)) {
5133                         kfree_skb(skb);
5134                         continue;
5135                 }
5136
5137                 if (test_bit(HCI_INIT, &hdev->flags)) {
5138                         /* Don't process data packets in this states. */
5139                         switch (hci_skb_pkt_type(skb)) {
5140                         case HCI_ACLDATA_PKT:
5141                         case HCI_SCODATA_PKT:
5142                         case HCI_ISODATA_PKT:
5143                                 kfree_skb(skb);
5144                                 continue;
5145                         }
5146                 }
5147
5148                 /* Process frame */
5149                 switch (hci_skb_pkt_type(skb)) {
5150                 case HCI_EVENT_PKT:
5151                         BT_DBG("%s Event packet", hdev->name);
5152                         hci_event_packet(hdev, skb);
5153                         break;
5154
5155                 case HCI_ACLDATA_PKT:
5156                         BT_DBG("%s ACL data packet", hdev->name);
5157                         hci_acldata_packet(hdev, skb);
5158                         break;
5159
5160                 case HCI_SCODATA_PKT:
5161                         BT_DBG("%s SCO data packet", hdev->name);
5162                         hci_scodata_packet(hdev, skb);
5163                         break;
5164
5165                 default:
5166                         kfree_skb(skb);
5167                         break;
5168                 }
5169         }
5170 }
5171
5172 static void hci_cmd_work(struct work_struct *work)
5173 {
5174         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5175         struct sk_buff *skb;
5176
5177         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5178                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5179
5180         /* Send queued commands */
5181         if (atomic_read(&hdev->cmd_cnt)) {
5182                 skb = skb_dequeue(&hdev->cmd_q);
5183                 if (!skb)
5184                         return;
5185
5186                 kfree_skb(hdev->sent_cmd);
5187
5188                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5189                 if (hdev->sent_cmd) {
5190                         if (hci_req_status_pend(hdev))
5191                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5192                         atomic_dec(&hdev->cmd_cnt);
5193                         hci_send_frame(hdev, skb);
5194                         if (test_bit(HCI_RESET, &hdev->flags))
5195                                 cancel_delayed_work(&hdev->cmd_timer);
5196                         else
5197                                 schedule_delayed_work(&hdev->cmd_timer,
5198                                                       HCI_CMD_TIMEOUT);
5199                 } else {
5200                         skb_queue_head(&hdev->cmd_q, skb);
5201                         queue_work(hdev->workqueue, &hdev->cmd_work);
5202                 }
5203         }
5204 }